,code,docs,doc_len,words,lang,prompt 0,"def rolling(self, *args, **kwargs) -> RollingGroupby: from pandas.core.window import RollingGroupby return RollingGroupby( self._selected_obj, *args, _grouper=self.grouper, _as_index=self.as_index, **kwargs, ) "," Return a rolling grouper, providing rolling functionality per group. ",9,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rolling(self, *args, **kwargs) -> RollingGroupby: from pandas.core.window import RollingGroupby return RollingGroupby( self._selected_obj, *args, _grouper=self.grouper, _as_index=self.as_index, **kwargs, ) ``` ###Assistant : Return a rolling grouper, providing rolling functionality per group. " 1,"def expected_degree_graph(w, seed=None, selfloops=True): r n = len(w) G = nx.empty_graph(n) # If there are no nodes are no edges in the graph, return the empty graph. if n == 0 or max(w) == 0: return G rho = 1 / sum(w) # Sort the weights in decreasing order. The original order of the # weights dictates the order of the (integer) node labels, so we # need to remember the permutation applied in the sorting. order = sorted(enumerate(w), key=itemgetter(1), reverse=True) mapping = {c: u for c, (u, v) in enumerate(order)} seq = [v for u, v in order] last = n if not selfloops: last -= 1 for u in range(last): v = u if not selfloops: v += 1 factor = seq[u] * rho p = min(seq[v] * factor, 1) while v < n and p > 0: if p != 1: r = seed.random() v += math.floor(math.log(r, 1 - p)) if v < n: q = min(seq[v] * factor, 1) if seed.random() < q / p: G.add_edge(mapping[u], mapping[v]) v += 1 p = q return G ","Returns a random graph with given expected degrees. Given a sequence of expected degrees $W=(w_0,w_1,\ldots,w_{n-1})$ of length $n$ this algorithm assigns an edge between node $u$ and node $v$ with probability .. math:: p_{uv} = \frac{w_u w_v}{\sum_k w_k} . Parameters ---------- w : list The list of expected degrees. selfloops: bool (default=True) Set to False to remove the possibility of self-loop edges. seed : integer, random_state, or None (default) Indicator of random number generation state. See :ref:`Randomness`. Returns ------- Graph Examples -------- >>> z = [10 for i in range(100)] >>> G = nx.expected_degree_graph(z) Notes ----- The nodes have integer labels corresponding to index of expected degrees input sequence. The complexity of this algorithm is $\mathcal{O}(n+m)$ where $n$ is the number of nodes and $m$ is the expected number of edges. The model in [1]_ includes the possibility of self-loop edges. Set selfloops=False to produce a graph without self loops. For finite graphs this model doesn't produce exactly the given expected degree sequence. Instead the expected degrees are as follows. For the case without self loops (selfloops=False), .. math:: E[deg(u)] = \sum_{v \ne u} p_{uv} = w_u \left( 1 - \frac{w_u}{\sum_k w_k} \right) . NetworkX uses the standard convention that a self-loop edge counts 2 in the degree of a node, so with self loops (selfloops=True), .. math:: E[deg(u)] = \sum_{v \ne u} p_{uv} + 2 p_{uu} = w_u \left( 1 + \frac{w_u}{\sum_k w_k} \right) . References ---------- .. [1] Fan Chung and L. Lu, Connected components in random graphs with given expected degree sequences, Ann. Combinatorics, 6, pp. 125-145, 2002. .. [2] Joel Miller and Aric Hagberg, Efficient generation of networks with given expected degrees, in Algorithms and Models for the Web-Graph (WAW 2011), Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732, pp. 115-126, 2011. 
",298,179,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def expected_degree_graph(w, seed=None, selfloops=True): r n = len(w) G = nx.empty_graph(n) # If there are no nodes are no edges in the graph, return the empty graph. if n == 0 or max(w) == 0: return G rho = 1 / sum(w) # Sort the weights in decreasing order. The original order of the # weights dictates the order of the (integer) node labels, so we # need to remember the permutation applied in the sorting. order = sorted(enumerate(w), key=itemgetter(1), reverse=True) mapping = {c: u for c, (u, v) in enumerate(order)} seq = [v for u, v in order] last = n if not selfloops: last -= 1 for u in range(last): v = u if not selfloops: v += 1 factor = seq[u] * rho p = min(seq[v] * factor, 1) while v < n and p > 0: if p != 1: r = seed.random() v += math.floor(math.log(r, 1 - p)) if v < n: q = min(seq[v] * factor, 1) if seed.random() < q / p: G.add_edge(mapping[u], mapping[v]) v += 1 p = q return G ``` ###Assistant : Returns a random graph with given expected degrees. Given a sequence of expected degrees $W=(w_0,w_1,\ldots,w_{n-1})$ of length $n$ this algorithm assigns an edge between node $u$ and node $v$ with probability .. math:: p_{uv} = \frac{w_u w_v}{\sum_k w_k} . Parameters ---------- w : list The list of expected degrees. selfloops: bool (default=True) Set to False to remove the possibility of self-loop edges. seed : integer, random_state, or None (default) Indicator of random number generation state. See :ref:`Randomness`. Returns ------- Graph Examples -------- >>> z = [10 for i in range(100)] >>> G = nx.expected_degree_graph(z) Notes ----- The nodes have integer labels corresponding to index of expected degrees input sequence. The complexity of this algorithm is $\mathcal{O}(n+m)$ where $n$ is the number of nodes and $m$ is the expected number of edges. The model in [1]_ includes the possibility of self-loop edges. Set selfloops=False to produce a graph without self loops. For finite graphs this model doesn't produce exactly the given expected degree sequence. Instead the expected degrees are as follows. For the case without self loops (selfloops=False), .. math:: E[deg(u)] = \sum_{v \ne u} p_{uv} = w_u \left( 1 - \frac{w_u}{\sum_k w_k} \right) . NetworkX uses the standard convention that a self-loop edge counts 2 in the degree of a node, so with self loops (selfloops=True), .. math:: E[deg(u)] = \sum_{v \ne u} p_{uv} + 2 p_{uu} = w_u \left( 1 + \frac{w_u}{\sum_k w_k} \right) . References ---------- .. [1] Fan Chung and L. Lu, Connected components in random graphs with given expected degree sequences, Ann. Combinatorics, 6, pp. 125-145, 2002. .. [2] Joel Miller and Aric Hagberg, Efficient generation of networks with given expected degrees, in Algorithms and Models for the Web-Graph (WAW 2011), Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732, pp. 115-126, 2011. 
" 2,"def save(self, path): os.makedirs(path, exist_ok=True) with open(os.path.join(path, ""metrics.json""), ""w"") as fp: json.dump(self.metrics, fp) artifacts_metadata = { artifact_name: { ""uri"": artifact.uri, ""class_name"": _get_fully_qualified_class_name(artifact), } for artifact_name, artifact in self.artifacts.items() } with open(os.path.join(path, ""artifacts_metadata.json""), ""w"") as fp: json.dump(artifacts_metadata, fp) artifacts_dir = os.path.join(path, ""artifacts"") os.mkdir(artifacts_dir) for artifact_name, artifact in self.artifacts.items(): artifact._save(os.path.join(artifacts_dir, artifact_name)) ",Write the evaluation results to the specified local filesystem path,10,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save(self, path): os.makedirs(path, exist_ok=True) with open(os.path.join(path, ""metrics.json""), ""w"") as fp: json.dump(self.metrics, fp) artifacts_metadata = { artifact_name: { ""uri"": artifact.uri, ""class_name"": _get_fully_qualified_class_name(artifact), } for artifact_name, artifact in self.artifacts.items() } with open(os.path.join(path, ""artifacts_metadata.json""), ""w"") as fp: json.dump(artifacts_metadata, fp) artifacts_dir = os.path.join(path, ""artifacts"") os.mkdir(artifacts_dir) for artifact_name, artifact in self.artifacts.items(): artifact._save(os.path.join(artifacts_dir, artifact_name)) ``` ###Assistant : Write the evaluation results to the specified local filesystem path " 3,"def test_build_group_generic_issue_attachment(self): event = self.store_event( data={""message"": ""Hello world"", ""level"": ""error""}, project_id=self.project.id ) event = event.for_group(event.groups[0]) occurrence = self.build_occurrence(level=""info"") occurrence.save(project_id=self.project.id) event.occurrence = occurrence event.group.type = GroupType.PROFILE_BLOCKED_THREAD attachments = SlackIssuesMessageBuilder(group=event.group, event=event).build() assert attachments[""title""] == occurrence.issue_title assert attachments[""text""] == occurrence.evidence_display[0].value assert attachments[""fallback""] == f""[{self.project.slug}] {occurrence.issue_title}"" assert attachments[""color""] == ""#2788CE"" # blue for info level ",Test that a generic issue type's Slack alert contains the expected values,12,51,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_build_group_generic_issue_attachment(self): event = self.store_event( data={""message"": ""Hello world"", ""level"": ""error""}, project_id=self.project.id ) event = event.for_group(event.groups[0]) occurrence = self.build_occurrence(level=""info"") occurrence.save(project_id=self.project.id) event.occurrence = occurrence event.group.type = GroupType.PROFILE_BLOCKED_THREAD attachments = SlackIssuesMessageBuilder(group=event.group, event=event).build() assert attachments[""title""] == occurrence.issue_title assert attachments[""text""] == occurrence.evidence_display[0].value assert attachments[""fallback""] == f""[{self.project.slug}] {occurrence.issue_title}"" assert attachments[""color""] == ""#2788CE"" # blue for info level ``` ###Assistant : Test that a generic issue type's Slack alert contains the expected values " 4,"def apply(self, func, mask=None) -> 'ImageProcessor': img = orig_img = self._img img = func(img).astype(orig_img.dtype) if img.ndim != 4: raise Exception('func used in ImageProcessor.apply changed format of image') if mask is not None: mask = self._check_normalize_mask(mask) img = ne.evaluate('orig_img*(1-mask) + img*mask').astype(orig_img.dtype) self._img = img return self "," apply your own function on internal image image has NHWC format. Do not change format, but dims can be changed. func callable (img) -> img example: .apply( lambda img: img-[102,127,63] ) ",31,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply(self, func, mask=None) -> 'ImageProcessor': img = orig_img = self._img img = func(img).astype(orig_img.dtype) if img.ndim != 4: raise Exception('func used in ImageProcessor.apply changed format of image') if mask is not None: mask = self._check_normalize_mask(mask) img = ne.evaluate('orig_img*(1-mask) + img*mask').astype(orig_img.dtype) self._img = img return self ``` ###Assistant : apply your own function on internal image image has NHWC format. Do not change format, but dims can be changed. func callable (img) -> img example: .apply( lambda img: img-[102,127,63] ) " 5,"def predict(self, x): # start the timer self.timer.start() v_, _ = self.eval_out() # evaluate the ratings and the associated probabilities vp = self.sess.run(v_, feed_dict={self.vu: x}) # stop the timer self.timer.stop() log.info(""Done inference, time %f2"" % self.timer.interval) return vp ","Returns the inferred ratings. This method is similar to recommend_k_items() with the exceptions that it returns all the inferred ratings Basic mechanics: The method samples new ratings from the learned joint distribution, together with their probabilities. The input x must have the same number of columns as the one used for training the model, i.e. the same number of items, but it can have an arbitrary number of rows (users). Args: x (numpy.ndarray, int32): Input user/affinity matrix. Note that this can be a single vector, i.e. the ratings of a single user. Returns: numpy.ndarray, float: - A matrix with the inferred ratings. - The elapsed time for predediction. ",108,38,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def predict(self, x): # start the timer self.timer.start() v_, _ = self.eval_out() # evaluate the ratings and the associated probabilities vp = self.sess.run(v_, feed_dict={self.vu: x}) # stop the timer self.timer.stop() log.info(""Done inference, time %f2"" % self.timer.interval) return vp ``` ###Assistant : Returns the inferred ratings. This method is similar to recommend_k_items() with the exceptions that it returns all the inferred ratings Basic mechanics: The method samples new ratings from the learned joint distribution, together with their probabilities. The input x must have the same number of columns as the one used for training the model, i.e. the same number of items, but it can have an arbitrary number of rows (users). Args: x (numpy.ndarray, int32): Input user/affinity matrix. Note that this can be a single vector, i.e. the ratings of a single user. Returns: numpy.ndarray, float: - A matrix with the inferred ratings. - The elapsed time for predediction. " 6,"def raw_decode(self, s, idx=0): try: obj, end = self.scan_once(s, idx) except StopIteration as err: raise JSONDecodeError(""Expecting value"", s, err.value) from None return obj, end ","Decode a JSON document from ``s`` (a ``str`` beginning with a JSON document) and return a 2-tuple of the Python representation and the index in ``s`` where the document ended. This can be used to decode a JSON document from a string that may have extraneous data at the end. ",50,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def raw_decode(self, s, idx=0): try: obj, end = self.scan_once(s, idx) except StopIteration as err: raise JSONDecodeError(""Expecting value"", s, err.value) from None return obj, end ``` ###Assistant : Decode a JSON document from ``s`` (a ``str`` beginning with a JSON document) and return a 2-tuple of the Python representation and the index in ``s`` where the document ended. This can be used to decode a JSON document from a string that may have extraneous data at the end. " 7,"def has_bridges(G, root=None): try: next(bridges(G)) except StopIteration: return False else: return True @not_implemented_for(""multigraph"") @not_implemented_for(""directed"")","Decide whether a graph has any bridges. A *bridge* in a graph is an edge whose removal causes the number of connected components of the graph to increase. Parameters ---------- G : undirected graph root : node (optional) A node in the graph `G`. If specified, only the bridges in the connected component containing this node will be considered. Returns ------- bool Whether the graph (or the connected component containing `root`) has any bridges. Raises ------ NodeNotFound If `root` is not in the graph `G`. NetworkXNotImplemented If `G` is a directed graph. Examples -------- The barbell graph with parameter zero has a single bridge:: >>> G = nx.barbell_graph(10, 0) >>> nx.has_bridges(G) True On the other hand, the cycle graph has no bridges:: >>> G = nx.cycle_graph(5) >>> nx.has_bridges(G) False Notes ----- This implementation uses the :func:`networkx.bridges` function, so it shares its worst-case time complexity, $O(m + n)$, ignoring polylogarithmic factors, where $n$ is the number of nodes in the graph and $m$ is the number of edges. ",167,14,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def has_bridges(G, root=None): try: next(bridges(G)) except StopIteration: return False else: return True @not_implemented_for(""multigraph"") @not_implemented_for(""directed"") ``` ###Assistant : Decide whether a graph has any bridges. A *bridge* in a graph is an edge whose removal causes the number of connected components of the graph to increase. Parameters ---------- G : undirected graph root : node (optional) A node in the graph `G`. If specified, only the bridges in the connected component containing this node will be considered. Returns ------- bool Whether the graph (or the connected component containing `root`) has any bridges. Raises ------ NodeNotFound If `root` is not in the graph `G`. NetworkXNotImplemented If `G` is a directed graph. Examples -------- The barbell graph with parameter zero has a single bridge:: >>> G = nx.barbell_graph(10, 0) >>> nx.has_bridges(G) True On the other hand, the cycle graph has no bridges:: >>> G = nx.cycle_graph(5) >>> nx.has_bridges(G) False Notes ----- This implementation uses the :func:`networkx.bridges` function, so it shares its worst-case time complexity, $O(m + n)$, ignoring polylogarithmic factors, where $n$ is the number of nodes in the graph and $m$ is the number of edges. " 8,"def wheel_metadata(source, dist_info_dir): # type: (ZipFile, str) -> Message path = f""{dist_info_dir}/WHEEL"" # Zip file path separators must be / wheel_contents = read_wheel_metadata_file(source, path) try: wheel_text = wheel_contents.decode() except UnicodeDecodeError as e: raise UnsupportedWheel(f""error decoding {path!r}: {e!r}"") # FeedParser (used by Parser) does not raise any exceptions. The returned # message may have .defects populated, but for backwards-compatibility we # currently ignore them. return Parser().parsestr(wheel_text) ","Return the WHEEL metadata of an extracted wheel, if possible. Otherwise, raise UnsupportedWheel. ",13,65,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wheel_metadata(source, dist_info_dir): # type: (ZipFile, str) -> Message path = f""{dist_info_dir}/WHEEL"" # Zip file path separators must be / wheel_contents = read_wheel_metadata_file(source, path) try: wheel_text = wheel_contents.decode() except UnicodeDecodeError as e: raise UnsupportedWheel(f""error decoding {path!r}: {e!r}"") # FeedParser (used by Parser) does not raise any exceptions. The returned # message may have .defects populated, but for backwards-compatibility we # currently ignore them. return Parser().parsestr(wheel_text) ``` ###Assistant : Return the WHEEL metadata of an extracted wheel, if possible. Otherwise, raise UnsupportedWheel. " 9,"def remove_column(self, i, *args, **kwargs): table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t for t in tables ] ) return ConcatenationTable(table, blocks) "," Create new Table with the indicated column removed. Args: i (:obj:`int`): Index of column to remove. Returns: :class:`datasets.table.Table`: New table without the column. ",23,40,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def remove_column(self, i, *args, **kwargs): table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t for t in tables ] ) return ConcatenationTable(table, blocks) ``` ###Assistant : Create new Table with the indicated column removed. Args: i (:obj:`int`): Index of column to remove. Returns: :class:`datasets.table.Table`: New table without the column. " 10,"def test_cable_cannot_terminate_to_a_wireless_interface(self): wireless_interface = Interface(device=self.device1, name=""W1"", type=InterfaceTypeChoices.TYPE_80211A) cable = Cable(a_terminations=[self.interface2], b_terminations=[wireless_interface]) with self.assertRaises(ValidationError): cable.clean() "," A cable cannot terminate to a wireless interface ",8,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_cable_cannot_terminate_to_a_wireless_interface(self): wireless_interface = Interface(device=self.device1, name=""W1"", type=InterfaceTypeChoices.TYPE_80211A) cable = Cable(a_terminations=[self.interface2], b_terminations=[wireless_interface]) with self.assertRaises(ValidationError): cable.clean() ``` ###Assistant : A cable cannot terminate to a wireless interface " 11,"def get_test_db_clone_settings(self, suffix): # When this function is called, the test database has been created # already and its name has been copied to settings_dict['NAME'] so # we don't need to call _get_test_db_name. orig_settings_dict = self.connection.settings_dict return { **orig_settings_dict, ""NAME"": ""{}_{}"".format(orig_settings_dict[""NAME""], suffix), } "," Return a modified connection settings dict for the n-th clone of a DB. ",13,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_test_db_clone_settings(self, suffix): # When this function is called, the test database has been created # already and its name has been copied to settings_dict['NAME'] so # we don't need to call _get_test_db_name. orig_settings_dict = self.connection.settings_dict return { **orig_settings_dict, ""NAME"": ""{}_{}"".format(orig_settings_dict[""NAME""], suffix), } ``` ###Assistant : Return a modified connection settings dict for the n-th clone of a DB. " 12,"def open(self, host='', port=IMAP4_PORT, timeout=None): self.host = host self.port = port self.sock = self._create_socket(timeout) self.file = self.sock.makefile('rb') ","Setup connection to remote server on ""host:port"" (default: localhost:standard IMAP4 port). This connection will be used by the routines: read, readline, send, shutdown. ",23,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def open(self, host='', port=IMAP4_PORT, timeout=None): self.host = host self.port = port self.sock = self._create_socket(timeout) self.file = self.sock.makefile('rb') ``` ###Assistant : Setup connection to remote server on ""host:port"" (default: localhost:standard IMAP4 port). This connection will be used by the routines: read, readline, send, shutdown. 
" 13,"def synchronized_output_end_sequence(self) -> str: if self.synchronised_output: return TERMINAL_MODES_ANSI_SEQUENCES[Mode.SynchronizedOutput][""end_sync""] return """" "," Returns the ANSI sequence that we should send to the terminal to tell it that it should stop buffering the content we're about to send. If the terminal doesn't seem to support synchronised updates the string will be empty. Returns: str: the ""synchronised output stop"" ANSI sequence. It will be ab empty string if the terminal emulator doesn't seem to support the ""synchronised updates"" mode. ",65,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def synchronized_output_end_sequence(self) -> str: if self.synchronised_output: return TERMINAL_MODES_ANSI_SEQUENCES[Mode.SynchronizedOutput][""end_sync""] return """" ``` ###Assistant : Returns the ANSI sequence that we should send to the terminal to tell it that it should stop buffering the content we're about to send. If the terminal doesn't seem to support synchronised updates the string will be empty. Returns: str: the ""synchronised output stop"" ANSI sequence. It will be ab empty string if the terminal emulator doesn't seem to support the ""synchronised updates"" mode. " 14,"def _band_penalty_coefficients(self, fc, q, gain, filter_frs): ref_frs = biquad.digital_coeffs(self.frequency, 192e3, *biquad.peaking(fc, q, gain, fs=192e3)) est_sums = np.sum(filter_frs, axis=1) ref_sums = np.sum(ref_frs, axis=1) penalties = np.zeros((len(fc),)) mask = np.squeeze(ref_sums) != 0.0 penalties[mask] = est_sums[mask] / ref_sums[mask] return 10 * (1 - np.expand_dims(penalties, 1)) ","Calculates penalty coefficients for filters if their transition bands extend beyond Nyquist frequency The calculation is based on ratio of frequency response integrals between 44.1 kHz and 192 kHz Args: fc: Filter center frequencies, 1-D array q: Filter qualities, 1-D array gain: Filter gains, 1-D array filter_frs: Filter frequency responses, 2-D array, one fr per row Returns: Column array of penalty coefficients, one per filter ",65,42,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _band_penalty_coefficients(self, fc, q, gain, filter_frs): ref_frs = biquad.digital_coeffs(self.frequency, 192e3, *biquad.peaking(fc, q, gain, fs=192e3)) est_sums = np.sum(filter_frs, axis=1) ref_sums = np.sum(ref_frs, axis=1) penalties = np.zeros((len(fc),)) mask = np.squeeze(ref_sums) != 0.0 penalties[mask] = est_sums[mask] / ref_sums[mask] return 10 * (1 - np.expand_dims(penalties, 1)) ``` ###Assistant : Calculates penalty coefficients for filters if their transition bands extend beyond Nyquist frequency The calculation is based on ratio of frequency response integrals between 44.1 kHz and 192 kHz Args: fc: Filter center frequencies, 1-D array q: Filter qualities, 1-D array gain: Filter gains, 1-D array filter_frs: Filter frequency responses, 2-D array, one fr per row Returns: Column array of penalty coefficients, one per filter " 15,"def test_predict_on_toy_problem(global_random_seed): clf1 = LogisticRegression(random_state=global_random_seed) clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) clf3 = GaussianNB() X = np.array( [[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]] ) y = np.array([1, 1, 1, 2, 2, 2]) assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) eclf = VotingClassifier( estimators=[(""lr"", clf1), (""rf"", clf2), (""gnb"", clf3)], voting=""hard"", weights=[1, 1, 1], ) assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) eclf = VotingClassifier( estimators=[(""lr"", clf1), (""rf"", clf2), (""gnb"", clf3)], voting=""soft"", weights=[1, 1, 1], ) assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) ",Manually check predicted class labels for toy dataset.,8,104,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_predict_on_toy_problem(global_random_seed): clf1 = LogisticRegression(random_state=global_random_seed) clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) clf3 = GaussianNB() X = np.array( [[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]] ) y = np.array([1, 1, 1, 2, 2, 2]) assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) eclf = VotingClassifier( estimators=[(""lr"", clf1), (""rf"", clf2), (""gnb"", clf3)], voting=""hard"", weights=[1, 1, 1], ) assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) eclf = VotingClassifier( estimators=[(""lr"", clf1), (""rf"", clf2), (""gnb"", clf3)], voting=""soft"", weights=[1, 1, 1], ) assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) ``` ###Assistant : Manually check predicted class labels for toy dataset. " 16,"def fit_transform(self, X, y=None): self._validate_params() return self._transform(X, fitting=True) ","Learn a list of feature name -> indices mappings and transform X. Like fit(X) followed by transform(X), but does not require materializing X in memory. Parameters ---------- X : Mapping or iterable over Mappings Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). .. versionchanged:: 0.24 Accepts multiple string values for one categorical feature. 
y : (ignored) Ignored parameter. Returns ------- Xa : {array, sparse matrix} Feature vectors; always 2-d. ",78,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit_transform(self, X, y=None): self._validate_params() return self._transform(X, fitting=True) ``` ###Assistant : Learn a list of feature name -> indices mappings and transform X. Like fit(X) followed by transform(X), but does not require materializing X in memory. Parameters ---------- X : Mapping or iterable over Mappings Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). .. versionchanged:: 0.24 Accepts multiple string values for one categorical feature. y : (ignored) Ignored parameter. Returns ------- Xa : {array, sparse matrix} Feature vectors; always 2-d. " 17,"def _on_feature_permission_requested(self, url, feature): page = self._widget.page() grant_permission = functools.partial( page.setFeaturePermission, url, feature, QWebEnginePage.PermissionPolicy.PermissionGrantedByUser) deny_permission = functools.partial( page.setFeaturePermission, url, feature, QWebEnginePage.PermissionPolicy.PermissionDeniedByUser) permission_str = debug.qenum_key(QWebEnginePage, feature) if not url.isValid(): # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-85116 is_qtbug = (qtutils.version_check('5.15.0', compiled=False, exact=True) and self._tab.is_private and feature == QWebEnginePage.Feature.Notifications) logger = log.webview.debug if is_qtbug else log.webview.warning logger(""Ignoring feature permission {} for invalid URL {}"".format( permission_str, url)) deny_permission() return if feature not in self._options: log.webview.error(""Unhandled feature permission {}"".format( permission_str)) deny_permission() return if ( feature in [QWebEnginePage.Feature.DesktopVideoCapture, QWebEnginePage.Feature.DesktopAudioVideoCapture] and qtutils.version_check('5.13', compiled=False) and not qtutils.version_check('5.13.2', compiled=False) ): # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-78016 log.webview.warning(""Ignoring desktop sharing request due to "" ""crashes in Qt < 5.13.2"") deny_permission() return question = shared.feature_permission( url=url.adjusted(QUrl.UrlFormattingOption.RemovePath), option=self._options[feature], msg=self._messages[feature], yes_action=grant_permission, no_action=deny_permission, abort_on=[self._tab.abort_questions]) if question is not None: page.featurePermissionRequestCanceled.connect( functools.partial(self._on_feature_permission_cancelled, question, url, feature)) ",Ask the user for approval for geolocation/media/etc..,7,125,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _on_feature_permission_requested(self, url, feature): page = self._widget.page() grant_permission = functools.partial( page.setFeaturePermission, url, feature, QWebEnginePage.PermissionPolicy.PermissionGrantedByUser) deny_permission = functools.partial( page.setFeaturePermission, url, feature, QWebEnginePage.PermissionPolicy.PermissionDeniedByUser) permission_str = debug.qenum_key(QWebEnginePage, feature) if not url.isValid(): # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-85116 is_qtbug = (qtutils.version_check('5.15.0', compiled=False, exact=True) and self._tab.is_private and feature == QWebEnginePage.Feature.Notifications) logger = log.webview.debug if is_qtbug else log.webview.warning logger(""Ignoring feature permission {} for invalid URL {}"".format( permission_str, url)) deny_permission() return if feature not in self._options: log.webview.error(""Unhandled feature permission {}"".format( permission_str)) deny_permission() return if ( feature in [QWebEnginePage.Feature.DesktopVideoCapture, QWebEnginePage.Feature.DesktopAudioVideoCapture] and qtutils.version_check('5.13', compiled=False) and not qtutils.version_check('5.13.2', compiled=False) ): # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-78016 log.webview.warning(""Ignoring desktop sharing request due to "" ""crashes in Qt < 5.13.2"") deny_permission() return question = shared.feature_permission( url=url.adjusted(QUrl.UrlFormattingOption.RemovePath), option=self._options[feature], msg=self._messages[feature], yes_action=grant_permission, no_action=deny_permission, abort_on=[self._tab.abort_questions]) if question is not None: page.featurePermissionRequestCanceled.connect( functools.partial(self._on_feature_permission_cancelled, question, url, feature)) ``` ###Assistant : Ask the user for approval for geolocation/media/etc.. " 18,"def add_find_python(self): start = 402 for ver in self.versions: install_path = r""SOFTWARE\Python\PythonCore\%s\InstallPath"" % ver machine_reg = ""python.machine."" + ver user_reg = ""python.user."" + ver machine_prop = ""PYTHON.MACHINE."" + ver user_prop = ""PYTHON.USER."" + ver machine_action = ""PythonFromMachine"" + ver user_action = ""PythonFromUser"" + ver exe_action = ""PythonExe"" + ver target_dir_prop = ""TARGETDIR"" + ver exe_prop = ""PYTHON"" + ver if msilib.Win64: # type: msidbLocatorTypeRawValue + msidbLocatorType64bit Type = 2+16 else: Type = 2 add_data(self.db, ""RegLocator"", [(machine_reg, 2, install_path, None, Type), (user_reg, 1, install_path, None, Type)]) add_data(self.db, ""AppSearch"", [(machine_prop, machine_reg), (user_prop, user_reg)]) add_data(self.db, ""CustomAction"", [(machine_action, 51+256, target_dir_prop, ""["" + machine_prop + ""]""), (user_action, 51+256, target_dir_prop, ""["" + user_prop + ""]""), (exe_action, 51+256, exe_prop, ""["" + target_dir_prop + ""]\\python.exe""), ]) add_data(self.db, ""InstallExecuteSequence"", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, ""InstallUISequence"", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, ""Condition"", [(""Python"" + ver, 0, ""NOT TARGETDIR"" + ver)]) start += 4 assert start < 500 ","Adds code to the installer to compute the location of Python. Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the registry for each version of Python. 
Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined, else from PYTHON.MACHINE.X.Y. Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe",45,167,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_find_python(self): start = 402 for ver in self.versions: install_path = r""SOFTWARE\Python\PythonCore\%s\InstallPath"" % ver machine_reg = ""python.machine."" + ver user_reg = ""python.user."" + ver machine_prop = ""PYTHON.MACHINE."" + ver user_prop = ""PYTHON.USER."" + ver machine_action = ""PythonFromMachine"" + ver user_action = ""PythonFromUser"" + ver exe_action = ""PythonExe"" + ver target_dir_prop = ""TARGETDIR"" + ver exe_prop = ""PYTHON"" + ver if msilib.Win64: # type: msidbLocatorTypeRawValue + msidbLocatorType64bit Type = 2+16 else: Type = 2 add_data(self.db, ""RegLocator"", [(machine_reg, 2, install_path, None, Type), (user_reg, 1, install_path, None, Type)]) add_data(self.db, ""AppSearch"", [(machine_prop, machine_reg), (user_prop, user_reg)]) add_data(self.db, ""CustomAction"", [(machine_action, 51+256, target_dir_prop, ""["" + machine_prop + ""]""), (user_action, 51+256, target_dir_prop, ""["" + user_prop + ""]""), (exe_action, 51+256, exe_prop, ""["" + target_dir_prop + ""]\\python.exe""), ]) add_data(self.db, ""InstallExecuteSequence"", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, ""InstallUISequence"", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, ""Condition"", [(""Python"" + ver, 0, ""NOT TARGETDIR"" + ver)]) start += 4 assert start < 500 ``` ###Assistant : Adds code to the installer to compute the location of Python. Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the registry for each version of Python. Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined, else from PYTHON.MACHINE.X.Y. Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe " 19,"def write_exports(self, exports): rf = self.get_distinfo_file(EXPORTS_FILENAME) with open(rf, 'w') as f: write_exports(exports, f) "," Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. ",32,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def write_exports(self, exports): rf = self.get_distinfo_file(EXPORTS_FILENAME) with open(rf, 'w') as f: write_exports(exports, f) ``` ###Assistant : Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. " 20,"def _get_action_handler_with_module_context(self, connection, templar): module_collection, separator, module_name = self._task.action.rpartition(""."") module_prefix = module_name.split('_')[0] if module_collection: # For network modules, which look for one action plugin per platform, look for the # action plugin in the same collection as the module by prefixing the action plugin # with the same collection. 
network_action = ""{0}.{1}"".format(module_collection, module_prefix) else: network_action = module_prefix collections = self._task.collections # Check if the module has specified an action handler module = self._shared_loader_obj.module_loader.find_plugin_with_context( self._task.action, collection_list=collections ) if not module.resolved or not module.action_plugin: module = None if module is not None: handler_name = module.action_plugin # let action plugin override module, fallback to 'normal' action plugin otherwise elif self._shared_loader_obj.action_loader.has_plugin(self._task.action, collection_list=collections): handler_name = self._task.action elif all((module_prefix in C.NETWORK_GROUP_MODULES, self._shared_loader_obj.action_loader.has_plugin(network_action, collection_list=collections))): handler_name = network_action display.vvvv(""Using network group action {handler} for {action}"".format(handler=handler_name, action=self._task.action), host=self._play_context.remote_addr) else: # use ansible.legacy.normal to allow (historic) local action_plugins/ override without collections search handler_name = 'ansible.legacy.normal' collections = None # until then, we don't want the task's collection list to be consulted; use the builtin handler = self._shared_loader_obj.action_loader.get( handler_name, task=self._task, connection=connection, play_context=self._play_context, loader=self._loader, templar=templar, shared_loader_obj=self._shared_loader_obj, collection_list=collections ) if not handler: raise AnsibleError(""the handler '%s' was not found"" % handler_name) return handler, module "," Returns the correct action plugin to handle the requestion task action and the module context ",15,191,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_action_handler_with_module_context(self, connection, templar): module_collection, separator, module_name = self._task.action.rpartition(""."") module_prefix = module_name.split('_')[0] if module_collection: # For network modules, which look for one action plugin per platform, look for the # action plugin in the same collection as the module by prefixing the action plugin # with the same collection. 
network_action = ""{0}.{1}"".format(module_collection, module_prefix) else: network_action = module_prefix collections = self._task.collections # Check if the module has specified an action handler module = self._shared_loader_obj.module_loader.find_plugin_with_context( self._task.action, collection_list=collections ) if not module.resolved or not module.action_plugin: module = None if module is not None: handler_name = module.action_plugin # let action plugin override module, fallback to 'normal' action plugin otherwise elif self._shared_loader_obj.action_loader.has_plugin(self._task.action, collection_list=collections): handler_name = self._task.action elif all((module_prefix in C.NETWORK_GROUP_MODULES, self._shared_loader_obj.action_loader.has_plugin(network_action, collection_list=collections))): handler_name = network_action display.vvvv(""Using network group action {handler} for {action}"".format(handler=handler_name, action=self._task.action), host=self._play_context.remote_addr) else: # use ansible.legacy.normal to allow (historic) local action_plugins/ override without collections search handler_name = 'ansible.legacy.normal' collections = None # until then, we don't want the task's collection list to be consulted; use the builtin handler = self._shared_loader_obj.action_loader.get( handler_name, task=self._task, connection=connection, play_context=self._play_context, loader=self._loader, templar=templar, shared_loader_obj=self._shared_loader_obj, collection_list=collections ) if not handler: raise AnsibleError(""the handler '%s' was not found"" % handler_name) return handler, module ``` ###Assistant : Returns the correct action plugin to handle the requestion task action and the module context " 21,"def forward(self, y_hat, y, length): mask = sequence_mask(sequence_length=length, max_len=y.size(1)).unsqueeze(2) y_norm = sample_wise_min_max(y, mask) y_hat_norm = sample_wise_min_max(y_hat, mask) ssim_loss = self.loss_func((y_norm * mask).unsqueeze(1), (y_hat_norm * mask).unsqueeze(1)) if ssim_loss.item() > 1.0: print(f"" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 1.0"") ssim_loss == 1.0 if ssim_loss.item() < 0.0: print(f"" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 0.0"") ssim_loss == 0.0 return ssim_loss "," Args: y_hat (tensor): model prediction values. y (tensor): target values. length (tensor): length of each sample in a batch for masking. Shapes: y_hat: B x T X D y: B x T x D length: B Returns: loss: An average loss value in range [0, 1] masked by the length. ",50,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, y_hat, y, length): mask = sequence_mask(sequence_length=length, max_len=y.size(1)).unsqueeze(2) y_norm = sample_wise_min_max(y, mask) y_hat_norm = sample_wise_min_max(y_hat, mask) ssim_loss = self.loss_func((y_norm * mask).unsqueeze(1), (y_hat_norm * mask).unsqueeze(1)) if ssim_loss.item() > 1.0: print(f"" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 1.0"") ssim_loss == 1.0 if ssim_loss.item() < 0.0: print(f"" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 0.0"") ssim_loss == 0.0 return ssim_loss ``` ###Assistant : Args: y_hat (tensor): model prediction values. y (tensor): target values. length (tensor): length of each sample in a batch for masking. Shapes: y_hat: B x T X D y: B x T x D length: B Returns: loss: An average loss value in range [0, 1] masked by the length. 
" 22,"def get_commands(): commands = {name: 'django.core' for name in find_commands(__path__[0])} if not settings.configured: return commands for app_config in reversed(apps.get_app_configs()): path = os.path.join(app_config.path, 'management') commands.update({name: app_config.name for name in find_commands(path)}) return commands "," Return a dictionary mapping command names to their callback applications. Look for a management.commands package in django.core, and in each installed application -- if a commands package exists, register all commands in that package. Core commands are always included. If a settings module has been specified, also include user-defined commands. The dictionary is in the format {command_name: app_name}. Key-value pairs from this dictionary can then be used in calls to load_command_class(app_name, command_name) If a specific version of a command must be loaded (e.g., with the startapp command), the instantiated module can be placed in the dictionary in place of the application name. The dictionary is cached on the first call and reused on subsequent calls. ",115,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_commands(): commands = {name: 'django.core' for name in find_commands(__path__[0])} if not settings.configured: return commands for app_config in reversed(apps.get_app_configs()): path = os.path.join(app_config.path, 'management') commands.update({name: app_config.name for name in find_commands(path)}) return commands ``` ###Assistant : Return a dictionary mapping command names to their callback applications. Look for a management.commands package in django.core, and in each installed application -- if a commands package exists, register all commands in that package. Core commands are always included. If a settings module has been specified, also include user-defined commands. The dictionary is in the format {command_name: app_name}. Key-value pairs from this dictionary can then be used in calls to load_command_class(app_name, command_name) If a specific version of a command must be loaded (e.g., with the startapp command), the instantiated module can be placed in the dictionary in place of the application name. The dictionary is cached on the first call and reused on subsequent calls. " 23,"def getphraselist(self): plist = [] while self.pos < len(self.field): if self.field[self.pos] in self.FWS: self.pos += 1 elif self.field[self.pos] == '""': plist.append(self.getquote()) elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] in self.phraseends: break else: plist.append(self.getatom(self.phraseends)) return plist ","Parse a sequence of RFC 2822 phrases. A phrase is a sequence of words, which are in turn either RFC 2822 atoms or quoted-strings. Phrases are canonicalized by squeezing all runs of continuous whitespace into one space. ",37,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getphraselist(self): plist = [] while self.pos < len(self.field): if self.field[self.pos] in self.FWS: self.pos += 1 elif self.field[self.pos] == '""': plist.append(self.getquote()) elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] in self.phraseends: break else: plist.append(self.getatom(self.phraseends)) return plist ``` ###Assistant : Parse a sequence of RFC 2822 phrases. 
A phrase is a sequence of words, which are in turn either RFC 2822 atoms or quoted-strings. Phrases are canonicalized by squeezing all runs of continuous whitespace into one space. " 24,"def set_location(self, location): # This puts the rectangle into figure-relative coordinates. if isinstance(location, str): _api.check_in_list(self._locstrings, location=location) self._pos = 1. if location in ('top', 'right') else 0. elif isinstance(location, numbers.Real): self._pos = location else: raise ValueError( f""location must be {self._locstrings[0]!r}, "" f""{self._locstrings[1]!r}, or a float, not {location!r}"") self._loc = location if self._orientation == 'x': # An x-secondary axes is like an inset axes from x = 0 to x = 1 and # from y = pos to y = pos + eps, in the parent's transAxes coords. bounds = [0, self._pos, 1., 1e-10] else: # 'y' bounds = [self._pos, 0, 1e-10, 1] # this locator lets the axes move in the parent axes coordinates. # so it never needs to know where the parent is explicitly in # figure coordinates. # it gets called in ax.apply_aspect() (of all places) self.set_axes_locator( _TransformedBoundsLocator(bounds, self._parent.transAxes)) "," Set the vertical or horizontal location of the axes in parent-normalized coordinates. Parameters ---------- location : {'top', 'bottom', 'left', 'right'} or float The position to put the secondary axis. Strings can be 'top' or 'bottom' for orientation='x' and 'right' or 'left' for orientation='y'. A float indicates the relative position on the parent axes to put the new axes, 0.0 being the bottom (or left) and 1.0 being the top (or right). ",71,142,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_location(self, location): # This puts the rectangle into figure-relative coordinates. if isinstance(location, str): _api.check_in_list(self._locstrings, location=location) self._pos = 1. if location in ('top', 'right') else 0. elif isinstance(location, numbers.Real): self._pos = location else: raise ValueError( f""location must be {self._locstrings[0]!r}, "" f""{self._locstrings[1]!r}, or a float, not {location!r}"") self._loc = location if self._orientation == 'x': # An x-secondary axes is like an inset axes from x = 0 to x = 1 and # from y = pos to y = pos + eps, in the parent's transAxes coords. bounds = [0, self._pos, 1., 1e-10] else: # 'y' bounds = [self._pos, 0, 1e-10, 1] # this locator lets the axes move in the parent axes coordinates. # so it never needs to know where the parent is explicitly in # figure coordinates. # it gets called in ax.apply_aspect() (of all places) self.set_axes_locator( _TransformedBoundsLocator(bounds, self._parent.transAxes)) ``` ###Assistant : Set the vertical or horizontal location of the axes in parent-normalized coordinates. Parameters ---------- location : {'top', 'bottom', 'left', 'right'} or float The position to put the secondary axis. Strings can be 'top' or 'bottom' for orientation='x' and 'right' or 'left' for orientation='y'. A float indicates the relative position on the parent axes to put the new axes, 0.0 being the bottom (or left) and 1.0 being the top (or right). 
" 25,"def length(self): if self._length_cache is None: if len(self.call_queue): self.drain_call_queue() else: self._length_cache, self._width_cache = _get_index_and_columns.remote( self.oid ) if isinstance(self._length_cache, ObjectIDType): self._length_cache = ray.get(self._length_cache) return self._length_cache "," Get the length of the object wrapped by this partition. Returns ------- int The length of the object. ",18,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def length(self): if self._length_cache is None: if len(self.call_queue): self.drain_call_queue() else: self._length_cache, self._width_cache = _get_index_and_columns.remote( self.oid ) if isinstance(self._length_cache, ObjectIDType): self._length_cache = ray.get(self._length_cache) return self._length_cache ``` ###Assistant : Get the length of the object wrapped by this partition. Returns ------- int The length of the object. " 26,"def dmp_l2_norm_squared(f, u, K): if not u: return dup_l2_norm_squared(f, K) v = u - 1 return sum([ dmp_l2_norm_squared(c, v, K) for c in f ]) "," Returns squared l2 norm of a polynomial in ``K[X]``. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring(""x,y"", ZZ) >>> R.dmp_l2_norm_squared(2*x*y - x - 3) 14 ",30,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dmp_l2_norm_squared(f, u, K): if not u: return dup_l2_norm_squared(f, K) v = u - 1 return sum([ dmp_l2_norm_squared(c, v, K) for c in f ]) ``` ###Assistant : Returns squared l2 norm of a polynomial in ``K[X]``. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring(""x,y"", ZZ) >>> R.dmp_l2_norm_squared(2*x*y - x - 3) 14 " 27,"def cloud_filter(args, targets): # type: (IntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> t.List[str] if args.metadata.cloud_config is not None: return [] # cloud filter already performed prior to delegation exclude = [] # type: t.List[str] for provider in get_cloud_providers(args, targets): provider.filter(targets, exclude) return exclude ",Return a list of target names to exclude based on the given targets.,13,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cloud_filter(args, targets): # type: (IntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> t.List[str] if args.metadata.cloud_config is not None: return [] # cloud filter already performed prior to delegation exclude = [] # type: t.List[str] for provider in get_cloud_providers(args, targets): provider.filter(targets, exclude) return exclude ``` ###Assistant : Return a list of target names to exclude based on the given targets. 
" 28,"def test_upgrade_available_none(): chk_upgrade_out = ( ""Last metadata expiration check: 22:5:48 ago on Mon Dec 6 19:26:36 EST 2021."" ) dnf_call = MagicMock(return_value={""retcode"": 100, ""stdout"": chk_upgrade_out}) version_mock = MagicMock(return_value=""6.6-2"") with patch(""pathlib.Path.is_file"", return_value=True): with patch.dict( aixpkg.__salt__, {""cmd.run_all"": dnf_call, ""config.get"": MagicMock(return_value=False)}, ), patch.object(aixpkg, ""version"", version_mock): result = aixpkg.upgrade_available(""info"") assert dnf_call.call_count == 1 libpath_env = {""LIBPATH"": ""/opt/freeware/lib:/usr/lib""} dnf_call.assert_any_call( ""/opt/freeware/bin/dnf check-update info"", env=libpath_env, ignore_retcode=True, python_shell=False, ) assert not result "," test upgrade available where a valid upgrade is not available ",10,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_upgrade_available_none(): chk_upgrade_out = ( ""Last metadata expiration check: 22:5:48 ago on Mon Dec 6 19:26:36 EST 2021."" ) dnf_call = MagicMock(return_value={""retcode"": 100, ""stdout"": chk_upgrade_out}) version_mock = MagicMock(return_value=""6.6-2"") with patch(""pathlib.Path.is_file"", return_value=True): with patch.dict( aixpkg.__salt__, {""cmd.run_all"": dnf_call, ""config.get"": MagicMock(return_value=False)}, ), patch.object(aixpkg, ""version"", version_mock): result = aixpkg.upgrade_available(""info"") assert dnf_call.call_count == 1 libpath_env = {""LIBPATH"": ""/opt/freeware/lib:/usr/lib""} dnf_call.assert_any_call( ""/opt/freeware/bin/dnf check-update info"", env=libpath_env, ignore_retcode=True, python_shell=False, ) assert not result ``` ###Assistant : test upgrade available where a valid upgrade is not available " 29,"def test_too_many_boosted_releases_do_not_boost_anymore(self): release_2 = Release.get_or_create(self.project, ""2.0"") release_3 = Release.get_or_create(self.project, ""3.0"") for release_id in (self.release.id, release_2.id): self.redis_client.set(f""ds::p:{self.project.id}:r:{release_id}"", 1, 60 * 60 * 24) self.redis_client.hset( f""ds::p:{self.project.id}:boosted_releases"", release_id, time(), ) with self.options( { ""dynamic-sampling:boost-latest-release"": True, } ): self.make_release_transaction( release_version=release_3.version, environment_name=self.environment1.name, project_id=self.project.id, checksum=""b"" * 32, timestamp=self.timestamp, ) assert self.redis_client.hgetall(f""ds::p:{self.project.id}:boosted_releases"") == { str(self.release.id): str(time()), str(release_2.id): str(time()), } assert self.redis_client.get(f""ds::p:{self.project.id}:r:{release_3.id}"") is None "," This test tests the case when we have already too many boosted releases, in this case we want to skip the boosting of anymore releases ",25,56,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_too_many_boosted_releases_do_not_boost_anymore(self): release_2 = Release.get_or_create(self.project, ""2.0"") release_3 = Release.get_or_create(self.project, ""3.0"") for release_id in (self.release.id, release_2.id): self.redis_client.set(f""ds::p:{self.project.id}:r:{release_id}"", 1, 60 * 60 * 24) self.redis_client.hset( f""ds::p:{self.project.id}:boosted_releases"", release_id, time(), ) with self.options( { ""dynamic-sampling:boost-latest-release"": True, } ): self.make_release_transaction( release_version=release_3.version, environment_name=self.environment1.name, project_id=self.project.id, checksum=""b"" * 32, timestamp=self.timestamp, ) assert self.redis_client.hgetall(f""ds::p:{self.project.id}:boosted_releases"") == { str(self.release.id): str(time()), str(release_2.id): str(time()), } assert self.redis_client.get(f""ds::p:{self.project.id}:r:{release_3.id}"") is None ``` ###Assistant : This test tests the case when we have already too many boosted releases, in this case we want to skip the boosting of anymore releases " 30,"def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True): import numpy as np import scipy as sp import scipy.sparse.linalg # call as sp.sparse.linalg if len(G) == 0: return {}, {} A = nx.adjacency_matrix(G, nodelist=list(G), dtype=float) if nstart is None: u, s, vt = sp.sparse.linalg.svds(A, k=1, maxiter=max_iter, tol=tol) else: nstart = np.array(list(nstart.values())) u, s, vt = sp.sparse.linalg.svds(A, k=1, v0=nstart, maxiter=max_iter, tol=tol) a = vt.flatten().real h = A @ a if normalized: h = h / h.sum() a = a / a.sum() hubs = dict(zip(G, map(float, h))) authorities = dict(zip(G, map(float, a))) return hubs, authorities ","Returns HITS hubs and authorities values for nodes. The HITS algorithm computes two numbers for a node. Authorities estimates the node value based on the incoming links. Hubs estimates the node value based on outgoing links. Parameters ---------- G : graph A NetworkX graph max_iter : integer, optional Maximum number of iterations in power method. tol : float, optional Error tolerance used to check convergence in power method iteration. nstart : dictionary, optional Starting value of each node for power method iteration. normalized : bool (default=True) Normalize results by the sum of all of the values. Returns ------- (hubs,authorities) : two-tuple of dictionaries Two dictionaries keyed by node containing the hub and authority values. Raises ------ PowerIterationFailedConvergence If the algorithm fails to converge to the specified tolerance within the specified number of iterations of the power iteration method. Examples -------- >>> G = nx.path_graph(4) >>> h, a = nx.hits(G) Notes ----- The eigenvector calculation is done by the power iteration method and has no guarantee of convergence. The iteration will stop after max_iter iterations or an error tolerance of number_of_nodes(G)*tol has been reached. The HITS algorithm was designed for directed graphs but this algorithm does not check if the input graph is directed and will execute on undirected graphs. References ---------- .. [1] A. Langville and C. Meyer, ""A survey of eigenvector methods of web information retrieval."" http://citeseer.ist.psu.edu/713792.html .. [2] Jon Kleinberg, Authoritative sources in a hyperlinked environment Journal of the ACM 46 (5): 604-32, 1999. doi:10.1145/324133.324140. http://www.cs.cornell.edu/home/kleinber/auth.pdf. 
",248,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True): import numpy as np import scipy as sp import scipy.sparse.linalg # call as sp.sparse.linalg if len(G) == 0: return {}, {} A = nx.adjacency_matrix(G, nodelist=list(G), dtype=float) if nstart is None: u, s, vt = sp.sparse.linalg.svds(A, k=1, maxiter=max_iter, tol=tol) else: nstart = np.array(list(nstart.values())) u, s, vt = sp.sparse.linalg.svds(A, k=1, v0=nstart, maxiter=max_iter, tol=tol) a = vt.flatten().real h = A @ a if normalized: h = h / h.sum() a = a / a.sum() hubs = dict(zip(G, map(float, h))) authorities = dict(zip(G, map(float, a))) return hubs, authorities ``` ###Assistant : Returns HITS hubs and authorities values for nodes. The HITS algorithm computes two numbers for a node. Authorities estimates the node value based on the incoming links. Hubs estimates the node value based on outgoing links. Parameters ---------- G : graph A NetworkX graph max_iter : integer, optional Maximum number of iterations in power method. tol : float, optional Error tolerance used to check convergence in power method iteration. nstart : dictionary, optional Starting value of each node for power method iteration. normalized : bool (default=True) Normalize results by the sum of all of the values. Returns ------- (hubs,authorities) : two-tuple of dictionaries Two dictionaries keyed by node containing the hub and authority values. Raises ------ PowerIterationFailedConvergence If the algorithm fails to converge to the specified tolerance within the specified number of iterations of the power iteration method. Examples -------- >>> G = nx.path_graph(4) >>> h, a = nx.hits(G) Notes ----- The eigenvector calculation is done by the power iteration method and has no guarantee of convergence. The iteration will stop after max_iter iterations or an error tolerance of number_of_nodes(G)*tol has been reached. The HITS algorithm was designed for directed graphs but this algorithm does not check if the input graph is directed and will execute on undirected graphs. References ---------- .. [1] A. Langville and C. Meyer, ""A survey of eigenvector methods of web information retrieval."" http://citeseer.ist.psu.edu/713792.html .. [2] Jon Kleinberg, Authoritative sources in a hyperlinked environment Journal of the ACM 46 (5): 604-32, 1999. doi:10.1145/324133.324140. http://www.cs.cornell.edu/home/kleinber/auth.pdf. " 31,"def test_connection(self) -> Tuple[bool, str]: try: conn = self.get_conn() conn.pwd return True, ""Connection successfully tested"" except Exception as e: return False, str(e) ",Test the FTP connection by calling path with directory,9,22,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_connection(self) -> Tuple[bool, str]: try: conn = self.get_conn() conn.pwd return True, ""Connection successfully tested"" except Exception as e: return False, str(e) ``` ###Assistant : Test the FTP connection by calling path with directory " 32,"def call_price(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog=""price"", description=, ) parser.add_argument( ""-s"", ""--symbol"", required=""-h"" not in other_args, type=str, dest=""symbol"", help=""Symbol of coin to load data for, ~100 symbols are available"", ) if other_args and ""-"" not in other_args[0][0]: other_args.insert(0, ""-s"") ns_parser = self.parse_known_args_and_warn(parser, other_args) if ns_parser: if ns_parser.symbol in pyth_model.ASSETS.keys(): console.print( ""[param]If it takes too long, you can use 'Ctrl + C' to cancel.\n[/param]"" ) pyth_view.display_price(ns_parser.symbol) else: console.print(""[red]The symbol selected does not exist.[/red]\n"") ",Process price commandDisplay price and interval of confidence in real-time. [Source: Pyth],12,74,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_price(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog=""price"", description=, ) parser.add_argument( ""-s"", ""--symbol"", required=""-h"" not in other_args, type=str, dest=""symbol"", help=""Symbol of coin to load data for, ~100 symbols are available"", ) if other_args and ""-"" not in other_args[0][0]: other_args.insert(0, ""-s"") ns_parser = self.parse_known_args_and_warn(parser, other_args) if ns_parser: if ns_parser.symbol in pyth_model.ASSETS.keys(): console.print( ""[param]If it takes too long, you can use 'Ctrl + C' to cancel.\n[/param]"" ) pyth_view.display_price(ns_parser.symbol) else: console.print(""[red]The symbol selected does not exist.[/red]\n"") ``` ###Assistant : Process price commandDisplay price and interval of confidence in real-time. 
[Source: Pyth] " 33,"def _single_map_nested(args): function, data_struct, types, rank, disable_tqdm, desc = args # Singleton first to spare some computation if not isinstance(data_struct, dict) and not isinstance(data_struct, types): return function(data_struct) # Reduce logging to keep things readable in multiprocessing with tqdm if rank is not None and logging.get_verbosity() < logging.WARNING: logging.set_verbosity_warning() # Print at least one thing to fix tqdm in notebooks in multiprocessing # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 if rank is not None and not disable_tqdm and any(""notebook"" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__): print("" "", end="""", flush=True) # Loop over single examples or batches and write to buffer/file if examples are to be updated pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct pbar_desc = (desc + "" "" if desc is not None else """") + ""#"" + str(rank) if rank is not None else desc pbar = utils.tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit=""obj"", desc=pbar_desc) if isinstance(data_struct, dict): return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar} else: mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar] if isinstance(data_struct, list): return mapped elif isinstance(data_struct, tuple): return tuple(mapped) else: return np.array(mapped) ",Apply a function recursively to each element of a nested data struct.,12,182,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _single_map_nested(args): function, data_struct, types, rank, disable_tqdm, desc = args # Singleton first to spare some computation if not isinstance(data_struct, dict) and not isinstance(data_struct, types): return function(data_struct) # Reduce logging to keep things readable in multiprocessing with tqdm if rank is not None and logging.get_verbosity() < logging.WARNING: logging.set_verbosity_warning() # Print at least one thing to fix tqdm in notebooks in multiprocessing # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 if rank is not None and not disable_tqdm and any(""notebook"" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__): print("" "", end="""", flush=True) # Loop over single examples or batches and write to buffer/file if examples are to be updated pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct pbar_desc = (desc + "" "" if desc is not None else """") + ""#"" + str(rank) if rank is not None else desc pbar = utils.tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit=""obj"", desc=pbar_desc) if isinstance(data_struct, dict): return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar} else: mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar] if isinstance(data_struct, list): return mapped elif isinstance(data_struct, tuple): return tuple(mapped) else: return np.array(mapped) ``` ###Assistant : Apply a function recursively to each element of a nested data struct. 
" 34,"def test_unified(self): self.write_settings(""settings_to_diff.py"", sdict={""FOO"": '""bar""'}) args = [""diffsettings"", ""--settings=settings_to_diff"", ""--output=unified""] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, ""+ FOO = 'bar'"") self.assertOutput(out, ""- SECRET_KEY = ''"") self.assertOutput(out, ""+ SECRET_KEY = 'django_tests_secret_key'"") self.assertNotInOutput(out, "" APPEND_SLASH = True"") ",--output=unified emits settings diff in unified mode.,7,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_unified(self): self.write_settings(""settings_to_diff.py"", sdict={""FOO"": '""bar""'}) args = [""diffsettings"", ""--settings=settings_to_diff"", ""--output=unified""] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, ""+ FOO = 'bar'"") self.assertOutput(out, ""- SECRET_KEY = ''"") self.assertOutput(out, ""+ SECRET_KEY = 'django_tests_secret_key'"") self.assertNotInOutput(out, "" APPEND_SLASH = True"") ``` ###Assistant : --output=unified emits settings diff in unified mode. " 35,"def runtime_env(self): return RuntimeEnv.deserialize(self._get_runtime_env_string()) ","Get the runtime env of the current job/worker. If this API is called in driver or ray client, returns the job level runtime env. If this API is called in workers/actors, returns the worker level runtime env. Returns: A new ray.runtime_env.RuntimeEnv instance. To merge from the current runtime env in some specific cases, you can get the current runtime env by this API and modify it by yourself. Example: >>> # Inherit current runtime env, except `env_vars` >>> Actor.options( # doctest: +SKIP ... runtime_env=ray.get_runtime_context().runtime_env.update( ... {""env_vars"": {""A"": ""a"", ""B"": ""b""}}) ... ) # doctest: +SKIP ",95,4,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def runtime_env(self): return RuntimeEnv.deserialize(self._get_runtime_env_string()) ``` ###Assistant : Get the runtime env of the current job/worker. If this API is called in driver or ray client, returns the job level runtime env. If this API is called in workers/actors, returns the worker level runtime env. Returns: A new ray.runtime_env.RuntimeEnv instance. To merge from the current runtime env in some specific cases, you can get the current runtime env by this API and modify it by yourself. Example: >>> # Inherit current runtime env, except `env_vars` >>> Actor.options( # doctest: +SKIP ... runtime_env=ray.get_runtime_context().runtime_env.update( ... {""env_vars"": {""A"": ""a"", ""B"": ""b""}}) ... ) # doctest: +SKIP " 36,"def sleeper(self, duration): s = time() yield time_to_sleep = duration - (time() - s) if time_to_sleep > 0: self.wait(time_to_sleep) "," Do something and then wait for a given duration minus the time it took doing something ",16,19,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def sleeper(self, duration): s = time() yield time_to_sleep = duration - (time() - s) if time_to_sleep > 0: self.wait(time_to_sleep) ``` ###Assistant : Do something and then wait for a given duration minus the time it took doing something " 37,"def test_BoundaryNorm(): boundaries = [0, 1.1, 2.2] vals = [-1, 0, 1, 2, 2.2, 4] # Without interpolation expected = [-1, 0, 0, 1, 2, 2] ncolors = len(boundaries) - 1 bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # ncolors != len(boundaries) - 1 triggers interpolation expected = [-1, 0, 0, 2, 3, 3] ncolors = len(boundaries) bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # with a single region and interpolation expected = [-1, 1, 1, 1, 3, 3] bn = mcolors.BoundaryNorm([0, 2.2], ncolors) assert_array_equal(bn(vals), expected) # more boundaries for a third color boundaries = [0, 1, 2, 3] vals = [-1, 0.1, 1.1, 2.2, 4] ncolors = 5 expected = [-1, 0, 2, 4, 5] bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # a scalar as input should not trigger an error and should return a scalar boundaries = [0, 1, 2] vals = [-1, 0.1, 1.1, 2.2] bn = mcolors.BoundaryNorm(boundaries, 2) expected = [-1, 0, 1, 2] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # same with interp bn = mcolors.BoundaryNorm(boundaries, 3) expected = [-1, 0, 2, 3] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # Clipping bn = mcolors.BoundaryNorm(boundaries, 3, clip=True) expected = [0, 0, 2, 2] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # Masked arrays boundaries = [0, 1.1, 2.2] vals = np.ma.masked_invalid([-1., np.NaN, 0, 1.4, 9]) # Without interpolation ncolors = len(boundaries) - 1 bn = mcolors.BoundaryNorm(boundaries, ncolors) expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0]) assert_array_equal(bn(vals), expected) # With interpolation bn = mcolors.BoundaryNorm(boundaries, len(boundaries)) expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0]) assert_array_equal(bn(vals), expected) # Non-trivial masked arrays vals = np.ma.masked_invalid([np.Inf, np.NaN]) assert np.all(bn(vals).mask) vals = np.ma.masked_invalid([np.Inf]) assert np.all(bn(vals).mask) # Incompatible extend and clip with pytest.raises(ValueError, match=""not compatible""): mcolors.BoundaryNorm(np.arange(4), 5, extend='both', clip=True) # Too small ncolors argument with pytest.raises(ValueError, match=""ncolors must equal or exceed""): mcolors.BoundaryNorm(np.arange(4), 2) with pytest.raises(ValueError, match=""ncolors must equal or exceed""): mcolors.BoundaryNorm(np.arange(4), 3, extend='min') with pytest.raises(ValueError, match=""ncolors must equal or exceed""): mcolors.BoundaryNorm(np.arange(4), 4, extend='both') # Testing extend keyword, with interpolation (large cmap) bounds = [1, 2, 3] cmap = mpl.colormaps['viridis'] mynorm = mcolors.BoundaryNorm(bounds, cmap.N, extend='both') refnorm = mcolors.BoundaryNorm([0] + bounds + [4], cmap.N) x = np.random.randn(100) * 10 + 2 ref = refnorm(x) ref[ref == 0] = -1 ref[ref == cmap.N - 1] = cmap.N assert_array_equal(mynorm(x), ref) # Without interpolation cmref = mcolors.ListedColormap(['blue', 'red']) 
cmref.set_over('black') cmref.set_under('white') cmshould = mcolors.ListedColormap(['white', 'blue', 'red', 'black']) assert mcolors.same_color(cmref.get_over(), 'black') assert mcolors.same_color(cmref.get_under(), 'white') refnorm = mcolors.BoundaryNorm(bounds, cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='both') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax assert mynorm(bounds[0] - 0.1) == -1 # under assert mynorm(bounds[0] + 0.1) == 1 # first bin -> second color assert mynorm(bounds[-1] - 0.1) == cmshould.N - 2 # next-to-last color assert mynorm(bounds[-1] + 0.1) == cmshould.N # over x = [-1, 1.2, 2.3, 9.6] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2, 3])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x))) # Just min cmref = mcolors.ListedColormap(['blue', 'red']) cmref.set_under('white') cmshould = mcolors.ListedColormap(['white', 'blue', 'red']) assert mcolors.same_color(cmref.get_under(), 'white') assert cmref.N == 2 assert cmshould.N == 3 refnorm = mcolors.BoundaryNorm(bounds, cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='min') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax x = [-1, 1.2, 2.3] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x))) # Just max cmref = mcolors.ListedColormap(['blue', 'red']) cmref.set_over('black') cmshould = mcolors.ListedColormap(['blue', 'red', 'black']) assert mcolors.same_color(cmref.get_over(), 'black') assert cmref.N == 2 assert cmshould.N == 3 refnorm = mcolors.BoundaryNorm(bounds, cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='max') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax x = [1.2, 2.3, 4] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x))) "," GitHub issue #1258: interpolation was failing with numpy 1.7 pre-release. ",10,623,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_BoundaryNorm(): boundaries = [0, 1.1, 2.2] vals = [-1, 0, 1, 2, 2.2, 4] # Without interpolation expected = [-1, 0, 0, 1, 2, 2] ncolors = len(boundaries) - 1 bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # ncolors != len(boundaries) - 1 triggers interpolation expected = [-1, 0, 0, 2, 3, 3] ncolors = len(boundaries) bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # with a single region and interpolation expected = [-1, 1, 1, 1, 3, 3] bn = mcolors.BoundaryNorm([0, 2.2], ncolors) assert_array_equal(bn(vals), expected) # more boundaries for a third color boundaries = [0, 1, 2, 3] vals = [-1, 0.1, 1.1, 2.2, 4] ncolors = 5 expected = [-1, 0, 2, 4, 5] bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # a scalar as input should not trigger an error and should return a scalar boundaries = [0, 1, 2] vals = [-1, 0.1, 1.1, 2.2] bn = mcolors.BoundaryNorm(boundaries, 2) expected = [-1, 0, 1, 2] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # same with interp bn = mcolors.BoundaryNorm(boundaries, 3) expected = [-1, 0, 2, 3] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # Clipping bn = mcolors.BoundaryNorm(boundaries, 3, clip=True) expected = [0, 0, 2, 2] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # Masked arrays boundaries = [0, 1.1, 2.2] vals = np.ma.masked_invalid([-1., np.NaN, 0, 1.4, 9]) # Without interpolation ncolors = len(boundaries) - 1 bn = mcolors.BoundaryNorm(boundaries, ncolors) expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0]) assert_array_equal(bn(vals), expected) # With interpolation bn = mcolors.BoundaryNorm(boundaries, len(boundaries)) expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0]) assert_array_equal(bn(vals), expected) # Non-trivial masked arrays vals = np.ma.masked_invalid([np.Inf, np.NaN]) assert np.all(bn(vals).mask) vals = np.ma.masked_invalid([np.Inf]) assert np.all(bn(vals).mask) # Incompatible extend and clip with pytest.raises(ValueError, match=""not compatible""): mcolors.BoundaryNorm(np.arange(4), 5, extend='both', clip=True) # Too small ncolors argument with pytest.raises(ValueError, match=""ncolors must equal or exceed""): mcolors.BoundaryNorm(np.arange(4), 2) with pytest.raises(ValueError, match=""ncolors must equal or exceed""): mcolors.BoundaryNorm(np.arange(4), 3, extend='min') with pytest.raises(ValueError, match=""ncolors must equal or exceed""): mcolors.BoundaryNorm(np.arange(4), 4, extend='both') # Testing extend keyword, with interpolation (large cmap) bounds = [1, 2, 3] cmap = mpl.colormaps['viridis'] mynorm = mcolors.BoundaryNorm(bounds, cmap.N, extend='both') refnorm = mcolors.BoundaryNorm([0] + bounds + [4], cmap.N) x = np.random.randn(100) * 10 + 2 ref = refnorm(x) ref[ref == 0] = -1 ref[ref == cmap.N - 1] = cmap.N assert_array_equal(mynorm(x), ref) # Without interpolation cmref = mcolors.ListedColormap(['blue', 'red']) cmref.set_over('black') cmref.set_under('white') cmshould = mcolors.ListedColormap(['white', 'blue', 'red', 'black']) assert mcolors.same_color(cmref.get_over(), 'black') assert mcolors.same_color(cmref.get_under(), 'white') refnorm = mcolors.BoundaryNorm(bounds, 
cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='both') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax assert mynorm(bounds[0] - 0.1) == -1 # under assert mynorm(bounds[0] + 0.1) == 1 # first bin -> second color assert mynorm(bounds[-1] - 0.1) == cmshould.N - 2 # next-to-last color assert mynorm(bounds[-1] + 0.1) == cmshould.N # over x = [-1, 1.2, 2.3, 9.6] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2, 3])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x))) # Just min cmref = mcolors.ListedColormap(['blue', 'red']) cmref.set_under('white') cmshould = mcolors.ListedColormap(['white', 'blue', 'red']) assert mcolors.same_color(cmref.get_under(), 'white') assert cmref.N == 2 assert cmshould.N == 3 refnorm = mcolors.BoundaryNorm(bounds, cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='min') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax x = [-1, 1.2, 2.3] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x))) # Just max cmref = mcolors.ListedColormap(['blue', 'red']) cmref.set_over('black') cmshould = mcolors.ListedColormap(['blue', 'red', 'black']) assert mcolors.same_color(cmref.get_over(), 'black') assert cmref.N == 2 assert cmshould.N == 3 refnorm = mcolors.BoundaryNorm(bounds, cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='max') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax x = [1.2, 2.3, 4] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x))) ``` ###Assistant : GitHub issue #1258: interpolation was failing with numpy 1.7 pre-release. " 38,"def _join_by_index(self, other_modin_frames, how, sort, ignore_index): if how == ""outer"": raise NotImplementedError(""outer join is not supported in HDK engine"") lhs = self._maybe_materialize_rowid() reset_index_names = False for rhs in other_modin_frames: rhs = rhs._maybe_materialize_rowid() if len(lhs._index_cols) != len(rhs._index_cols): raise NotImplementedError( ""join by indexes with different sizes is not supported"" ) reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols condition = lhs._build_equi_join_condition( rhs, lhs._index_cols, rhs._index_cols ) exprs = lhs._index_exprs() new_columns = lhs.columns.to_list() for col in lhs.columns: exprs[col] = lhs.ref(col) for col in rhs.columns: # Handle duplicating column names here. When user specifies # suffixes to make a join, actual renaming is done in front-end. 
new_col_name = col rename_idx = 0 while new_col_name in exprs: new_col_name = f""{col}{rename_idx}"" rename_idx += 1 exprs[new_col_name] = rhs.ref(col) new_columns.append(new_col_name) op = JoinNode( lhs, rhs, how=how, exprs=exprs, condition=condition, ) new_columns = Index.__new__( Index, data=new_columns, dtype=self.columns.dtype ) lhs = lhs.__constructor__( dtypes=lhs._dtypes_for_exprs(exprs), columns=new_columns, index_cols=lhs._index_cols, op=op, force_execution_mode=self._force_execution_mode, ) if sort: lhs = lhs.sort_rows( lhs._index_cols, ascending=True, ignore_index=False, na_position=""last"", ) if reset_index_names: lhs = lhs._reset_index_names() if ignore_index: new_columns = Index.__new__(RangeIndex, data=range(len(lhs.columns))) lhs = lhs._set_columns(new_columns) return lhs "," Perform equi-join operation for multiple frames by index columns. Parameters ---------- other_modin_frames : list of HdkOnNativeDataframe Frames to join with. how : str A type of join. sort : bool Sort the result by join keys. ignore_index : bool If True then reset column index for the resulting frame. Returns ------- HdkOnNativeDataframe The new frame. ",55,171,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _join_by_index(self, other_modin_frames, how, sort, ignore_index): if how == ""outer"": raise NotImplementedError(""outer join is not supported in HDK engine"") lhs = self._maybe_materialize_rowid() reset_index_names = False for rhs in other_modin_frames: rhs = rhs._maybe_materialize_rowid() if len(lhs._index_cols) != len(rhs._index_cols): raise NotImplementedError( ""join by indexes with different sizes is not supported"" ) reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols condition = lhs._build_equi_join_condition( rhs, lhs._index_cols, rhs._index_cols ) exprs = lhs._index_exprs() new_columns = lhs.columns.to_list() for col in lhs.columns: exprs[col] = lhs.ref(col) for col in rhs.columns: # Handle duplicating column names here. When user specifies # suffixes to make a join, actual renaming is done in front-end. new_col_name = col rename_idx = 0 while new_col_name in exprs: new_col_name = f""{col}{rename_idx}"" rename_idx += 1 exprs[new_col_name] = rhs.ref(col) new_columns.append(new_col_name) op = JoinNode( lhs, rhs, how=how, exprs=exprs, condition=condition, ) new_columns = Index.__new__( Index, data=new_columns, dtype=self.columns.dtype ) lhs = lhs.__constructor__( dtypes=lhs._dtypes_for_exprs(exprs), columns=new_columns, index_cols=lhs._index_cols, op=op, force_execution_mode=self._force_execution_mode, ) if sort: lhs = lhs.sort_rows( lhs._index_cols, ascending=True, ignore_index=False, na_position=""last"", ) if reset_index_names: lhs = lhs._reset_index_names() if ignore_index: new_columns = Index.__new__(RangeIndex, data=range(len(lhs.columns))) lhs = lhs._set_columns(new_columns) return lhs ``` ###Assistant : Perform equi-join operation for multiple frames by index columns. Parameters ---------- other_modin_frames : list of HdkOnNativeDataframe Frames to join with. how : str A type of join. sort : bool Sort the result by join keys. ignore_index : bool If True then reset column index for the resulting frame. Returns ------- HdkOnNativeDataframe The new frame. 
" 39,"def _object2proto(self) -> RunFunctionOrConstructorAction_PB: return RunFunctionOrConstructorAction_PB( path=self.path, args=[serialize(x, to_bytes=True) for x in self.args], kwargs={k: serialize(v, to_bytes=True) for k, v in self.kwargs.items()}, id_at_location=serialize(self.id_at_location), address=serialize(self.address), msg_id=serialize(self.id), ) ","Returns a protobuf serialization of self. As a requirement of all objects which inherit from Serializable, this method transforms the current object into the corresponding Protobuf object so that it can be further serialized. :return: returns a protobuf object :rtype: RunFunctionOrConstructorAction_PB .. note:: This method is purely an internal method. Please use serialize(object) or one of the other public serialization methods if you wish to serialize an object. ",68,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _object2proto(self) -> RunFunctionOrConstructorAction_PB: return RunFunctionOrConstructorAction_PB( path=self.path, args=[serialize(x, to_bytes=True) for x in self.args], kwargs={k: serialize(v, to_bytes=True) for k, v in self.kwargs.items()}, id_at_location=serialize(self.id_at_location), address=serialize(self.address), msg_id=serialize(self.id), ) ``` ###Assistant : Returns a protobuf serialization of self. As a requirement of all objects which inherit from Serializable, this method transforms the current object into the corresponding Protobuf object so that it can be further serialized. :return: returns a protobuf object :rtype: RunFunctionOrConstructorAction_PB .. note:: This method is purely an internal method. Please use serialize(object) or one of the other public serialization methods if you wish to serialize an object. " 40,"def truncated_cube_graph(create_using=None): description = [ ""adjacencylist"", ""Truncated Cube Graph"", 24, [ [2, 3, 5], [12, 15], [4, 5], [7, 9], [6], [17, 19], [8, 9], [11, 13], [10], [18, 21], [12, 13], [15], [14], [22, 23], [16], [20, 24], [18, 19], [21], [20], [24], [22], [23], [24], [], ], ] G = make_small_undirected_graph(description, create_using) return G "," Returns the skeleton of the truncated cube. The truncated cube is an Archimedean solid with 14 regular faces (6 octagonal and 8 triangular), 36 edges and 24 nodes [1]_. The truncated cube is created by truncating (cutting off) the tips of the cube one third of the way into each edge [2]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph Skeleton of the truncated cube References ---------- .. [1] https://en.wikipedia.org/wiki/Truncated_cube .. [2] https://www.coolmath.com/reference/polyhedra-truncated-cube ",91,56,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def truncated_cube_graph(create_using=None): description = [ ""adjacencylist"", ""Truncated Cube Graph"", 24, [ [2, 3, 5], [12, 15], [4, 5], [7, 9], [6], [17, 19], [8, 9], [11, 13], [10], [18, 21], [12, 13], [15], [14], [22, 23], [16], [20, 24], [18, 19], [21], [20], [24], [22], [23], [24], [], ], ] G = make_small_undirected_graph(description, create_using) return G ``` ###Assistant : Returns the skeleton of the truncated cube. The truncated cube is an Archimedean solid with 14 regular faces (6 octagonal and 8 triangular), 36 edges and 24 nodes [1]_. 
The truncated cube is created by truncating (cutting off) the tips of the cube one third of the way into each edge [2]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph Skeleton of the truncated cube References ---------- .. [1] https://en.wikipedia.org/wiki/Truncated_cube .. [2] https://www.coolmath.com/reference/polyhedra-truncated-cube " 41,"def get_admin_urls_for_registration(self): urls = () for instance in self.modeladmin_instances: urls += instance.get_admin_urls_for_registration() return urls "," Utilised by Wagtail's 'register_admin_urls' hook to register urls for used by any associated ModelAdmin instances ",15,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_admin_urls_for_registration(self): urls = () for instance in self.modeladmin_instances: urls += instance.get_admin_urls_for_registration() return urls ``` ###Assistant : Utilised by Wagtail's 'register_admin_urls' hook to register urls for used by any associated ModelAdmin instances " 42,"def setName(self, name): self.name = name self.errmsg = ""Expected "" + self.name if __diag__.enable_debug_on_named_expressions: self.setDebug() return self "," Define name for this expression, makes debugging and exception messages clearer. Example:: Word(nums).parseString(""ABC"") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) Word(nums).setName(""integer"").parseString(""ABC"") # -> Exception: Expected integer (at char 0), (line:1, col:1) ",34,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def setName(self, name): self.name = name self.errmsg = ""Expected "" + self.name if __diag__.enable_debug_on_named_expressions: self.setDebug() return self ``` ###Assistant : Define name for this expression, makes debugging and exception messages clearer. Example:: Word(nums).parseString(""ABC"") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) Word(nums).setName(""integer"").parseString(""ABC"") # -> Exception: Expected integer (at char 0), (line:1, col:1) " 43,"def get_func(cls, key, **kwargs): if ""agg_func"" in kwargs: return cls.inplace_applyier_builder(key, kwargs[""agg_func""]) elif ""func_dict"" in kwargs: return cls.inplace_applyier_builder(key, kwargs[""func_dict""]) else: return cls.inplace_applyier_builder(key) "," Extract aggregation function from groupby arguments. Parameters ---------- key : callable or str Default aggregation function. If aggregation function is not specified via groupby arguments, then `key` function is used. **kwargs : dict GroupBy arguments that may contain aggregation function. Returns ------- callable Aggregation function. Notes ----- There are two ways of how groupby aggregation can be invoked: 1. Explicitly with query compiler method: `qc.groupby_sum()`. 2. By passing aggregation function as an argument: `qc.groupby_agg(""sum"")`. Both are going to produce the same result, however in the first case actual aggregation function can be extracted from the method name, while for the second only from the method arguments. ",106,21,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_func(cls, key, **kwargs): if ""agg_func"" in kwargs: return cls.inplace_applyier_builder(key, kwargs[""agg_func""]) elif ""func_dict"" in kwargs: return cls.inplace_applyier_builder(key, kwargs[""func_dict""]) else: return cls.inplace_applyier_builder(key) ``` ###Assistant : Extract aggregation function from groupby arguments. Parameters ---------- key : callable or str Default aggregation function. If aggregation function is not specified via groupby arguments, then `key` function is used. **kwargs : dict GroupBy arguments that may contain aggregation function. Returns ------- callable Aggregation function. Notes ----- There are two ways of how groupby aggregation can be invoked: 1. Explicitly with query compiler method: `qc.groupby_sum()`. 2. By passing aggregation function as an argument: `qc.groupby_agg(""sum"")`. Both are going to produce the same result, however in the first case actual aggregation function can be extracted from the method name, while for the second only from the method arguments. " 44,"def update_scheduler(self, metric): self.worker_group.apply_all_operators( lambda op: [sched.step(metric) for sched in op._schedulers] ) ","Calls ``scheduler.step(metric)`` on all registered schedulers. This is useful for lr_schedulers such as ``ReduceLROnPlateau``. ",14,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_scheduler(self, metric): self.worker_group.apply_all_operators( lambda op: [sched.step(metric) for sched in op._schedulers] ) ``` ###Assistant : Calls ``scheduler.step(metric)`` on all registered schedulers. This is useful for lr_schedulers such as ``ReduceLROnPlateau``. " 45,"def paired_cosine_distances(X, Y): X, Y = check_paired_arrays(X, Y) return 0.5 * row_norms(normalize(X) - normalize(Y), squared=True) PAIRED_DISTANCES = { ""cosine"": paired_cosine_distances, ""euclidean"": paired_euclidean_distances, ""l2"": paired_euclidean_distances, ""l1"": paired_manhattan_distances, ""manhattan"": paired_manhattan_distances, ""cityblock"": paired_manhattan_distances, } "," Compute the paired cosine distances between X and Y. Read more in the :ref:`User Guide `. Parameters ---------- X : array-like of shape (n_samples, n_features) An array where each row is a sample and each column is a feature. Y : array-like of shape (n_samples, n_features) An array where each row is a sample and each column is a feature. Returns ------- distances : ndarray of shape (n_samples,) Returns the distances between the row vectors of `X` and the row vectors of `Y`, where `distances[i]` is the distance between `X[i]` and `Y[i]`. Notes ----- The cosine distance is equivalent to the half the squared euclidean distance if each sample is normalized to unit norm. ",114,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def paired_cosine_distances(X, Y): X, Y = check_paired_arrays(X, Y) return 0.5 * row_norms(normalize(X) - normalize(Y), squared=True) PAIRED_DISTANCES = { ""cosine"": paired_cosine_distances, ""euclidean"": paired_euclidean_distances, ""l2"": paired_euclidean_distances, ""l1"": paired_manhattan_distances, ""manhattan"": paired_manhattan_distances, ""cityblock"": paired_manhattan_distances, } ``` ###Assistant : Compute the paired cosine distances between X and Y. Read more in the :ref:`User Guide `. 
Parameters ---------- X : array-like of shape (n_samples, n_features) An array where each row is a sample and each column is a feature. Y : array-like of shape (n_samples, n_features) An array where each row is a sample and each column is a feature. Returns ------- distances : ndarray of shape (n_samples,) Returns the distances between the row vectors of `X` and the row vectors of `Y`, where `distances[i]` is the distance between `X[i]` and `Y[i]`. Notes ----- The cosine distance is equivalent to the half the squared euclidean distance if each sample is normalized to unit norm. " 46,"def torchdynamo_smart_context_manager(self): ctx_manager = contextlib.nullcontext() if is_torchdynamo_available(): import torchdynamo from torchdynamo.optimizations.training import aot_autograd_speedup_strategy if self.args.torchdynamo == ""eager"": ctx_manager = torchdynamo.optimize(""eager"") elif self.args.torchdynamo == ""nvfuser"": ctx_manager = torchdynamo.optimize(aot_autograd_speedup_strategy) return ctx_manager "," A helper wrapper that creates an appropriate context manager for `torchdynamo`. ",11,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def torchdynamo_smart_context_manager(self): ctx_manager = contextlib.nullcontext() if is_torchdynamo_available(): import torchdynamo from torchdynamo.optimizations.training import aot_autograd_speedup_strategy if self.args.torchdynamo == ""eager"": ctx_manager = torchdynamo.optimize(""eager"") elif self.args.torchdynamo == ""nvfuser"": ctx_manager = torchdynamo.optimize(aot_autograd_speedup_strategy) return ctx_manager ``` ###Assistant : A helper wrapper that creates an appropriate context manager for `torchdynamo`. " 47,"def check_aug_version(self) -> bool: self.aug.set(""/test/path/testing/arg"", ""aRgUMeNT"") try: matches = self.aug.match( ""/test//*[self::arg=~regexp('argument', 'i')]"") except RuntimeError: self.aug.remove(""/test/path"") return False self.aug.remove(""/test/path"") return matches "," Checks that we have recent enough version of libaugeas. If augeas version is recent enough, it will support case insensitive regexp matching",22,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_aug_version(self) -> bool: self.aug.set(""/test/path/testing/arg"", ""aRgUMeNT"") try: matches = self.aug.match( ""/test//*[self::arg=~regexp('argument', 'i')]"") except RuntimeError: self.aug.remove(""/test/path"") return False self.aug.remove(""/test/path"") return matches ``` ###Assistant : Checks that we have recent enough version of libaugeas. If augeas version is recent enough, it will support case insensitive regexp matching " 48,"def resize_depth(depth, width, height): depth = torch.squeeze(depth[0, :, :, :]).to(""cpu"") depth_resized = cv2.resize( depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC ) return depth_resized ","Resize depth map and bring to CPU (numpy). Args: depth (tensor): depth width (int): image width height (int): image height Returns: array: processed depth ",24,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resize_depth(depth, width, height): depth = torch.squeeze(depth[0, :, :, :]).to(""cpu"") depth_resized = cv2.resize( depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC ) return depth_resized ``` ###Assistant : Resize depth map and bring to CPU (numpy). 
Args: depth (tensor): depth width (int): image width height (int): image height Returns: array: processed depth " 49,"def comp(z1, z2, tol=None): r if type(z2) is str: if not pure_complex(z1, or_real=True): raise ValueError('when z2 is a str z1 must be a Number') return str(z1) == z2 if not z1: z1, z2 = z2, z1 if not z1: return True if not tol: a, b = z1, z2 if tol == '': return str(a) == str(b) if tol is None: a, b = sympify(a), sympify(b) if not all(i.is_number for i in (a, b)): raise ValueError('expecting 2 numbers') fa = a.atoms(Float) fb = b.atoms(Float) if not fa and not fb: # no floats -- compare exactly return a == b # get a to be pure_complex for _ in range(2): ca = pure_complex(a, or_real=True) if not ca: if fa: a = a.n(prec_to_dps(min([i._prec for i in fa]))) ca = pure_complex(a, or_real=True) break else: fa, fb = fb, fa a, b = b, a cb = pure_complex(b) if not cb and fb: b = b.n(prec_to_dps(min([i._prec for i in fb]))) cb = pure_complex(b, or_real=True) if ca and cb and (ca[1] or cb[1]): return all(comp(i, j) for i, j in zip(ca, cb)) tol = 10**prec_to_dps(min(a._prec, getattr(b, '_prec', a._prec))) return int(abs(a - b)*tol) <= 5 diff = abs(z1 - z2) az1 = abs(z1) if z2 and az1 > 1: return diff/az1 <= tol else: return diff <= tol ","Return a bool indicating whether the error between z1 and z2 is $\le$ ``tol``. Examples ======== If ``tol`` is ``None`` then ``True`` will be returned if :math:`|z1 - z2|\times 10^p \le 5` where $p$ is minimum value of the decimal precision of each value. >>> from sympy import comp, pi >>> pi4 = pi.n(4); pi4 3.142 >>> comp(_, 3.142) True >>> comp(pi4, 3.141) False >>> comp(pi4, 3.143) False A comparison of strings will be made if ``z1`` is a Number and ``z2`` is a string or ``tol`` is ''. >>> comp(pi4, 3.1415) True >>> comp(pi4, 3.1415, '') False When ``tol`` is provided and $z2$ is non-zero and :math:`|z1| > 1` the error is normalized by :math:`|z1|`: >>> abs(pi4 - 3.14)/pi4 0.000509791731426756 >>> comp(pi4, 3.14, .001) # difference less than 0.1% True >>> comp(pi4, 3.14, .0005) # difference less than 0.1% False When :math:`|z1| \le 1` the absolute error is used: >>> 1/pi4 0.3183 >>> abs(1/pi4 - 0.3183)/(1/pi4) 3.07371499106316e-5 >>> abs(1/pi4 - 0.3183) 9.78393554684764e-6 >>> comp(1/pi4, 0.3183, 1e-5) True To see if the absolute error between ``z1`` and ``z2`` is less than or equal to ``tol``, call this as ``comp(z1 - z2, 0, tol)`` or ``comp(z1 - z2, tol=tol)``: >>> abs(pi4 - 3.14) 0.00160156249999988 >>> comp(pi4 - 3.14, 0, .002) True >>> comp(pi4 - 3.14, 0, .001) False ",217,213,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def comp(z1, z2, tol=None): r if type(z2) is str: if not pure_complex(z1, or_real=True): raise ValueError('when z2 is a str z1 must be a Number') return str(z1) == z2 if not z1: z1, z2 = z2, z1 if not z1: return True if not tol: a, b = z1, z2 if tol == '': return str(a) == str(b) if tol is None: a, b = sympify(a), sympify(b) if not all(i.is_number for i in (a, b)): raise ValueError('expecting 2 numbers') fa = a.atoms(Float) fb = b.atoms(Float) if not fa and not fb: # no floats -- compare exactly return a == b # get a to be pure_complex for _ in range(2): ca = pure_complex(a, or_real=True) if not ca: if fa: a = a.n(prec_to_dps(min([i._prec for i in fa]))) ca = pure_complex(a, or_real=True) break else: fa, fb = fb, fa a, b = b, a cb = pure_complex(b) if not cb and fb: b = b.n(prec_to_dps(min([i._prec for i in fb]))) cb = pure_complex(b, or_real=True) if ca and cb and (ca[1] or cb[1]): return all(comp(i, j) for i, j in zip(ca, cb)) tol = 10**prec_to_dps(min(a._prec, getattr(b, '_prec', a._prec))) return int(abs(a - b)*tol) <= 5 diff = abs(z1 - z2) az1 = abs(z1) if z2 and az1 > 1: return diff/az1 <= tol else: return diff <= tol ``` ###Assistant : Return a bool indicating whether the error between z1 and z2 is $\le$ ``tol``. Examples ======== If ``tol`` is ``None`` then ``True`` will be returned if :math:`|z1 - z2|\times 10^p \le 5` where $p$ is minimum value of the decimal precision of each value. >>> from sympy import comp, pi >>> pi4 = pi.n(4); pi4 3.142 >>> comp(_, 3.142) True >>> comp(pi4, 3.141) False >>> comp(pi4, 3.143) False A comparison of strings will be made if ``z1`` is a Number and ``z2`` is a string or ``tol`` is ''. >>> comp(pi4, 3.1415) True >>> comp(pi4, 3.1415, '') False When ``tol`` is provided and $z2$ is non-zero and :math:`|z1| > 1` the error is normalized by :math:`|z1|`: >>> abs(pi4 - 3.14)/pi4 0.000509791731426756 >>> comp(pi4, 3.14, .001) # difference less than 0.1% True >>> comp(pi4, 3.14, .0005) # difference less than 0.1% False When :math:`|z1| \le 1` the absolute error is used: >>> 1/pi4 0.3183 >>> abs(1/pi4 - 0.3183)/(1/pi4) 3.07371499106316e-5 >>> abs(1/pi4 - 0.3183) 9.78393554684764e-6 >>> comp(1/pi4, 0.3183, 1e-5) True To see if the absolute error between ``z1`` and ``z2`` is less than or equal to ``tol``, call this as ``comp(z1 - z2, 0, tol)`` or ``comp(z1 - z2, tol=tol)``: >>> abs(pi4 - 3.14) 0.00160156249999988 >>> comp(pi4 - 3.14, 0, .002) True >>> comp(pi4 - 3.14, 0, .001) False " 50,"def _parse_img_level_ann(self, image_level_ann_file): item_lists = defaultdict(list) with self.file_client.get_local_path( image_level_ann_file) as local_path: with open(local_path, 'r') as f: reader = csv.reader(f) i = -1 for line in reader: i += 1 if i == 0: continue else: img_id = line[0] label_id = line[1] assert label_id in self.label_id_mapping image_level_label = int( self.label_id_mapping[label_id]) confidence = float(line[2]) item_lists[img_id].append( dict( image_level_label=image_level_label, confidence=confidence)) return item_lists ","Parse image level annotations from csv style ann_file. Args: image_level_ann_file (str): CSV style image level annotation file path. Returns: defaultdict[list[dict]]: Annotations where item of the defaultdict indicates an image, each of which has (n) dicts. Keys of dicts are: - `image_level_label` (int): of shape 1. - `confidence` (float): of shape 1. ",51,58,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _parse_img_level_ann(self, image_level_ann_file): item_lists = defaultdict(list) with self.file_client.get_local_path( image_level_ann_file) as local_path: with open(local_path, 'r') as f: reader = csv.reader(f) i = -1 for line in reader: i += 1 if i == 0: continue else: img_id = line[0] label_id = line[1] assert label_id in self.label_id_mapping image_level_label = int( self.label_id_mapping[label_id]) confidence = float(line[2]) item_lists[img_id].append( dict( image_level_label=image_level_label, confidence=confidence)) return item_lists ``` ###Assistant : Parse image level annotations from csv style ann_file. Args: image_level_ann_file (str): CSV style image level annotation file path. Returns: defaultdict[list[dict]]: Annotations where item of the defaultdict indicates an image, each of which has (n) dicts. Keys of dicts are: - `image_level_label` (int): of shape 1. - `confidence` (float): of shape 1. " 51,"def logical_and(self, a, b): a = _convert_other(a, raiseit=True) return a.logical_and(b, context=self) ","Applies the logical operation 'and' between each operand's digits. The operands must be both logical numbers. >>> ExtendedContext.logical_and(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('1'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010')) Decimal('1000') >>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10')) Decimal('10') >>> ExtendedContext.logical_and(110, 1101) Decimal('100') >>> ExtendedContext.logical_and(Decimal(110), 1101) Decimal('100') >>> ExtendedContext.logical_and(110, Decimal(1101)) Decimal('100') ",52,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def logical_and(self, a, b): a = _convert_other(a, raiseit=True) return a.logical_and(b, context=self) ``` ###Assistant : Applies the logical operation 'and' between each operand's digits. The operands must be both logical numbers. >>> ExtendedContext.logical_and(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('1'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010')) Decimal('1000') >>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10')) Decimal('10') >>> ExtendedContext.logical_and(110, 1101) Decimal('100') >>> ExtendedContext.logical_and(Decimal(110), 1101) Decimal('100') >>> ExtendedContext.logical_and(110, Decimal(1101)) Decimal('100') " 52,"def decrement_part_small(self, part, ub): if self.lpart >= ub - 1: self.p1 += 1 # increment to keep track of usefulness of tests return False plen = len(part) for j in range(plen - 1, -1, -1): # Knuth's mod, (answer to problem 7.2.1.5.69) if j == 0 and (part[0].v - 1)*(ub - self.lpart) < part[0].u: self.k1 += 1 return False if j == 0 and part[j].v > 1 or j > 0 and part[j].v > 0: # found val to decrement part[j].v -= 1 # Reset trailing parts back to maximum for k in range(j + 1, plen): part[k].v = part[k].u # Have now decremented part, but are we doomed to # failure when it is expanded? 
Check one oddball case # that turns out to be surprisingly common - exactly # enough room to expand the leading component, but no # room for the second component, which has v=0. if (plen > 1 and part[1].v == 0 and (part[0].u - part[0].v) == ((ub - self.lpart - 1) * part[0].v)): self.k2 += 1 self.db_trace(""Decrement fails test 3"") return False return True return False ","Decrements part (a subrange of pstack), if possible, returning True iff the part was successfully decremented. Parameters ========== part part to be decremented (topmost part on the stack) ub the maximum number of parts allowed in a partition returned by the calling traversal. Notes ===== The goal of this modification of the ordinary decrement method is to fail (meaning that the subtree rooted at this part is to be skipped) when it can be proved that this part can only have child partitions which are larger than allowed by ``ub``. If a decision is made to fail, it must be accurate, otherwise the enumeration will miss some partitions. But, it is OK not to capture all the possible failures -- if a part is passed that should not be, the resulting too-large partitions are filtered by the enumeration one level up. However, as is usual in constrained enumerations, failing early is advantageous. The tests used by this method catch the most common cases, although this implementation is by no means the last word on this problem. The tests include: 1) ``lpart`` must be less than ``ub`` by at least 2. This is because once a part has been decremented, the partition will gain at least one child in the spread step. 2) If the leading component of the part is about to be decremented, check for how many parts will be added in order to use up the unallocated multiplicity in that leading component, and fail if this number is greater than allowed by ``ub``. (See code for the exact expression.) This test is given in the answer to Knuth's problem 7.2.1.5.69. 3) If there is *exactly* enough room to expand the leading component by the above test, check the next component (if it exists) once decrementing has finished. If this has ``v == 0``, this next component will push the expansion over the limit by 1, so fail. ",319,182,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def decrement_part_small(self, part, ub): if self.lpart >= ub - 1: self.p1 += 1 # increment to keep track of usefulness of tests return False plen = len(part) for j in range(plen - 1, -1, -1): # Knuth's mod, (answer to problem 7.2.1.5.69) if j == 0 and (part[0].v - 1)*(ub - self.lpart) < part[0].u: self.k1 += 1 return False if j == 0 and part[j].v > 1 or j > 0 and part[j].v > 0: # found val to decrement part[j].v -= 1 # Reset trailing parts back to maximum for k in range(j + 1, plen): part[k].v = part[k].u # Have now decremented part, but are we doomed to # failure when it is expanded? Check one oddball case # that turns out to be surprisingly common - exactly # enough room to expand the leading component, but no # room for the second component, which has v=0. if (plen > 1 and part[1].v == 0 and (part[0].u - part[0].v) == ((ub - self.lpart - 1) * part[0].v)): self.k2 += 1 self.db_trace(""Decrement fails test 3"") return False return True return False ``` ###Assistant : Decrements part (a subrange of pstack), if possible, returning True iff the part was successfully decremented. 
Parameters ========== part part to be decremented (topmost part on the stack) ub the maximum number of parts allowed in a partition returned by the calling traversal. Notes ===== The goal of this modification of the ordinary decrement method is to fail (meaning that the subtree rooted at this part is to be skipped) when it can be proved that this part can only have child partitions which are larger than allowed by ``ub``. If a decision is made to fail, it must be accurate, otherwise the enumeration will miss some partitions. But, it is OK not to capture all the possible failures -- if a part is passed that should not be, the resulting too-large partitions are filtered by the enumeration one level up. However, as is usual in constrained enumerations, failing early is advantageous. The tests used by this method catch the most common cases, although this implementation is by no means the last word on this problem. The tests include: 1) ``lpart`` must be less than ``ub`` by at least 2. This is because once a part has been decremented, the partition will gain at least one child in the spread step. 2) If the leading component of the part is about to be decremented, check for how many parts will be added in order to use up the unallocated multiplicity in that leading component, and fail if this number is greater than allowed by ``ub``. (See code for the exact expression.) This test is given in the answer to Knuth's problem 7.2.1.5.69. 3) If there is *exactly* enough room to expand the leading component by the above test, check the next component (if it exists) once decrementing has finished. If this has ``v == 0``, this next component will push the expansion over the limit by 1, so fail. " 53,"def get_node_id(self) -> str: node_id = self.worker.current_node_id assert not node_id.is_nil() return node_id.hex() ","Get current node ID for this worker or driver. Node ID is the id of a node that your driver, task, or actor runs. The ID will be in hex format. Returns: A node id in hex format for this worker or driver. ",43,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_node_id(self) -> str: node_id = self.worker.current_node_id assert not node_id.is_nil() return node_id.hex() ``` ###Assistant : Get current node ID for this worker or driver. Node ID is the id of a node that your driver, task, or actor runs. The ID will be in hex format. Returns: A node id in hex format for this worker or driver. " 54,"def list_option(*, info): return _option( info, ""List options"", lambda opt: (isinstance(info.config.get_obj(opt.name), list) and not opt.no_autoconfig) ) ",A CompletionModel filled with settings whose values are lists.,9,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list_option(*, info): return _option( info, ""List options"", lambda opt: (isinstance(info.config.get_obj(opt.name), list) and not opt.no_autoconfig) ) ``` ###Assistant : A CompletionModel filled with settings whose values are lists. 
" 55,"def get_instance_from_config_with_end_date(config, query): start_date = ""2021-03-04"" end_date = ""2021-04-04"" conversion_window_days = 14 google_api = GoogleAds(credentials=config[""credentials""], customer_id=config[""customer_id""]) instance = CustomQuery( api=google_api, conversion_window_days=conversion_window_days, start_date=start_date, end_date=end_date, time_zone=""local"", custom_query_config={""query"": query, ""table_name"": ""whatever_table""}, ) return instance @pytest.mark.parametrize( ""query, fields"", [ ( , [""campaign.id"", ""campaign.name"", ""campaign.status"", ""metrics.impressions""], ), ( , [""campaign.accessible_bidding_strategy"", ""segments.ad_destination_type"", ""campaign.start_date"", ""campaign.end_date""], ), (, []), ], )"," SELecT campaign.id, campaign.name, campaign.status, metrics.impressions FROM campaign wheRe campaign.status = 'PAUSED' AND metrics.impressions > 100 order by campaign.status SELECT campaign.accessible_bidding_strategy, segments.ad_destination_type, campaign.start_date, campaign.end_date FROM campaign selet aasdasd from aaa",29,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_instance_from_config_with_end_date(config, query): start_date = ""2021-03-04"" end_date = ""2021-04-04"" conversion_window_days = 14 google_api = GoogleAds(credentials=config[""credentials""], customer_id=config[""customer_id""]) instance = CustomQuery( api=google_api, conversion_window_days=conversion_window_days, start_date=start_date, end_date=end_date, time_zone=""local"", custom_query_config={""query"": query, ""table_name"": ""whatever_table""}, ) return instance @pytest.mark.parametrize( ""query, fields"", [ ( , [""campaign.id"", ""campaign.name"", ""campaign.status"", ""metrics.impressions""], ), ( , [""campaign.accessible_bidding_strategy"", ""segments.ad_destination_type"", ""campaign.start_date"", ""campaign.end_date""], ), (, []), ], ) ``` ###Assistant : SELecT campaign.id, campaign.name, campaign.status, metrics.impressions FROM campaign wheRe campaign.status = 'PAUSED' AND metrics.impressions > 100 order by campaign.status SELECT campaign.accessible_bidding_strategy, segments.ad_destination_type, campaign.start_date, campaign.end_date FROM campaign selet aasdasd from aaa " 56,"def node_degree_xy(G, x=""out"", y=""in"", weight=None, nodes=None): nodes = set(G) if nodes is None else set(nodes) if G.is_directed(): direction = {""out"": G.out_degree, ""in"": G.in_degree} xdeg = direction[x] ydeg = direction[y] else: xdeg = ydeg = G.degree for u, degu in xdeg(nodes, weight=weight): # use G.edges to treat multigraphs correctly neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes) for _, degv in ydeg(neighbors, weight=weight): yield degu, degv ","Generate node degree-degree pairs for edges in G. Parameters ---------- G: NetworkX graph x: string ('in','out') The degree type for source node (directed graphs only). y: string ('in','out') The degree type for target node (directed graphs only). weight: string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. nodes: list or iterable (optional) Use only edges that are adjacency to specified nodes. The default is all nodes. Returns ------- (x, y): 2-tuple Generates 2-tuple of (degree, degree) values. 
Examples -------- >>> G = nx.DiGraph() >>> G.add_edge(1, 2) >>> list(nx.node_degree_xy(G, x=""out"", y=""in"")) [(1, 1)] >>> list(nx.node_degree_xy(G, x=""in"", y=""out"")) [(0, 0)] Notes ----- For undirected graphs each edge is produced twice, once for each edge representation (u, v) and (v, u), with the exception of self-loop edges which only appear once. ",157,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def node_degree_xy(G, x=""out"", y=""in"", weight=None, nodes=None): nodes = set(G) if nodes is None else set(nodes) if G.is_directed(): direction = {""out"": G.out_degree, ""in"": G.in_degree} xdeg = direction[x] ydeg = direction[y] else: xdeg = ydeg = G.degree for u, degu in xdeg(nodes, weight=weight): # use G.edges to treat multigraphs correctly neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes) for _, degv in ydeg(neighbors, weight=weight): yield degu, degv ``` ###Assistant : Generate node degree-degree pairs for edges in G. Parameters ---------- G: NetworkX graph x: string ('in','out') The degree type for source node (directed graphs only). y: string ('in','out') The degree type for target node (directed graphs only). weight: string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. nodes: list or iterable (optional) Use only edges that are adjacency to specified nodes. The default is all nodes. Returns ------- (x, y): 2-tuple Generates 2-tuple of (degree, degree) values. Examples -------- >>> G = nx.DiGraph() >>> G.add_edge(1, 2) >>> list(nx.node_degree_xy(G, x=""out"", y=""in"")) [(1, 1)] >>> list(nx.node_degree_xy(G, x=""in"", y=""out"")) [(0, 0)] Notes ----- For undirected graphs each edge is produced twice, once for each edge representation (u, v) and (v, u), with the exception of self-loop edges which only appear once. " 57,"def validate(self, num_steps=None, profile=False, reduce_results=True, info=None): worker_stats = self.worker_group.validate( num_steps=num_steps, profile=profile, info=info ) if reduce_results: return self._process_stats(worker_stats) else: return worker_stats ","Evaluates the model on the validation data set. Args: num_steps (int): Number of batches to compute update steps on per worker. This corresponds also to the number of times ``TrainingOperator.validate_batch`` is called per worker. profile (bool): Returns time stats for the evaluation procedure. reduce_results (bool): Whether to average all metrics across all workers into one dict. If a metric is a non-numerical value (or nested dictionaries), one value will be randomly selected among the workers. If False, returns a list of dicts. info (dict): Optional dictionary passed to the training operator for `validate` and `validate_batch`. Returns: A dictionary of metrics for validation. You can provide custom metrics by passing in a custom ``training_operator_cls``. ",113,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate(self, num_steps=None, profile=False, reduce_results=True, info=None): worker_stats = self.worker_group.validate( num_steps=num_steps, profile=profile, info=info ) if reduce_results: return self._process_stats(worker_stats) else: return worker_stats ``` ###Assistant : Evaluates the model on the validation data set. 
Args: num_steps (int): Number of batches to compute update steps on per worker. This corresponds also to the number of times ``TrainingOperator.validate_batch`` is called per worker. profile (bool): Returns time stats for the evaluation procedure. reduce_results (bool): Whether to average all metrics across all workers into one dict. If a metric is a non-numerical value (or nested dictionaries), one value will be randomly selected among the workers. If False, returns a list of dicts. info (dict): Optional dictionary passed to the training operator for `validate` and `validate_batch`. Returns: A dictionary of metrics for validation. You can provide custom metrics by passing in a custom ``training_operator_cls``. " 58,"def set_raw_scale(self, in_, scale): self.__check_input(in_) self.raw_scale[in_] = scale "," Set the scale of raw features s.t. the input blob = input * scale. While Python represents images in [0, 1], certain Caffe models like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale of these models must be 255. Parameters ---------- in_ : which input to assign this scale factor scale : scale coefficient ",57,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_raw_scale(self, in_, scale): self.__check_input(in_) self.raw_scale[in_] = scale ``` ###Assistant : Set the scale of raw features s.t. the input blob = input * scale. While Python represents images in [0, 1], certain Caffe models like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale of these models must be 255. Parameters ---------- in_ : which input to assign this scale factor scale : scale coefficient " 59,"def test_add_rule_to_best_shard(): # If we start with an empty list, then add to first shard shards: List[List[bazel_sharding.BazelRule]] = [list() for _ in range(4)] optimum = 600 rule = bazel_sharding.BazelRule(""mock"", ""medium"") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[0][0] == rule assert all(not shard for shard in shards[1:]) # Add to first shard below optimum old_rule = bazel_sharding.BazelRule(""mock"", ""medium"") shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)] shards[3] = [] optimum = old_rule.actual_timeout_s rule = bazel_sharding.BazelRule(""mock"", ""small"") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[3][0] == rule assert all(shard[-1] == old_rule for shard in shards[0:3]) # If all shards are above or equal optimum, add to the one with the smallest # difference old_rule = bazel_sharding.BazelRule(""mock"", ""large"") shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)] optimum = old_rule.actual_timeout_s old_rule_medium = bazel_sharding.BazelRule(""mock"", ""medium"") shards[3][0] = old_rule_medium rule = bazel_sharding.BazelRule(""mock"", ""small"") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[3][0] == old_rule_medium assert shards[3][-1] == rule assert all(shard[-1] == old_rule for shard in shards[0:3]) ",Test that the best shard in optimal strategy is chosen correctly.,11,151,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_add_rule_to_best_shard(): # If we start with an empty list, then add to first shard shards: List[List[bazel_sharding.BazelRule]] = [list() for _ in range(4)] optimum = 600 rule = bazel_sharding.BazelRule(""mock"", ""medium"") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[0][0] == rule assert all(not shard for shard in shards[1:]) # Add to first shard below optimum old_rule = bazel_sharding.BazelRule(""mock"", ""medium"") shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)] shards[3] = [] optimum = old_rule.actual_timeout_s rule = bazel_sharding.BazelRule(""mock"", ""small"") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[3][0] == rule assert all(shard[-1] == old_rule for shard in shards[0:3]) # If all shards are above or equal optimum, add to the one with the smallest # difference old_rule = bazel_sharding.BazelRule(""mock"", ""large"") shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)] optimum = old_rule.actual_timeout_s old_rule_medium = bazel_sharding.BazelRule(""mock"", ""medium"") shards[3][0] = old_rule_medium rule = bazel_sharding.BazelRule(""mock"", ""small"") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[3][0] == old_rule_medium assert shards[3][-1] == rule assert all(shard[-1] == old_rule for shard in shards[0:3]) ``` ###Assistant : Test that the best shard in optimal strategy is chosen correctly. " 60,"def async_heartbeat(self) -> None: self._computed_state = False self._restart_timer() self.async_write_ha_state() ","Mark the device as online, and restart the 25 hour timer. This gets called when the heartbeat node beats, but also when the parent sensor sends any events, as we can trust that to mean the device is online. This mitigates the risk of false positives due to a single missed heartbeat event. ",53,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def async_heartbeat(self) -> None: self._computed_state = False self._restart_timer() self.async_write_ha_state() ``` ###Assistant : Mark the device as online, and restart the 25 hour timer. This gets called when the heartbeat node beats, but also when the parent sensor sends any events, as we can trust that to mean the device is online. This mitigates the risk of false positives due to a single missed heartbeat event. " 61,"def test_assert_series_equal_interval_dtype_mismatch(): # https://github.com/pandas-dev/pandas/issues/32747 left = Series([pd.Interval(0, 1, ""right"")], dtype=""interval"") right = left.astype(object) msg = tm.assert_series_equal(left, right, check_dtype=False) with pytest.raises(AssertionError, match=msg): tm.assert_series_equal(left, right, check_dtype=True) ","Attributes of Series are different Attribute ""dtype"" are different \\[left\\]: interval\\[int64, right\\] \\[right\\]: object",14,24,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_assert_series_equal_interval_dtype_mismatch(): # https://github.com/pandas-dev/pandas/issues/32747 left = Series([pd.Interval(0, 1, ""right"")], dtype=""interval"") right = left.astype(object) msg = tm.assert_series_equal(left, right, check_dtype=False) with pytest.raises(AssertionError, match=msg): tm.assert_series_equal(left, right, check_dtype=True) ``` ###Assistant : Attributes of Series are different Attribute ""dtype"" are different \\[left\\]: interval\\[int64, right\\] \\[right\\]: object " 62,"def rc_file_defaults(): # Deprecation warnings were already handled when creating rcParamsOrig, no # need to reemit them here. with _api.suppress_matplotlib_deprecation_warning(): from .style.core import STYLE_BLACKLIST rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig if k not in STYLE_BLACKLIST}) "," Restore the `.rcParams` from the original rc file loaded by Matplotlib. Style-blacklisted `.rcParams` (defined in ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated. ",19,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rc_file_defaults(): # Deprecation warnings were already handled when creating rcParamsOrig, no # need to reemit them here. with _api.suppress_matplotlib_deprecation_warning(): from .style.core import STYLE_BLACKLIST rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig if k not in STYLE_BLACKLIST}) ``` ###Assistant : Restore the `.rcParams` from the original rc file loaded by Matplotlib. Style-blacklisted `.rcParams` (defined in ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated. " 63,"def lexer(self) -> Optional[Lexer]: if isinstance(self._lexer, Lexer): return self._lexer try: return get_lexer_by_name( self._lexer, stripnl=False, ensurenl=True, tabsize=self.tab_size, ) except ClassNotFound: return None ","The lexer for this syntax, or None if no lexer was found. Tries to find the lexer by name if a string was passed to the constructor. ",27,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def lexer(self) -> Optional[Lexer]: if isinstance(self._lexer, Lexer): return self._lexer try: return get_lexer_by_name( self._lexer, stripnl=False, ensurenl=True, tabsize=self.tab_size, ) except ClassNotFound: return None ``` ###Assistant : The lexer for this syntax, or None if no lexer was found. Tries to find the lexer by name if a string was passed to the constructor. " 64,"def test_numeric_repl(file, multiline_file): file.replace(multiline_file, r""Etiam"", 123) assert ""123"" in multiline_file.read_text() "," This test covers cases where the replacement string is numeric. The CLI parser yaml-fies it into a numeric type. If not converted back to a string type in file.replace, a TypeError occurs when the replace is attempted. See https://github.com/saltstack/salt/issues/9097 for more information. ",42,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_numeric_repl(file, multiline_file): file.replace(multiline_file, r""Etiam"", 123) assert ""123"" in multiline_file.read_text() ``` ###Assistant : This test covers cases where the replacement string is numeric. The CLI parser yaml-fies it into a numeric type. If not converted back to a string type in file.replace, a TypeError occurs when the replace is attempted. 
See https://github.com/saltstack/salt/issues/9097 for more information. " 65,"def set_interpret_parameters(self, segments=16): self.interpretation_segments = segments return self "," Calculates interpretation score of image subsections by splitting the image into subsections, then using a ""leave one out"" method to calculate the score of each subsection by whiting out the subsection and measuring the delta of the output value. Parameters: segments (int): Number of interpretation segments to split image into. ",50,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_interpret_parameters(self, segments=16): self.interpretation_segments = segments return self ``` ###Assistant : Calculates interpretation score of image subsections by splitting the image into subsections, then using a ""leave one out"" method to calculate the score of each subsection by whiting out the subsection and measuring the delta of the output value. Parameters: segments (int): Number of interpretation segments to split image into. " 66,"def can_native_upsert(self) -> bool: return sqlite3.sqlite_version_info >= (3, 24, 0) "," Do we support native UPSERTs? This requires SQLite3 3.24+, plus some more work we haven't done yet to tell what was inserted vs updated. ",24,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def can_native_upsert(self) -> bool: return sqlite3.sqlite_version_info >= (3, 24, 0) ``` ###Assistant : Do we support native UPSERTs? This requires SQLite3 3.24+, plus some more work we haven't done yet to tell what was inserted vs updated. " 67,"async def get_actors(self) -> dict: reply = await self._client.get_all_actor_info(timeout=DEFAULT_RPC_TIMEOUT) result = {} for message in reply.actor_table_data: data = self._message_to_dict(message=message, fields_to_decode=[""actor_id""]) data = filter_fields(data, ActorState) result[data[""actor_id""]] = data return result ","List all actor information from the cluster. Returns: {actor_id -> actor_data_in_dict} actor_data_in_dict's schema is in ActorState ",16,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def get_actors(self) -> dict: reply = await self._client.get_all_actor_info(timeout=DEFAULT_RPC_TIMEOUT) result = {} for message in reply.actor_table_data: data = self._message_to_dict(message=message, fields_to_decode=[""actor_id""]) data = filter_fields(data, ActorState) result[data[""actor_id""]] = data return result ``` ###Assistant : List all actor information from the cluster. 
Returns: {actor_id -> actor_data_in_dict} actor_data_in_dict's schema is in ActorState " 68,"def insert_predictor_answer(self, insert): model_interface = self.session.model_interface data_store = self.session.data_store select_data_query = insert.get('select_data_query') if isinstance(select_data_query, str) is False or len(select_data_query) == 0: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=""'select_data_query' should not be empty"" ).send() return models = model_interface.get_models() if insert['name'] in [x['name'] for x in models]: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=f""predictor with name '{insert['name']}'' already exists"" ).send() return kwargs = {} if isinstance(insert.get('training_options'), str) \ and len(insert['training_options']) > 0: try: kwargs = json.loads(insert['training_options']) except Exception: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg='training_options should be in valid JSON string' ).send() return integration = self.session.integration if isinstance(integration, str) is False or len(integration) == 0: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg='select_data_query can be used only in query from database' ).send() return insert['select_data_query'] = insert['select_data_query'].replace(r""\'"", ""'"") ds_name = data_store.get_vacant_name(insert['name']) ds = data_store.save_datasource(ds_name, integration, {'query': insert['select_data_query']}) insert['predict'] = [x.strip() for x in insert['predict'].split(',')] ds_data = data_store.get_datasource(ds_name) if ds_data is None: raise Exception(f""DataSource '{ds_name}' does not exists"") ds_columns = [x['name'] for x in ds_data['columns']] for col in insert['predict']: if col not in ds_columns: data_store.delete_datasource(ds_name) raise Exception(f""Column '{col}' not exists"") try: insert['predict'] = self._check_predict_columns(insert['predict'], ds_columns) except Exception: data_store.delete_datasource(ds_name) raise model_interface.learn( insert['name'], ds, insert['predict'], ds_data['id'], kwargs=kwargs, delete_ds_on_fail=True ) self.packet(OkPacket).send() "," Start learn new predictor. Parameters: - insert - dict with keys as columns of mindsb.predictors table. ",16,181,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def insert_predictor_answer(self, insert): model_interface = self.session.model_interface data_store = self.session.data_store select_data_query = insert.get('select_data_query') if isinstance(select_data_query, str) is False or len(select_data_query) == 0: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=""'select_data_query' should not be empty"" ).send() return models = model_interface.get_models() if insert['name'] in [x['name'] for x in models]: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=f""predictor with name '{insert['name']}'' already exists"" ).send() return kwargs = {} if isinstance(insert.get('training_options'), str) \ and len(insert['training_options']) > 0: try: kwargs = json.loads(insert['training_options']) except Exception: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg='training_options should be in valid JSON string' ).send() return integration = self.session.integration if isinstance(integration, str) is False or len(integration) == 0: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg='select_data_query can be used only in query from database' ).send() return insert['select_data_query'] = insert['select_data_query'].replace(r""\'"", ""'"") ds_name = data_store.get_vacant_name(insert['name']) ds = data_store.save_datasource(ds_name, integration, {'query': insert['select_data_query']}) insert['predict'] = [x.strip() for x in insert['predict'].split(',')] ds_data = data_store.get_datasource(ds_name) if ds_data is None: raise Exception(f""DataSource '{ds_name}' does not exists"") ds_columns = [x['name'] for x in ds_data['columns']] for col in insert['predict']: if col not in ds_columns: data_store.delete_datasource(ds_name) raise Exception(f""Column '{col}' not exists"") try: insert['predict'] = self._check_predict_columns(insert['predict'], ds_columns) except Exception: data_store.delete_datasource(ds_name) raise model_interface.learn( insert['name'], ds, insert['predict'], ds_data['id'], kwargs=kwargs, delete_ds_on_fail=True ) self.packet(OkPacket).send() ``` ###Assistant : Start learn new predictor. Parameters: - insert - dict with keys as columns of mindsb.predictors table. " 69,"def populate_historical_trade_data(self): trade_data = self.__orderbook.pivot( index=""Date"", columns=""Ticker"", values=[ ""Type"", ""Sector"", ""Industry"", ""Country"", ""Price"", ""Quantity"", ""Fees"", ""Premium"", ""Investment"", ""Side"", ""Currency"", ], ) # Make historical prices columns a multi-index. This helps the merging. self.portfolio_historical_prices.columns = pd.MultiIndex.from_product( [[""Close""], self.portfolio_historical_prices.columns] ) # Merge with historical close prices (and fillna) trade_data = pd.merge( trade_data, self.portfolio_historical_prices, how=""right"", left_index=True, right_index=True, ).fillna(0) # Accumulate quantity held by trade date trade_data[""Quantity""] = trade_data[""Quantity""].cumsum() trade_data[""Investment""] = trade_data[""Investment""].cumsum() trade_data.loc[:, (""Investment"", ""Total"")] = trade_data[""Investment""][ self.tickers_list ].sum(axis=1) self.historical_trade_data = trade_data ",Create a new dataframe to store historical prices by ticker,10,78,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def populate_historical_trade_data(self): trade_data = self.__orderbook.pivot( index=""Date"", columns=""Ticker"", values=[ ""Type"", ""Sector"", ""Industry"", ""Country"", ""Price"", ""Quantity"", ""Fees"", ""Premium"", ""Investment"", ""Side"", ""Currency"", ], ) # Make historical prices columns a multi-index. This helps the merging. self.portfolio_historical_prices.columns = pd.MultiIndex.from_product( [[""Close""], self.portfolio_historical_prices.columns] ) # Merge with historical close prices (and fillna) trade_data = pd.merge( trade_data, self.portfolio_historical_prices, how=""right"", left_index=True, right_index=True, ).fillna(0) # Accumulate quantity held by trade date trade_data[""Quantity""] = trade_data[""Quantity""].cumsum() trade_data[""Investment""] = trade_data[""Investment""].cumsum() trade_data.loc[:, (""Investment"", ""Total"")] = trade_data[""Investment""][ self.tickers_list ].sum(axis=1) self.historical_trade_data = trade_data ``` ###Assistant : Create a new dataframe to store historical prices by ticker " 70,"def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser): pytest.importorskip(""pandas"") data_id = 61 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) bunch_as_frame_true = fetch_openml( data_id=data_id, as_frame=True, cache=False, parser=parser, ) bunch_as_frame_false = fetch_openml( data_id=data_id, as_frame=False, cache=False, parser=parser, ) assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data) assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target) # Known failure of PyPy for OpenML. See the following issue: # https://github.com/scikit-learn/scikit-learn/issues/18906 @fails_if_pypy @pytest.mark.parametrize(""parser"", [""liac-arff"", ""pandas""])","Check the equivalence of the dataset when using `as_frame=False` and `as_frame=True`. ",11,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser): pytest.importorskip(""pandas"") data_id = 61 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) bunch_as_frame_true = fetch_openml( data_id=data_id, as_frame=True, cache=False, parser=parser, ) bunch_as_frame_false = fetch_openml( data_id=data_id, as_frame=False, cache=False, parser=parser, ) assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data) assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target) # Known failure of PyPy for OpenML. See the following issue: # https://github.com/scikit-learn/scikit-learn/issues/18906 @fails_if_pypy @pytest.mark.parametrize(""parser"", [""liac-arff"", ""pandas""]) ``` ###Assistant : Check the equivalence of the dataset when using `as_frame=False` and `as_frame=True`. " 71,"def wire_type(self): if hasattr(self, '_m_wire_type'): return self._m_wire_type self._m_wire_type = KaitaiStream.resolve_enum(GoogleProtobuf.Pair.WireTypes, (self.key.value & 7)) return getattr(self, '_m_wire_type', None) ","""Wire type"" is a part of the ""key"" that carries enough information to parse value from the wire, i.e. read correct amount of bytes, but there's not enough informaton to interprete in unambiguously. For example, one can't clearly distinguish 64-bit fixed-sized integers from 64-bit floats, signed zigzag-encoded varints from regular unsigned varints, arbitrary bytes from UTF-8 encoded strings, etc. 
",59,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wire_type(self): if hasattr(self, '_m_wire_type'): return self._m_wire_type self._m_wire_type = KaitaiStream.resolve_enum(GoogleProtobuf.Pair.WireTypes, (self.key.value & 7)) return getattr(self, '_m_wire_type', None) ``` ###Assistant : ""Wire type"" is a part of the ""key"" that carries enough information to parse value from the wire, i.e. read correct amount of bytes, but there's not enough informaton to interprete in unambiguously. For example, one can't clearly distinguish 64-bit fixed-sized integers from 64-bit floats, signed zigzag-encoded varints from regular unsigned varints, arbitrary bytes from UTF-8 encoded strings, etc. " 72,"def address(self): # pragma: no cover warnings.warn( ""Client.address is deprecated, use Client.peername instead."", DeprecationWarning, stacklevel=2, ) return self.peername ",*Deprecated:* An outdated alias for Client.peername.,6,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def address(self): # pragma: no cover warnings.warn( ""Client.address is deprecated, use Client.peername instead."", DeprecationWarning, stacklevel=2, ) return self.peername ``` ###Assistant : *Deprecated:* An outdated alias for Client.peername. " 73,"def jordan_block(kls, size=None, eigenvalue=None, *, band='upper', **kwargs): if 'rows' in kwargs or 'cols' in kwargs: msg = if 'rows' in kwargs and 'cols' in kwargs: msg += f","Returns a Jordan block Parameters ========== size : Integer, optional Specifies the shape of the Jordan block matrix. eigenvalue : Number or Symbol Specifies the value for the main diagonal of the matrix. .. note:: The keyword ``eigenval`` is also specified as an alias of this keyword, but it is not recommended to use. We may deprecate the alias in later release. band : 'upper' or 'lower', optional Specifies the position of the off-diagonal to put `1` s on. cls : Matrix, optional Specifies the matrix class of the output form. If it is not specified, the class type where the method is being executed on will be returned. rows, cols : Integer, optional Specifies the shape of the Jordan block matrix. See Notes section for the details of how these key works. .. deprecated:: 1.4 The rows and cols parameters are deprecated and will be removed in a future version. Returns ======= Matrix A Jordan block matrix. Raises ====== ValueError If insufficient arguments are given for matrix size specification, or no eigenvalue is given. Examples ======== Creating a default Jordan block: >>> from sympy import Matrix >>> from sympy.abc import x >>> Matrix.jordan_block(4, x) Matrix([ [x, 1, 0, 0], [0, x, 1, 0], [0, 0, x, 1], [0, 0, 0, x]]) Creating an alternative Jordan block matrix where `1` is on lower off-diagonal: >>> Matrix.jordan_block(4, x, band='lower') Matrix([ [x, 0, 0, 0], [1, x, 0, 0], [0, 1, x, 0], [0, 0, 1, x]]) Creating a Jordan block with keyword arguments >>> Matrix.jordan_block(size=4, eigenvalue=x) Matrix([ [x, 1, 0, 0], [0, x, 1, 0], [0, 0, x, 1], [0, 0, 0, x]]) Notes ===== .. deprecated:: 1.4 This feature is deprecated and will be removed in a future version. The keyword arguments ``size``, ``rows``, ``cols`` relates to the Jordan block size specifications. If you want to create a square Jordan block, specify either one of the three arguments. If you want to create a rectangular Jordan block, specify ``rows`` and ``cols`` individually. 
+--------------------------------+---------------------+ | Arguments Given | Matrix Shape | +----------+----------+----------+----------+----------+ | size | rows | cols | rows | cols | +==========+==========+==========+==========+==========+ | size | Any | size | size | +----------+----------+----------+----------+----------+ | | None | ValueError | | +----------+----------+----------+----------+ | None | rows | None | rows | rows | | +----------+----------+----------+----------+ | | None | cols | cols | cols | + +----------+----------+----------+----------+ | | rows | cols | rows | cols | +----------+----------+----------+----------+----------+ References ========== .. [1] https://en.wikipedia.org/wiki/Jordan_matrix The 'rows' and 'cols' keywords to Matrix.jordan_block() are deprecated. Use the 'size' parameter instead. \ To get a non-square Jordan block matrix use a more generic banded matrix constructor, like ",442,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def jordan_block(kls, size=None, eigenvalue=None, *, band='upper', **kwargs): if 'rows' in kwargs or 'cols' in kwargs: msg = if 'rows' in kwargs and 'cols' in kwargs: msg += f ``` ###Assistant : Returns a Jordan block Parameters ========== size : Integer, optional Specifies the shape of the Jordan block matrix. eigenvalue : Number or Symbol Specifies the value for the main diagonal of the matrix. .. note:: The keyword ``eigenval`` is also specified as an alias of this keyword, but it is not recommended to use. We may deprecate the alias in later release. band : 'upper' or 'lower', optional Specifies the position of the off-diagonal to put `1` s on. cls : Matrix, optional Specifies the matrix class of the output form. If it is not specified, the class type where the method is being executed on will be returned. rows, cols : Integer, optional Specifies the shape of the Jordan block matrix. See Notes section for the details of how these key works. .. deprecated:: 1.4 The rows and cols parameters are deprecated and will be removed in a future version. Returns ======= Matrix A Jordan block matrix. Raises ====== ValueError If insufficient arguments are given for matrix size specification, or no eigenvalue is given. Examples ======== Creating a default Jordan block: >>> from sympy import Matrix >>> from sympy.abc import x >>> Matrix.jordan_block(4, x) Matrix([ [x, 1, 0, 0], [0, x, 1, 0], [0, 0, x, 1], [0, 0, 0, x]]) Creating an alternative Jordan block matrix where `1` is on lower off-diagonal: >>> Matrix.jordan_block(4, x, band='lower') Matrix([ [x, 0, 0, 0], [1, x, 0, 0], [0, 1, x, 0], [0, 0, 1, x]]) Creating a Jordan block with keyword arguments >>> Matrix.jordan_block(size=4, eigenvalue=x) Matrix([ [x, 1, 0, 0], [0, x, 1, 0], [0, 0, x, 1], [0, 0, 0, x]]) Notes ===== .. deprecated:: 1.4 This feature is deprecated and will be removed in a future version. The keyword arguments ``size``, ``rows``, ``cols`` relates to the Jordan block size specifications. If you want to create a square Jordan block, specify either one of the three arguments. If you want to create a rectangular Jordan block, specify ``rows`` and ``cols`` individually. 
+--------------------------------+---------------------+ | Arguments Given | Matrix Shape | +----------+----------+----------+----------+----------+ | size | rows | cols | rows | cols | +==========+==========+==========+==========+==========+ | size | Any | size | size | +----------+----------+----------+----------+----------+ | | None | ValueError | | +----------+----------+----------+----------+ | None | rows | None | rows | rows | | +----------+----------+----------+----------+ | | None | cols | cols | cols | + +----------+----------+----------+----------+ | | rows | cols | rows | cols | +----------+----------+----------+----------+----------+ References ========== .. [1] https://en.wikipedia.org/wiki/Jordan_matrix The 'rows' and 'cols' keywords to Matrix.jordan_block() are deprecated. Use the 'size' parameter instead. \ To get a non-square Jordan block matrix use a more generic banded matrix constructor, like " 74,"def get_openapi_specs() -> dict: app = get_app() return get_openapi( title=app.title, version=app.version, openapi_version=app.openapi_version, description=app.description, routes=app.routes, servers=[{""url"": ""http://localhost:8000""}], ) "," Used to autogenerate OpenAPI specs file to use in the documentation. Returns `servers` to specify base URL for OpenAPI Playground (see https://swagger.io/docs/specification/api-host-and-base-path/) See `.github/utils/generate_openapi_specs.py` ",24,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_openapi_specs() -> dict: app = get_app() return get_openapi( title=app.title, version=app.version, openapi_version=app.openapi_version, description=app.description, routes=app.routes, servers=[{""url"": ""http://localhost:8000""}], ) ``` ###Assistant : Used to autogenerate OpenAPI specs file to use in the documentation. Returns `servers` to specify base URL for OpenAPI Playground (see https://swagger.io/docs/specification/api-host-and-base-path/) See `.github/utils/generate_openapi_specs.py` " 75,"def get_all_employee_emails(company): employee_list = frappe.get_all( ""Employee"", fields=[""name"", ""employee_name""], filters={""status"": ""Active"", ""company"": company} ) employee_emails = [] for employee in employee_list: if not employee: continue user, company_email, personal_email = frappe.db.get_value( ""Employee"", employee, [""user_id"", ""company_email"", ""personal_email""] ) email = user or company_email or personal_email if email: employee_emails.append(email) return employee_emails ",Returns list of employee emails either based on user_id or company_email,11,47,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_all_employee_emails(company): employee_list = frappe.get_all( ""Employee"", fields=[""name"", ""employee_name""], filters={""status"": ""Active"", ""company"": company} ) employee_emails = [] for employee in employee_list: if not employee: continue user, company_email, personal_email = frappe.db.get_value( ""Employee"", employee, [""user_id"", ""company_email"", ""personal_email""] ) email = user or company_email or personal_email if email: employee_emails.append(email) return employee_emails ``` ###Assistant : Returns list of employee emails either based on user_id or company_email " 76,"def test_in_non_semver_projects_resolved_in_next_release_is_equated_to_in_release(self): release_1 = self.create_release( date_added=timezone.now() - timedelta(minutes=45), version=""foobar 1"" ) release_2 = self.create_release(version=""foobar 2"") self.create_release(version=""foobar 3"") group = self.store_event( data={ ""timestamp"": iso_format(before_now(seconds=12)), ""fingerprint"": [""group-1""], ""release"": release_1.version, }, project_id=self.project.id, ).group self.login_as(user=self.user) response = self.get_success_response( qs_params={""id"": group.id}, status=""resolvedInNextRelease"" ) assert response.data[""status""] == ""resolved"" assert response.data[""statusDetails""][""inNextRelease""] grp_resolution = GroupResolution.objects.filter(group=group) assert len(grp_resolution) == 1 grp_resolution = grp_resolution[0] assert grp_resolution.current_release_version == release_1.version assert grp_resolution.release.id == release_2.id assert grp_resolution.type == GroupResolution.Type.in_release assert grp_resolution.status == GroupResolution.Status.resolved activity = Activity.objects.filter( group=grp_resolution.group, type=Activity.SET_RESOLVED_IN_RELEASE, ident=grp_resolution.id, ).first() assert activity.data[""version""] == release_2.version "," Test that ensures that if we basically know the next release when clicking on Resolved In Next Release because that release exists, then we can short circuit setting GroupResolution to type ""inNextRelease"", and then having `clear_exrired_resolutions` run once a new release is created to convert GroupResolution to in_release and set Activity. Basically we treat ""ResolvedInNextRelease"" as ""ResolvedInRelease"" when there is a release that was created after the last release associated with the group being resolved ",75,81,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_in_non_semver_projects_resolved_in_next_release_is_equated_to_in_release(self): release_1 = self.create_release( date_added=timezone.now() - timedelta(minutes=45), version=""foobar 1"" ) release_2 = self.create_release(version=""foobar 2"") self.create_release(version=""foobar 3"") group = self.store_event( data={ ""timestamp"": iso_format(before_now(seconds=12)), ""fingerprint"": [""group-1""], ""release"": release_1.version, }, project_id=self.project.id, ).group self.login_as(user=self.user) response = self.get_success_response( qs_params={""id"": group.id}, status=""resolvedInNextRelease"" ) assert response.data[""status""] == ""resolved"" assert response.data[""statusDetails""][""inNextRelease""] grp_resolution = GroupResolution.objects.filter(group=group) assert len(grp_resolution) == 1 grp_resolution = grp_resolution[0] assert grp_resolution.current_release_version == release_1.version assert grp_resolution.release.id == release_2.id assert grp_resolution.type == GroupResolution.Type.in_release assert grp_resolution.status == GroupResolution.Status.resolved activity = Activity.objects.filter( group=grp_resolution.group, type=Activity.SET_RESOLVED_IN_RELEASE, ident=grp_resolution.id, ).first() assert activity.data[""version""] == release_2.version ``` ###Assistant : Test that ensures that if we basically know the next release when clicking on Resolved In Next Release because that release exists, then we can short circuit setting GroupResolution to type ""inNextRelease"", and then having `clear_exrired_resolutions` run once a new release is created to convert GroupResolution to in_release and set Activity. Basically we treat ""ResolvedInNextRelease"" as ""ResolvedInRelease"" when there is a release that was created after the last release associated with the group being resolved " 77,"def subscription_order_updated_webhook(subscription_webhook): return subscription_webhook( ORDER_UPDATED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.ORDER_UPDATED ) ORDER_CONFIRMED_SUBSCRIPTION_QUERY = @pytest.fixture"," subscription{ event{ ...on OrderConfirmed{ order{ id } } } } ",10,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def subscription_order_updated_webhook(subscription_webhook): return subscription_webhook( ORDER_UPDATED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.ORDER_UPDATED ) ORDER_CONFIRMED_SUBSCRIPTION_QUERY = @pytest.fixture ``` ###Assistant : subscription{ event{ ...on OrderConfirmed{ order{ id } } } } " 78,"def cast_to_compatible_types(table): schema = table.schema new_schema = schema need_cast = False uint_to_int_cast = False new_cols = {} uint_to_int_map = { pa.uint8(): pa.int16(), pa.uint16(): pa.int32(), pa.uint32(): pa.int64(), pa.uint64(): pa.int64(), # May cause overflow } for i, field in enumerate(schema): # Currently OmniSci doesn't support Arrow table import with # dictionary columns. Here we cast dictionaries until support # is in place. # https://github.com/modin-project/modin/issues/1738 if pa.types.is_dictionary(field.type): # Conversion for dictionary of null type to string is not supported # in Arrow. Build new column for this case for now. 
if pa.types.is_null(field.type.value_type): mask = np.full(table.num_rows, True, dtype=bool) new_col_data = np.empty(table.num_rows, dtype=str) new_col = pa.array(new_col_data, pa.string(), mask) new_cols[i] = new_col else: need_cast = True new_field = pa.field( field.name, pa.string(), field.nullable, field.metadata ) new_schema = new_schema.set(i, new_field) # OmniSci doesn't support importing Arrow's date type: # https://github.com/omnisci/omniscidb/issues/678 elif pa.types.is_date(field.type): # Arrow's date is the number of days since the UNIX-epoch, so we can convert it # to a timestamp[s] (number of seconds since the UNIX-epoch) without losing precision new_field = pa.field( field.name, pa.timestamp(""s""), field.nullable, field.metadata ) new_schema = new_schema.set(i, new_field) need_cast = True # OmniSci doesn't support unsigned types elif pa.types.is_unsigned_integer(field.type): new_field = pa.field( field.name, uint_to_int_map[field.type], field.nullable, field.metadata, ) new_schema = new_schema.set(i, new_field) need_cast = True uint_to_int_cast = True # Such cast may affect the data, so we have to raise a warning about it if uint_to_int_cast: ErrorMessage.single_warning( ""OmniSci does not support unsigned integer types, such types will be rounded up to the signed equivalent."" ) for i, col in new_cols.items(): table = table.set_column(i, new_schema[i], col) if need_cast: try: table = table.cast(new_schema) except pa.lib.ArrowInvalid as e: raise (OverflowError if uint_to_int_cast else RuntimeError)( ""An error occurred when trying to convert unsupported by OmniSci 'dtypes' "" + f""to the supported ones, the schema to cast was: \n{new_schema}."" ) from e return table "," Cast PyArrow table to be fully compatible with OmniSci. Parameters ---------- table : pyarrow.Table Source table. Returns ------- pyarrow.Table Table with fully compatible types with OmniSci. ",26,295,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cast_to_compatible_types(table): schema = table.schema new_schema = schema need_cast = False uint_to_int_cast = False new_cols = {} uint_to_int_map = { pa.uint8(): pa.int16(), pa.uint16(): pa.int32(), pa.uint32(): pa.int64(), pa.uint64(): pa.int64(), # May cause overflow } for i, field in enumerate(schema): # Currently OmniSci doesn't support Arrow table import with # dictionary columns. Here we cast dictionaries until support # is in place. # https://github.com/modin-project/modin/issues/1738 if pa.types.is_dictionary(field.type): # Conversion for dictionary of null type to string is not supported # in Arrow. Build new column for this case for now. 
if pa.types.is_null(field.type.value_type): mask = np.full(table.num_rows, True, dtype=bool) new_col_data = np.empty(table.num_rows, dtype=str) new_col = pa.array(new_col_data, pa.string(), mask) new_cols[i] = new_col else: need_cast = True new_field = pa.field( field.name, pa.string(), field.nullable, field.metadata ) new_schema = new_schema.set(i, new_field) # OmniSci doesn't support importing Arrow's date type: # https://github.com/omnisci/omniscidb/issues/678 elif pa.types.is_date(field.type): # Arrow's date is the number of days since the UNIX-epoch, so we can convert it # to a timestamp[s] (number of seconds since the UNIX-epoch) without losing precision new_field = pa.field( field.name, pa.timestamp(""s""), field.nullable, field.metadata ) new_schema = new_schema.set(i, new_field) need_cast = True # OmniSci doesn't support unsigned types elif pa.types.is_unsigned_integer(field.type): new_field = pa.field( field.name, uint_to_int_map[field.type], field.nullable, field.metadata, ) new_schema = new_schema.set(i, new_field) need_cast = True uint_to_int_cast = True # Such cast may affect the data, so we have to raise a warning about it if uint_to_int_cast: ErrorMessage.single_warning( ""OmniSci does not support unsigned integer types, such types will be rounded up to the signed equivalent."" ) for i, col in new_cols.items(): table = table.set_column(i, new_schema[i], col) if need_cast: try: table = table.cast(new_schema) except pa.lib.ArrowInvalid as e: raise (OverflowError if uint_to_int_cast else RuntimeError)( ""An error occurred when trying to convert unsupported by OmniSci 'dtypes' "" + f""to the supported ones, the schema to cast was: \n{new_schema}."" ) from e return table ``` ###Assistant : Cast PyArrow table to be fully compatible with OmniSci. Parameters ---------- table : pyarrow.Table Source table. Returns ------- pyarrow.Table Table with fully compatible types with OmniSci. " 79,"def data(self) -> 'DataRequest._DataContent': return DataRequest._DataContent(self.proto.data) ","Get the data contaned in this data request :return: the data content as an instance of _DataContent wrapping docs and groundtruths ",21,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def data(self) -> 'DataRequest._DataContent': return DataRequest._DataContent(self.proto.data) ``` ###Assistant : Get the data contaned in this data request :return: the data content as an instance of _DataContent wrapping docs and groundtruths " 80,"def model_from_json(json_string, custom_objects=None): from keras.layers import ( deserialize_from_json, ) # pylint: disable=g-import-not-at-top return deserialize_from_json(json_string, custom_objects=custom_objects) ","Parses a JSON model configuration string and returns a model instance. Usage: >>> model = tf.keras.Sequential([ ... tf.keras.layers.Dense(5, input_shape=(3,)), ... tf.keras.layers.Softmax()]) >>> config = model.to_json() >>> loaded_model = tf.keras.models.model_from_json(config) Args: json_string: JSON string encoding a model configuration. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled). ",59,15,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def model_from_json(json_string, custom_objects=None): from keras.layers import ( deserialize_from_json, ) # pylint: disable=g-import-not-at-top return deserialize_from_json(json_string, custom_objects=custom_objects) ``` ###Assistant : Parses a JSON model configuration string and returns a model instance. Usage: >>> model = tf.keras.Sequential([ ... tf.keras.layers.Dense(5, input_shape=(3,)), ... tf.keras.layers.Softmax()]) >>> config = model.to_json() >>> loaded_model = tf.keras.models.model_from_json(config) Args: json_string: JSON string encoding a model configuration. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled). " 81,"def test_do_nothing_if_converted(self, run_convert_mock): stdout, _ = self.call_command() run_convert_mock.assert_not_called() self.assertIn(""Converting all PNG thumbnails to WebP"", stdout) "," GIVEN: - Document exists with default WebP thumbnail path WHEN: - Thumbnail conversion is attempted THEN: - Nothing is converted ",20,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_do_nothing_if_converted(self, run_convert_mock): stdout, _ = self.call_command() run_convert_mock.assert_not_called() self.assertIn(""Converting all PNG thumbnails to WebP"", stdout) ``` ###Assistant : GIVEN: - Document exists with default WebP thumbnail path WHEN: - Thumbnail conversion is attempted THEN: - Nothing is converted " 82,"def __ror__(self, other): if isinstance(other, str_type): other = self._literalStringClass(other) if not isinstance(other, ParserElement): raise TypeError( ""Cannot combine element of type {} with ParserElement"".format( type(other).__name__ ) ) return other | self "," Implementation of ``|`` operator when left operand is not a :class:`ParserElement` ",11,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __ror__(self, other): if isinstance(other, str_type): other = self._literalStringClass(other) if not isinstance(other, ParserElement): raise TypeError( ""Cannot combine element of type {} with ParserElement"".format( type(other).__name__ ) ) return other | self ``` ###Assistant : Implementation of ``|`` operator when left operand is not a :class:`ParserElement` " 83,"def shash(value): length = len(value) if length == 0: return 0 x = Hash.ordinal(value[0]) << 7 for c in value: x = (1000003 * x) ^ Hash.ordinal(c) x ^= length x &= 0xFFFFFFFFFFFFFFFF if x == -1: x = -2 # Convert to C long type return ctypes.c_long(x).value "," Returns a Python 2.7 hash for a string. Logic ported from the 2.7 Python branch: cpython/Objects/stringobject.c Method: static long string_hash(PyStringObject *a) Args: value: input string Returns: Python 2.7 hash ",29,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def shash(value): length = len(value) if length == 0: return 0 x = Hash.ordinal(value[0]) << 7 for c in value: x = (1000003 * x) ^ Hash.ordinal(c) x ^= length x &= 0xFFFFFFFFFFFFFFFF if x == -1: x = -2 # Convert to C long type return ctypes.c_long(x).value ``` ###Assistant : Returns a Python 2.7 hash for a string. 
Logic ported from the 2.7 Python branch: cpython/Objects/stringobject.c Method: static long string_hash(PyStringObject *a) Args: value: input string Returns: Python 2.7 hash " 84,"def data_files_with_one_split_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / ""autofolder_data_dir_with_metadata_one_split"" data_dir.mkdir(parents=True, exist_ok=True) subdir = data_dir / ""subdir"" subdir.mkdir(parents=True, exist_ok=True) filename = data_dir / ""file.txt"" shutil.copyfile(auto_text_file, filename) filename2 = data_dir / ""file2.txt"" shutil.copyfile(auto_text_file, filename2) filename3 = subdir / ""file3.txt"" # in subdir shutil.copyfile(auto_text_file, filename3) metadata_filename = data_dir / ""metadata.jsonl"" metadata = textwrap.dedent( ) with open(metadata_filename, ""w"", encoding=""utf-8"") as f: f.write(metadata) data_files_with_one_split_and_metadata = DataFilesDict.from_local_or_remote( get_data_patterns_locally(data_dir), data_dir ) assert len(data_files_with_one_split_and_metadata) == 1 assert len(data_files_with_one_split_and_metadata[""train""]) == 4 return data_files_with_one_split_and_metadata @pytest.fixture","\ {""file_name"": ""file.txt"", ""additional_feature"": ""Dummy file""} {""file_name"": ""file2.txt"", ""additional_feature"": ""Second dummy file""} {""file_name"": ""subdir/file3.txt"", ""additional_feature"": ""Third dummy file""} ",18,74,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def data_files_with_one_split_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / ""autofolder_data_dir_with_metadata_one_split"" data_dir.mkdir(parents=True, exist_ok=True) subdir = data_dir / ""subdir"" subdir.mkdir(parents=True, exist_ok=True) filename = data_dir / ""file.txt"" shutil.copyfile(auto_text_file, filename) filename2 = data_dir / ""file2.txt"" shutil.copyfile(auto_text_file, filename2) filename3 = subdir / ""file3.txt"" # in subdir shutil.copyfile(auto_text_file, filename3) metadata_filename = data_dir / ""metadata.jsonl"" metadata = textwrap.dedent( ) with open(metadata_filename, ""w"", encoding=""utf-8"") as f: f.write(metadata) data_files_with_one_split_and_metadata = DataFilesDict.from_local_or_remote( get_data_patterns_locally(data_dir), data_dir ) assert len(data_files_with_one_split_and_metadata) == 1 assert len(data_files_with_one_split_and_metadata[""train""]) == 4 return data_files_with_one_split_and_metadata @pytest.fixture ``` ###Assistant : \ {""file_name"": ""file.txt"", ""additional_feature"": ""Dummy file""} {""file_name"": ""file2.txt"", ""additional_feature"": ""Second dummy file""} {""file_name"": ""subdir/file3.txt"", ""additional_feature"": ""Third dummy file""} " 85,"def test_upload_room_keys_wrong_version(self) -> None: version = self.get_success( self.handler.create_version( self.local_user, { ""algorithm"": ""m.megolm_backup.v1"", ""auth_data"": ""first_version_auth_data"", }, ) ) self.assertEqual(version, ""1"") version = self.get_success( self.handler.create_version( self.local_user, { ""algorithm"": ""m.megolm_backup.v1"", ""auth_data"": ""second_version_auth_data"", }, ) ) self.assertEqual(version, ""2"") e = self.get_failure( self.handler.upload_room_keys(self.local_user, ""1"", room_keys), SynapseError ) res = e.value.code self.assertEqual(res, 403) ",Check that we get a 403 on uploading keys for an old version,13,47,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_upload_room_keys_wrong_version(self) -> None: version = self.get_success( self.handler.create_version( self.local_user, { ""algorithm"": ""m.megolm_backup.v1"", ""auth_data"": ""first_version_auth_data"", }, ) ) self.assertEqual(version, ""1"") version = self.get_success( self.handler.create_version( self.local_user, { ""algorithm"": ""m.megolm_backup.v1"", ""auth_data"": ""second_version_auth_data"", }, ) ) self.assertEqual(version, ""2"") e = self.get_failure( self.handler.upload_room_keys(self.local_user, ""1"", room_keys), SynapseError ) res = e.value.code self.assertEqual(res, 403) ``` ###Assistant : Check that we get a 403 on uploading keys for an old version " 86,"def previewtrain(self) -> Dict[str, List[Union[Image.Image, ImageTk.PhotoImage, None, float]]]: return self._previewtrain "," dict or ``None``: The training preview images. Dictionary key is the image name (`str`). Dictionary values are a `list` of the training image (:class:`PIL.Image`), the image formatted for tkinter display (:class:`PIL.ImageTK.PhotoImage`), the last modification time of the image (`float`). The value of this property is ``None`` if training is not running or there are no preview images available. ",58,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def previewtrain(self) -> Dict[str, List[Union[Image.Image, ImageTk.PhotoImage, None, float]]]: return self._previewtrain ``` ###Assistant : dict or ``None``: The training preview images. Dictionary key is the image name (`str`). Dictionary values are a `list` of the training image (:class:`PIL.Image`), the image formatted for tkinter display (:class:`PIL.ImageTK.PhotoImage`), the last modification time of the image (`float`). The value of this property is ``None`` if training is not running or there are no preview images available. " 87,"def test_padding(self): n = coord_net_spec() ax, a, b = coord_map_from_to(n.deconv, n.data) pad = random.randint(0, 10) # conv padding n = coord_net_spec(pad=pad) _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data) self.assertEquals(a, a_pad) self.assertEquals(b - pad, b_pad) # deconv padding n = coord_net_spec(dpad=pad) _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data) self.assertEquals(a, a_pad) self.assertEquals(b + pad, b_pad) # pad both to cancel out n = coord_net_spec(pad=pad, dpad=pad) _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data) self.assertEquals(a, a_pad) self.assertEquals(b, b_pad) "," Padding conv adds offset while padding deconv subtracts offset. ",9,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_padding(self): n = coord_net_spec() ax, a, b = coord_map_from_to(n.deconv, n.data) pad = random.randint(0, 10) # conv padding n = coord_net_spec(pad=pad) _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data) self.assertEquals(a, a_pad) self.assertEquals(b - pad, b_pad) # deconv padding n = coord_net_spec(dpad=pad) _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data) self.assertEquals(a, a_pad) self.assertEquals(b + pad, b_pad) # pad both to cancel out n = coord_net_spec(pad=pad, dpad=pad) _, a_pad, b_pad = coord_map_from_to(n.deconv, n.data) self.assertEquals(a, a_pad) self.assertEquals(b, b_pad) ``` ###Assistant : Padding conv adds offset while padding deconv subtracts offset. 
" 88,"def require_cuda(test_case): return unittest.skipUnless(torch.cuda.is_available(), ""test requires a GPU"")(test_case) "," Decorator marking a test that requires CUDA. These tests are skipped when there are no GPU available. ",17,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def require_cuda(test_case): return unittest.skipUnless(torch.cuda.is_available(), ""test requires a GPU"")(test_case) ``` ###Assistant : Decorator marking a test that requires CUDA. These tests are skipped when there are no GPU available. " 89,"def _flush_periodically(self) -> None: while self._active: # flush is thread-safe; it acquires and releases the lock internally self.flush() time.sleep(self._flush_period) "," Whilst this handler is active, flush the handler periodically. ",9,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _flush_periodically(self) -> None: while self._active: # flush is thread-safe; it acquires and releases the lock internally self.flush() time.sleep(self._flush_period) ``` ###Assistant : Whilst this handler is active, flush the handler periodically. " 90,"def save_flagged(self, dir, label, data, encryption_key) -> str | Dict: if ""confidences"" in data: return json.dumps( { example[""label""]: example[""confidence""] for example in data[""confidences""] } ) else: return data[""label""] "," Returns: Either a string representing the main category label, or a dictionary with category keys mapping to confidence levels. ",19,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_flagged(self, dir, label, data, encryption_key) -> str | Dict: if ""confidences"" in data: return json.dumps( { example[""label""]: example[""confidence""] for example in data[""confidences""] } ) else: return data[""label""] ``` ###Assistant : Returns: Either a string representing the main category label, or a dictionary with category keys mapping to confidence levels. " 91,"def test_higher_rank_inputs_for_importance_weights(self): for fw in framework_iterator(frameworks=(""torch"", ""tf""), session=True): vtrace = vtrace_tf if fw != ""torch"" else vtrace_torch if fw == ""tf"": inputs_ = { ""log_rhos"": tf1.placeholder( dtype=tf.float32, shape=[None, None, 1] ), ""discounts"": tf1.placeholder( dtype=tf.float32, shape=[None, None, 1] ), ""rewards"": tf1.placeholder( dtype=tf.float32, shape=[None, None, 42] ), ""values"": tf1.placeholder(dtype=tf.float32, shape=[None, None, 42]), ""bootstrap_value"": tf1.placeholder( dtype=tf.float32, shape=[None, 42] ), } else: inputs_ = { ""log_rhos"": Box(-1.0, 1.0, (8, 10, 1)).sample(), ""discounts"": Box(-1.0, 1.0, (8, 10, 1)).sample(), ""rewards"": Box(-1.0, 1.0, (8, 10, 42)).sample(), ""values"": Box(-1.0, 1.0, (8, 10, 42)).sample(), ""bootstrap_value"": Box(-1.0, 1.0, (10, 42)).sample(), } output = vtrace.from_importance_weights(**inputs_) check(int(output.vs.shape[-1]), 42) ",Checks support for additional dimensions in inputs.,7,96,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_higher_rank_inputs_for_importance_weights(self): for fw in framework_iterator(frameworks=(""torch"", ""tf""), session=True): vtrace = vtrace_tf if fw != ""torch"" else vtrace_torch if fw == ""tf"": inputs_ = { ""log_rhos"": tf1.placeholder( dtype=tf.float32, shape=[None, None, 1] ), ""discounts"": tf1.placeholder( dtype=tf.float32, shape=[None, None, 1] ), ""rewards"": tf1.placeholder( dtype=tf.float32, shape=[None, None, 42] ), ""values"": tf1.placeholder(dtype=tf.float32, shape=[None, None, 42]), ""bootstrap_value"": tf1.placeholder( dtype=tf.float32, shape=[None, 42] ), } else: inputs_ = { ""log_rhos"": Box(-1.0, 1.0, (8, 10, 1)).sample(), ""discounts"": Box(-1.0, 1.0, (8, 10, 1)).sample(), ""rewards"": Box(-1.0, 1.0, (8, 10, 42)).sample(), ""values"": Box(-1.0, 1.0, (8, 10, 42)).sample(), ""bootstrap_value"": Box(-1.0, 1.0, (10, 42)).sample(), } output = vtrace.from_importance_weights(**inputs_) check(int(output.vs.shape[-1]), 42) ``` ###Assistant : Checks support for additional dimensions in inputs. " 92,"def test_task_fail_duration(app, admin_client, dag_maker, session): with dag_maker() as dag: op1 = BashOperator(task_id='fail', bash_command='exit 1') op2 = BashOperator(task_id='success', bash_command='exit 0') with pytest.raises(AirflowException): op1.run() op2.run() op1_fails = ( session.query(TaskFail) .filter( TaskFail.task_id == 'fail', TaskFail.dag_id == dag.dag_id, ) .all() ) op2_fails = ( session.query(TaskFail) .filter( TaskFail.task_id == 'success', TaskFail.dag_id == dag.dag_id, ) .all() ) assert len(op1_fails) == 1 assert len(op2_fails) == 0 with unittest.mock.patch.object(app, 'dag_bag') as mocked_dag_bag: mocked_dag_bag.get_dag.return_value = dag resp = admin_client.get(f""dags/{dag.dag_id}/duration"", follow_redirects=True) html = resp.get_data().decode() cumulative_chart = json.loads(re.search(""data_cumlinechart=(.*);"", html).group(1)) line_chart = json.loads(re.search(""data_linechart=(.*);"", html).group(1)) assert resp.status_code == 200 assert sorted(item[""key""] for item in cumulative_chart) == [""fail"", ""success""] assert sorted(item[""key""] for item in line_chart) == [""fail"", ""success""] ",Task duration page with a TaskFail entry should render without error.,11,104,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_task_fail_duration(app, admin_client, dag_maker, session): with dag_maker() as dag: op1 = BashOperator(task_id='fail', bash_command='exit 1') op2 = BashOperator(task_id='success', bash_command='exit 0') with pytest.raises(AirflowException): op1.run() op2.run() op1_fails = ( session.query(TaskFail) .filter( TaskFail.task_id == 'fail', TaskFail.dag_id == dag.dag_id, ) .all() ) op2_fails = ( session.query(TaskFail) .filter( TaskFail.task_id == 'success', TaskFail.dag_id == dag.dag_id, ) .all() ) assert len(op1_fails) == 1 assert len(op2_fails) == 0 with unittest.mock.patch.object(app, 'dag_bag') as mocked_dag_bag: mocked_dag_bag.get_dag.return_value = dag resp = admin_client.get(f""dags/{dag.dag_id}/duration"", follow_redirects=True) html = resp.get_data().decode() cumulative_chart = json.loads(re.search(""data_cumlinechart=(.*);"", html).group(1)) line_chart = json.loads(re.search(""data_linechart=(.*);"", html).group(1)) assert resp.status_code == 200 assert sorted(item[""key""] for item in cumulative_chart) == [""fail"", ""success""] assert sorted(item[""key""] for item in line_chart) == [""fail"", ""success""] ``` ###Assistant : Task duration page with a TaskFail entry should render without error. " 93,"def test_all_users(self) -> None: self._create_users(2) channel = self.make_request( ""GET"", self.url + ""?deactivated=true"", {}, access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(3, len(channel.json_body[""users""])) self.assertEqual(3, channel.json_body[""total""]) # Check that all fields are available self._check_fields(channel.json_body[""users""]) "," List all users, including deactivated users. ",6,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_all_users(self) -> None: self._create_users(2) channel = self.make_request( ""GET"", self.url + ""?deactivated=true"", {}, access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(3, len(channel.json_body[""users""])) self.assertEqual(3, channel.json_body[""total""]) # Check that all fields are available self._check_fields(channel.json_body[""users""]) ``` ###Assistant : List all users, including deactivated users. " 94,"def real_quick_ratio(self): la, lb = len(self.a), len(self.b) # can't have more matches than the number of elements in the # shorter sequence return _calculate_ratio(min(la, lb), la + lb) __class_getitem__ = classmethod(GenericAlias) ","Return an upper bound on ratio() very quickly. This isn't defined beyond that it is an upper bound on .ratio(), and is faster to compute than either .ratio() or .quick_ratio(). ",30,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def real_quick_ratio(self): la, lb = len(self.a), len(self.b) # can't have more matches than the number of elements in the # shorter sequence return _calculate_ratio(min(la, lb), la + lb) __class_getitem__ = classmethod(GenericAlias) ``` ###Assistant : Return an upper bound on ratio() very quickly. This isn't defined beyond that it is an upper bound on .ratio(), and is faster to compute than either .ratio() or .quick_ratio(). 
" 95,"def test_unpublish_view_invalid_page_id(self): # Request confirm unpublish page but with illegal page id response = self.client.get( reverse( ""wagtail_bulk_action"", args=( ""wagtailcore"", ""page"", ""unpublish"", ), ) ) # Check that the user received a 404 response self.assertEqual(response.status_code, 404) "," This tests that the unpublish view returns an error if the page id is invalid ",15,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_unpublish_view_invalid_page_id(self): # Request confirm unpublish page but with illegal page id response = self.client.get( reverse( ""wagtail_bulk_action"", args=( ""wagtailcore"", ""page"", ""unpublish"", ), ) ) # Check that the user received a 404 response self.assertEqual(response.status_code, 404) ``` ###Assistant : This tests that the unpublish view returns an error if the page id is invalid " 96,"def test_warn_report(): fn = report with warnings.catch_warnings(record=True) as record: # Ignore Deprecation warnings. warnings.filterwarnings(""ignore"", category=DeprecationWarning) assert not fn(dict()) assert fn.__name__ in record[0].message.args[0] reset_log_once_with_str(fn.__name__) ",Checks if calling session.report function outside of session raises warning.,10,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_warn_report(): fn = report with warnings.catch_warnings(record=True) as record: # Ignore Deprecation warnings. warnings.filterwarnings(""ignore"", category=DeprecationWarning) assert not fn(dict()) assert fn.__name__ in record[0].message.args[0] reset_log_once_with_str(fn.__name__) ``` ###Assistant : Checks if calling session.report function outside of session raises warning. " 97,"def forward(self, feats, img_metas): batch_size = len(img_metas) mask_features, multi_scale_memorys = self.pixel_decoder(feats) # multi_scale_memorys (from low resolution to high resolution) decoder_inputs = [] decoder_positional_encodings = [] for i in range(self.num_transformer_feat_level): decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i]) # shape (batch_size, c, h, w) -> (h*w, batch_size, c) decoder_input = decoder_input.flatten(2).permute(2, 0, 1) level_embed = self.level_embed.weight[i].view(1, 1, -1) decoder_input = decoder_input + level_embed # shape (batch_size, c, h, w) -> (h*w, batch_size, c) mask = decoder_input.new_zeros( (batch_size, ) + multi_scale_memorys[i].shape[-2:], dtype=torch.bool) decoder_positional_encoding = self.decoder_positional_encoding( mask) decoder_positional_encoding = decoder_positional_encoding.flatten( 2).permute(2, 0, 1) decoder_inputs.append(decoder_input) decoder_positional_encodings.append(decoder_positional_encoding) # shape (num_queries, c) -> (num_queries, batch_size, c) query_feat = self.query_feat.weight.unsqueeze(1).repeat( (1, batch_size, 1)) query_embed = self.query_embed.weight.unsqueeze(1).repeat( (1, batch_size, 1)) cls_pred_list = [] mask_pred_list = [] cls_pred, mask_pred, attn_mask = self.forward_head( query_feat, mask_features, multi_scale_memorys[0].shape[-2:]) cls_pred_list.append(cls_pred) mask_pred_list.append(mask_pred) for i in range(self.num_transformer_decoder_layers): level_idx = i % self.num_transformer_feat_level # if a mask is all True(all background), then set it all False. 
attn_mask[torch.where( attn_mask.sum(-1) == attn_mask.shape[-1])] = False # cross_attn + self_attn layer = self.transformer_decoder.layers[i] attn_masks = [attn_mask, None] query_feat = layer( query=query_feat, key=decoder_inputs[level_idx], value=decoder_inputs[level_idx], query_pos=query_embed, key_pos=decoder_positional_encodings[level_idx], attn_masks=attn_masks, query_key_padding_mask=None, # here we do not apply masking on padded region key_padding_mask=None) cls_pred, mask_pred, attn_mask = self.forward_head( query_feat, mask_features, multi_scale_memorys[ (i + 1) % self.num_transformer_feat_level].shape[-2:]) cls_pred_list.append(cls_pred) mask_pred_list.append(mask_pred) return cls_pred_list, mask_pred_list ","Forward function. Args: feats (list[Tensor]): Multi scale Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple: A tuple contains two elements. - cls_pred_list (list[Tensor]): Classification logits \ for each decoder layer. Each is a 3D-tensor with shape \ (batch_size, num_queries, cls_out_channels). \ Note `cls_out_channels` should include background. - mask_pred_list (list[Tensor]): Mask logits for each \ decoder layer. Each with shape (batch_size, num_queries, \ h, w). ",73,201,Python," ###User : Below is a Python method which does a task.
attn_mask[torch.where( attn_mask.sum(-1) == attn_mask.shape[-1])] = False # cross_attn + self_attn layer = self.transformer_decoder.layers[i] attn_masks = [attn_mask, None] query_feat = layer( query=query_feat, key=decoder_inputs[level_idx], value=decoder_inputs[level_idx], query_pos=query_embed, key_pos=decoder_positional_encodings[level_idx], attn_masks=attn_masks, query_key_padding_mask=None, # here we do not apply masking on padded region key_padding_mask=None) cls_pred, mask_pred, attn_mask = self.forward_head( query_feat, mask_features, multi_scale_memorys[ (i + 1) % self.num_transformer_feat_level].shape[-2:]) cls_pred_list.append(cls_pred) mask_pred_list.append(mask_pred) return cls_pred_list, mask_pred_list ``` ###Assistant : Forward function. Args: feats (list[Tensor]): Multi scale Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple: A tuple contains two elements. - cls_pred_list (list[Tensor)]: Classification logits \ for each decoder layer. Each is a 3D-tensor with shape \ (batch_size, num_queries, cls_out_channels). \ Note `cls_out_channels` should includes background. - mask_pred_list (list[Tensor]): Mask logits for each \ decoder layer. Each with shape (batch_size, num_queries, \ h, w). " 98,"def formfield_for_manytomany(self, db_field, request, **kwargs): # If it uses an intermediary model that isn't auto created, don't show # a field in admin. if not db_field.remote_field.through._meta.auto_created: return None db = kwargs.get(""using"") if ""widget"" not in kwargs: autocomplete_fields = self.get_autocomplete_fields(request) if db_field.name in autocomplete_fields: kwargs[""widget""] = AutocompleteSelectMultiple( db_field, self.admin_site, using=db, ) elif db_field.name in self.raw_id_fields: kwargs[""widget""] = widgets.ManyToManyRawIdWidget( db_field.remote_field, self.admin_site, using=db, ) elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]: kwargs[""widget""] = widgets.FilteredSelectMultiple( db_field.verbose_name, db_field.name in self.filter_vertical ) if ""queryset"" not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs[""queryset""] = queryset form_field = db_field.formfield(**kwargs) if isinstance(form_field.widget, SelectMultiple) and not isinstance( form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple) ): msg = _( ""Hold down “Control”, or “Command” on a Mac, to select more than one."" ) help_text = form_field.help_text form_field.help_text = ( format_lazy(""{} {}"", help_text, msg) if help_text else msg ) return form_field "," Get a form Field for a ManyToManyField. ",7,139,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def formfield_for_manytomany(self, db_field, request, **kwargs): # If it uses an intermediary model that isn't auto created, don't show # a field in admin. 
if not db_field.remote_field.through._meta.auto_created: return None db = kwargs.get(""using"") if ""widget"" not in kwargs: autocomplete_fields = self.get_autocomplete_fields(request) if db_field.name in autocomplete_fields: kwargs[""widget""] = AutocompleteSelectMultiple( db_field, self.admin_site, using=db, ) elif db_field.name in self.raw_id_fields: kwargs[""widget""] = widgets.ManyToManyRawIdWidget( db_field.remote_field, self.admin_site, using=db, ) elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]: kwargs[""widget""] = widgets.FilteredSelectMultiple( db_field.verbose_name, db_field.name in self.filter_vertical ) if ""queryset"" not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs[""queryset""] = queryset form_field = db_field.formfield(**kwargs) if isinstance(form_field.widget, SelectMultiple) and not isinstance( form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple) ): msg = _( ""Hold down “Control”, or “Command” on a Mac, to select more than one."" ) help_text = form_field.help_text form_field.help_text = ( format_lazy(""{} {}"", help_text, msg) if help_text else msg ) return form_field ``` ###Assistant : Get a form Field for a ManyToManyField. " 99,"def test_expiry_logic(self) -> None: self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ ""1"" ] = 100000 self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ ""2"" ] = 200000 self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ ""3"" ] = 300000 self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion() # All entries within time frame self.assertEqual( len( self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion ), 3, ) # Oldest room to expire self.pump(1.01) self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion() self.assertEqual( len( self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion ), 2, ) # All rooms to expire self.pump(2) self.assertEqual( len( self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion ), 0, ) ","Simple test to ensure that _expire_rooms_to_exclude_from_dummy_event_insertion() expires old entries correctly. ",10,57,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_expiry_logic(self) -> None: self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ ""1"" ] = 100000 self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ ""2"" ] = 200000 self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ ""3"" ] = 300000 self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion() # All entries within time frame self.assertEqual( len( self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion ), 3, ) # Oldest room to expire self.pump(1.01) self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion() self.assertEqual( len( self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion ), 2, ) # All rooms to expire self.pump(2) self.assertEqual( len( self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion ), 0, ) ``` ###Assistant : Simple test to ensure that _expire_rooms_to_exclude_from_dummy_event_insertion() expires old entries correctly. 
" 100,"def _tune_legacy_checkpoint_score_attr(self) -> Optional[str]: if self.checkpoint_score_attribute is None: return self.checkpoint_score_attribute prefix = """" if self.checkpoint_score_order == MIN: prefix = ""min-"" return f""{prefix}{self.checkpoint_score_attribute}"" # Alias for backwards compatibility deprecation_message = ( ""`CheckpointStrategy` is deprecated and will be removed in "" ""the future. Please use `ray.air.config.CheckpointStrategy` "" ""instead."" ) @Deprecated(message=deprecation_message) @dataclass","Same as ``checkpoint_score_attr`` in ``tune.run``. Only used for Legacy API compatibility. ",11,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _tune_legacy_checkpoint_score_attr(self) -> Optional[str]: if self.checkpoint_score_attribute is None: return self.checkpoint_score_attribute prefix = """" if self.checkpoint_score_order == MIN: prefix = ""min-"" return f""{prefix}{self.checkpoint_score_attribute}"" # Alias for backwards compatibility deprecation_message = ( ""`CheckpointStrategy` is deprecated and will be removed in "" ""the future. Please use `ray.air.config.CheckpointStrategy` "" ""instead."" ) @Deprecated(message=deprecation_message) @dataclass ``` ###Assistant : Same as ``checkpoint_score_attr`` in ``tune.run``. Only used for Legacy API compatibility. " 101,"def test_dynamic_sampling_bias_activation(self): project = self.project # force creation project.update_option( ""sentry:dynamic_sampling_biases"", [ {""id"": ""boostEnvironments"", ""active"": False}, ], ) self.login_as(self.user) token = ApiToken.objects.create(user=self.user, scope_list=[""project:write""]) authorization = f""Bearer {token.token}"" url = reverse( ""sentry-api-0-project-details"", kwargs={ ""organization_slug"": self.project.organization.slug, ""project_slug"": self.project.slug, }, ) with Feature({self.new_ds_flag: True}): self.client.put( url, format=""json"", HTTP_AUTHORIZATION=authorization, data={ ""dynamicSamplingBiases"": [ {""id"": ""boostEnvironments"", ""active"": True}, ] }, ) assert AuditLogEntry.objects.filter( organization=self.project.organization, event=audit_log.get_event_id(""SAMPLING_BIAS_ENABLED""), ).exists() "," Tests that when sending a request to enable a dynamic sampling bias, the bias will be successfully enabled and the audit log 'SAMPLING_BIAS_ENABLED' will be triggered ",26,59,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_dynamic_sampling_bias_activation(self): project = self.project # force creation project.update_option( ""sentry:dynamic_sampling_biases"", [ {""id"": ""boostEnvironments"", ""active"": False}, ], ) self.login_as(self.user) token = ApiToken.objects.create(user=self.user, scope_list=[""project:write""]) authorization = f""Bearer {token.token}"" url = reverse( ""sentry-api-0-project-details"", kwargs={ ""organization_slug"": self.project.organization.slug, ""project_slug"": self.project.slug, }, ) with Feature({self.new_ds_flag: True}): self.client.put( url, format=""json"", HTTP_AUTHORIZATION=authorization, data={ ""dynamicSamplingBiases"": [ {""id"": ""boostEnvironments"", ""active"": True}, ] }, ) assert AuditLogEntry.objects.filter( organization=self.project.organization, event=audit_log.get_event_id(""SAMPLING_BIAS_ENABLED""), ).exists() ``` ###Assistant : Tests that when sending a request to enable a dynamic sampling bias, the bias will be successfully enabled and the audit log 'SAMPLING_BIAS_ENABLED' will be triggered " 102,"def call(self, inputs, *args, **kwargs): input_shape = K.int_shape(inputs) if len(input_shape) != 4: raise ValueError('Inputs should have rank ' + str(4) + '; Received input shape:', str(input_shape)) if self.data_format == 'channels_first': batch_size, channels, height, width = input_shape if batch_size is None: batch_size = -1 r_height, r_width = self.size o_height, o_width = height * r_height, width * r_width o_channels = channels // (r_height * r_width) out = K.reshape(inputs, (batch_size, r_height, r_width, o_channels, height, width)) out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2)) out = K.reshape(out, (batch_size, o_channels, o_height, o_width)) elif self.data_format == 'channels_last': batch_size, height, width, channels = input_shape if batch_size is None: batch_size = -1 r_height, r_width = self.size o_height, o_width = height * r_height, width * r_width o_channels = channels // (r_height * r_width) out = K.reshape(inputs, (batch_size, height, width, r_height, r_width, o_channels)) out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5)) out = K.reshape(out, (batch_size, o_height, o_width, o_channels)) return out ","This is where the layer's logic lives. Parameters ---------- inputs: tensor Input tensor, or list/tuple of input tensors args: tuple Additional standard keras Layer arguments kwargs: dict Additional standard keras Layer keyword arguments Returns ------- tensor A tensor or list/tuple of tensors ",42,152,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def call(self, inputs, *args, **kwargs): input_shape = K.int_shape(inputs) if len(input_shape) != 4: raise ValueError('Inputs should have rank ' + str(4) + '; Received input shape:', str(input_shape)) if self.data_format == 'channels_first': batch_size, channels, height, width = input_shape if batch_size is None: batch_size = -1 r_height, r_width = self.size o_height, o_width = height * r_height, width * r_width o_channels = channels // (r_height * r_width) out = K.reshape(inputs, (batch_size, r_height, r_width, o_channels, height, width)) out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2)) out = K.reshape(out, (batch_size, o_channels, o_height, o_width)) elif self.data_format == 'channels_last': batch_size, height, width, channels = input_shape if batch_size is None: batch_size = -1 r_height, r_width = self.size o_height, o_width = height * r_height, width * r_width o_channels = channels // (r_height * r_width) out = K.reshape(inputs, (batch_size, height, width, r_height, r_width, o_channels)) out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5)) out = K.reshape(out, (batch_size, o_height, o_width, o_channels)) return out ``` ###Assistant : This is where the layer's logic lives. Parameters ---------- inputs: tensor Input tensor, or list/tuple of input tensors args: tuple Additional standard keras Layer arguments kwargs: dict Additional standard keras Layer keyword arguments Returns ------- tensor A tensor or list/tuple of tensors " 103,"def to_native_types(self, slicer=None, **kwargs) -> np.ndarray: warnings.warn( ""The 'to_native_types' method is deprecated and will be removed in "" ""a future version. Use 'astype(str)' instead."", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) values = self if slicer is not None: values = values[slicer] return values._format_native_types(**kwargs) "," Format specified values of `self` and return them. .. deprecated:: 1.2.0 Parameters ---------- slicer : int, array-like An indexer into `self` that specifies which values are used in the formatting process. kwargs : dict Options for specifying how the values should be formatted. These options include the following: 1) na_rep : str The value that serves as a placeholder for NULL values 2) quoting : bool or None Whether or not there are quoted values in `self` 3) date_format : str The format used to represent date-like values. Returns ------- numpy.ndarray Formatted values. ",93,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_native_types(self, slicer=None, **kwargs) -> np.ndarray: warnings.warn( ""The 'to_native_types' method is deprecated and will be removed in "" ""a future version. Use 'astype(str)' instead."", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) values = self if slicer is not None: values = values[slicer] return values._format_native_types(**kwargs) ``` ###Assistant : Format specified values of `self` and return them. .. deprecated:: 1.2.0 Parameters ---------- slicer : int, array-like An indexer into `self` that specifies which values are used in the formatting process. kwargs : dict Options for specifying how the values should be formatted. These options include the following: 1) na_rep : str The value that serves as a placeholder for NULL values 2) quoting : bool or None Whether or not there are quoted values in `self` 3) date_format : str The format used to represent date-like values. 
Returns ------- numpy.ndarray Formatted values. " 104,"def periphery(G, e=None, usebounds=False, weight=None): if usebounds is True and e is None and not G.is_directed(): return _extrema_bounding(G, compute=""periphery"", weight=weight) if e is None: e = eccentricity(G, weight=weight) diameter = max(e.values()) p = [v for v in e if e[v] == diameter] return p ","Returns the periphery of the graph G. The periphery is the set of nodes with eccentricity equal to the diameter. Parameters ---------- G : NetworkX graph A graph e : eccentricity dictionary, optional A precomputed dictionary of eccentricities. weight : string, function, or None If this is a string, then edge weights will be accessed via the edge attribute with this key (that is, the weight of the edge joining `u` to `v` will be ``G.edges[u, v][weight]``). If no such edge attribute exists, the weight of the edge is assumed to be one. If this is a function, the weight of an edge is the value returned by the function. The function must accept exactly three positional arguments: the two endpoints of an edge and the dictionary of edge attributes for that edge. The function must return a number. If this is None, every edge has weight/distance/cost 1. Weights stored as floating point values can lead to small round-off errors in distances. Use integer weights to avoid this. Weights should be positive, since they are distances. Returns ------- p : list List of nodes in periphery Examples -------- >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) >>> nx.periphery(G) [2, 5] See Also -------- barycenter center ",212,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def periphery(G, e=None, usebounds=False, weight=None): if usebounds is True and e is None and not G.is_directed(): return _extrema_bounding(G, compute=""periphery"", weight=weight) if e is None: e = eccentricity(G, weight=weight) diameter = max(e.values()) p = [v for v in e if e[v] == diameter] return p ``` ###Assistant : Returns the periphery of the graph G. The periphery is the set of nodes with eccentricity equal to the diameter. Parameters ---------- G : NetworkX graph A graph e : eccentricity dictionary, optional A precomputed dictionary of eccentricities. weight : string, function, or None If this is a string, then edge weights will be accessed via the edge attribute with this key (that is, the weight of the edge joining `u` to `v` will be ``G.edges[u, v][weight]``). If no such edge attribute exists, the weight of the edge is assumed to be one. If this is a function, the weight of an edge is the value returned by the function. The function must accept exactly three positional arguments: the two endpoints of an edge and the dictionary of edge attributes for that edge. The function must return a number. If this is None, every edge has weight/distance/cost 1. Weights stored as floating point values can lead to small round-off errors in distances. Use integer weights to avoid this. Weights should be positive, since they are distances. 
Returns ------- p : list List of nodes in periphery Examples -------- >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) >>> nx.periphery(G) [2, 5] See Also -------- barycenter center " 105,"def _current_mode_setpoint_enums(self) -> list[ThermostatSetpointType | None]: if self._current_mode is None: # Thermostat(valve) with no support for setting a mode is considered heating-only return [ThermostatSetpointType.HEATING] return THERMOSTAT_MODE_SETPOINT_MAP.get(int(self._current_mode.value), []) # type: ignore[no-any-return] ",Return the list of enums that are relevant to the current thermostat mode.,13,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _current_mode_setpoint_enums(self) -> list[ThermostatSetpointType | None]: if self._current_mode is None: # Thermostat(valve) with no support for setting a mode is considered heating-only return [ThermostatSetpointType.HEATING] return THERMOSTAT_MODE_SETPOINT_MAP.get(int(self._current_mode.value), []) # type: ignore[no-any-return] ``` ###Assistant : Return the list of enums that are relevant to the current thermostat mode. " 106,"def line(loc, strg): lastCR = strg.rfind(""\n"", 0, loc) nextCR = strg.find(""\n"", loc) if nextCR >= 0: return strg[lastCR + 1:nextCR] else: return strg[lastCR + 1:] ","Returns the line of text containing loc within a string, counting newlines as line separators. ",15,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def line(loc, strg): lastCR = strg.rfind(""\n"", 0, loc) nextCR = strg.find(""\n"", loc) if nextCR >= 0: return strg[lastCR + 1:nextCR] else: return strg[lastCR + 1:] ``` ###Assistant : Returns the line of text containing loc within a string, counting newlines as line separators. " 107,"def _parse_configs(self, config_files): formatted = """" for cfile in config_files: fname = os.path.basename(cfile) ext = os.path.splitext(cfile)[1] formatted += f""\n--------- {fname} ---------\n"" if ext == "".ini"": formatted += self._parse_ini(cfile) elif fname == "".faceswap"": formatted += self._parse_json(cfile) return formatted "," Parse the given list of config files into a human readable format. Parameters ---------- config_files: list A list of paths to the faceswap config files Returns ------- str The current configuration in the config files formatted in a human readable format ",41,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _parse_configs(self, config_files): formatted = """" for cfile in config_files: fname = os.path.basename(cfile) ext = os.path.splitext(cfile)[1] formatted += f""\n--------- {fname} ---------\n"" if ext == "".ini"": formatted += self._parse_ini(cfile) elif fname == "".faceswap"": formatted += self._parse_json(cfile) return formatted ``` ###Assistant : Parse the given list of config files into a human readable format. Parameters ---------- config_files: list A list of paths to the faceswap config files Returns ------- str The current configuration in the config files formatted in a human readable format " 108,"def parent(self) -> DOMNode: if self._parent is None: raise NoParent(f""{self} has no parent"") assert isinstance(self._parent, DOMNode) return self._parent ","Get the parent node. Raises: NoParent: If this is the root node. Returns: DOMNode: The node which is the direct parent of this node. 
",24,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parent(self) -> DOMNode: if self._parent is None: raise NoParent(f""{self} has no parent"") assert isinstance(self._parent, DOMNode) return self._parent ``` ###Assistant : Get the parent node. Raises: NoParent: If this is the root node. Returns: DOMNode: The node which is the direct parent of this node. " 109,"def user_documents_dir(self) -> str: documents_dir = _get_user_dirs_folder(""XDG_DOCUMENTS_DIR"") if documents_dir is None: documents_dir = os.environ.get(""XDG_DOCUMENTS_DIR"", """").strip() if not documents_dir: documents_dir = os.path.expanduser(""~/Documents"") return documents_dir "," :return: documents directory tied to the user, e.g. ``~/Documents`` ",9,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def user_documents_dir(self) -> str: documents_dir = _get_user_dirs_folder(""XDG_DOCUMENTS_DIR"") if documents_dir is None: documents_dir = os.environ.get(""XDG_DOCUMENTS_DIR"", """").strip() if not documents_dir: documents_dir = os.path.expanduser(""~/Documents"") return documents_dir ``` ###Assistant : :return: documents directory tied to the user, e.g. ``~/Documents`` " 110,"def test_iforest_sparse(global_random_seed): rng = check_random_state(global_random_seed) X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng) grid = ParameterGrid({""max_samples"": [0.5, 1.0], ""bootstrap"": [True, False]}) for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in grid: # Trained on sparse format sparse_classifier = IsolationForest( n_estimators=10, random_state=global_random_seed, **params ).fit(X_train_sparse) sparse_results = sparse_classifier.predict(X_test_sparse) # Trained on dense format dense_classifier = IsolationForest( n_estimators=10, random_state=global_random_seed, **params ).fit(X_train) dense_results = dense_classifier.predict(X_test) assert_array_equal(sparse_results, dense_results) ",Check IForest for various parameter settings on sparse input.,9,65,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_iforest_sparse(global_random_seed): rng = check_random_state(global_random_seed) X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng) grid = ParameterGrid({""max_samples"": [0.5, 1.0], ""bootstrap"": [True, False]}) for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in grid: # Trained on sparse format sparse_classifier = IsolationForest( n_estimators=10, random_state=global_random_seed, **params ).fit(X_train_sparse) sparse_results = sparse_classifier.predict(X_test_sparse) # Trained on dense format dense_classifier = IsolationForest( n_estimators=10, random_state=global_random_seed, **params ).fit(X_train) dense_results = dense_classifier.predict(X_test) assert_array_equal(sparse_results, dense_results) ``` ###Assistant : Check IForest for various parameter settings on sparse input. 
" 111,"def get_scorer_names(): return sorted(_SCORERS.keys()) for name, metric in [ (""precision"", precision_score), (""recall"", recall_score), (""f1"", f1_score), (""jaccard"", jaccard_score), ]: _SCORERS[name] = make_scorer(metric, average=""binary"") for average in [""macro"", ""micro"", ""samples"", ""weighted""]: qualified_name = ""{0}_{1}"".format(name, average) _SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average) SCORERS = _DeprecatedScorers(_SCORERS) ","Get the names of all available scorers. These names can be passed to :func:`~sklearn.metrics.get_scorer` to retrieve the scorer object. Returns ------- list of str Names of all available scorers. ",29,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_scorer_names(): return sorted(_SCORERS.keys()) for name, metric in [ (""precision"", precision_score), (""recall"", recall_score), (""f1"", f1_score), (""jaccard"", jaccard_score), ]: _SCORERS[name] = make_scorer(metric, average=""binary"") for average in [""macro"", ""micro"", ""samples"", ""weighted""]: qualified_name = ""{0}_{1}"".format(name, average) _SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average) SCORERS = _DeprecatedScorers(_SCORERS) ``` ###Assistant : Get the names of all available scorers. These names can be passed to :func:`~sklearn.metrics.get_scorer` to retrieve the scorer object. Returns ------- list of str Names of all available scorers. " 112,"def _split_generators(self, dl_manager): # Download extract and return path of data file. dl_dir = dl_manager.download_and_extract(_URL) # Use swda/ folder. data_dir = os.path.join(dl_dir, ""swda"") # Handle partitions files: download extract and return paths of split files. downloaded_files = dl_manager.download(self._URLS) return [ # Return whole data path and train splits file downloaded path. datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={""data_dir"": data_dir, ""split_file"": downloaded_files[""train""]} ), # Return whole data path and dev splits file downloaded path. datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={""data_dir"": data_dir, ""split_file"": downloaded_files[""dev""]}, ), # Return whole data path and train splits file downloaded path. datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={""data_dir"": data_dir, ""split_file"": downloaded_files[""test""]} ), ] "," Returns SplitGenerators. This method is tasked with downloading/extracting the data and defining the splits. Args: dl_manager (:obj:`datasets.utils.download_manager.DownloadManager`): Download manager to download and extract data files from urls. Returns: :obj:`list[str]`: List of paths to data. ",34,95,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _split_generators(self, dl_manager): # Download extract and return path of data file. dl_dir = dl_manager.download_and_extract(_URL) # Use swda/ folder. data_dir = os.path.join(dl_dir, ""swda"") # Handle partitions files: download extract and return paths of split files. downloaded_files = dl_manager.download(self._URLS) return [ # Return whole data path and train splits file downloaded path. datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={""data_dir"": data_dir, ""split_file"": downloaded_files[""train""]} ), # Return whole data path and dev splits file downloaded path. 
datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={""data_dir"": data_dir, ""split_file"": downloaded_files[""dev""]}, ), # Return whole data path and train splits file downloaded path. datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={""data_dir"": data_dir, ""split_file"": downloaded_files[""test""]} ), ] ``` ###Assistant : Returns SplitGenerators. This method is tasked with downloading/extracting the data and defining the splits. Args: dl_manager (:obj:`datasets.utils.download_manager.DownloadManager`): Download manager to download and extract data files from urls. Returns: :obj:`list[str]`: List of paths to data. " 113,"def print_help(self): help_text = console.print(text=help_text, menu=""Stocks - Discovery"") ","Print help[cmds] [src][Geek of Wall St][/src] rtearn realtime earnings from and expected moves [src][Finnhub][/src] pipo past IPOs dates fipo future IPOs dates [src][Yahoo Finance][/src] gainers show latest top gainers losers show latest top losers ugs undervalued stocks with revenue and earnings growth in excess of 25% gtech tech stocks with revenue and earnings growth more than 25% active most active stocks by intraday trade volume ulc potentially undervalued large cap stocks asc small cap stocks with earnings growth rates better than 25% [src][Fidelity][/src] ford orders by Fidelity Customers [src][Cathiesark.com][/src] arkord orders by ARK Investment Management LLC [src][Seeking Alpha][/src] upcoming upcoming earnings release dates trending trending news cnews customized news (buybacks, ipos, spacs, healthcare, politics) [src][Shortinterest.com][/src] lowfloat low float stocks under 10M shares float [src][Pennystockflow.com][/src] hotpenny today's hot penny stocks [src][NASDAQ Data Link (Formerly Quandl)][/src] rtat top 10 retail traded stocks per day[/cmds] ",142,8,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def print_help(self): help_text = console.print(text=help_text, menu=""Stocks - Discovery"") ``` ###Assistant : Print help[cmds] [src][Geek of Wall St][/src] rtearn realtime earnings from and expected moves [src][Finnhub][/src] pipo past IPOs dates fipo future IPOs dates [src][Yahoo Finance][/src] gainers show latest top gainers losers show latest top losers ugs undervalued stocks with revenue and earnings growth in excess of 25% gtech tech stocks with revenue and earnings growth more than 25% active most active stocks by intraday trade volume ulc potentially undervalued large cap stocks asc small cap stocks with earnings growth rates better than 25% [src][Fidelity][/src] ford orders by Fidelity Customers [src][Cathiesark.com][/src] arkord orders by ARK Investment Management LLC [src][Seeking Alpha][/src] upcoming upcoming earnings release dates trending trending news cnews customized news (buybacks, ipos, spacs, healthcare, politics) [src][Shortinterest.com][/src] lowfloat low float stocks under 10M shares float [src][Pennystockflow.com][/src] hotpenny today's hot penny stocks [src][NASDAQ Data Link (Formerly Quandl)][/src] rtat top 10 retail traded stocks per day[/cmds] " 114,"def handle_fk_field(self, obj, field): self._start_relational_field(field) related_att = getattr(obj, field.get_attname()) if related_att is not None: if self.use_natural_foreign_keys and hasattr( field.remote_field.model, ""natural_key"" ): related = getattr(obj, field.name) # If related object has a natural key, use it related = related.natural_key() # Iterable natural keys are rolled out as subelements for key_value in related: self.xml.startElement(""natural"", {}) self.xml.characters(str(key_value)) self.xml.endElement(""natural"") else: self.xml.characters(str(related_att)) else: self.xml.addQuickElement(""None"") self.xml.endElement(""field"") "," Handle a ForeignKey (they need to be treated slightly differently from regular fields). ",13,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def handle_fk_field(self, obj, field): self._start_relational_field(field) related_att = getattr(obj, field.get_attname()) if related_att is not None: if self.use_natural_foreign_keys and hasattr( field.remote_field.model, ""natural_key"" ): related = getattr(obj, field.name) # If related object has a natural key, use it related = related.natural_key() # Iterable natural keys are rolled out as subelements for key_value in related: self.xml.startElement(""natural"", {}) self.xml.characters(str(key_value)) self.xml.endElement(""natural"") else: self.xml.characters(str(related_att)) else: self.xml.addQuickElement(""None"") self.xml.endElement(""field"") ``` ###Assistant : Handle a ForeignKey (they need to be treated slightly differently from regular fields). " 115,"def test_metrics_folder(): with _ray_start(include_dashboard=True) as context: session_dir = context[""session_dir""] assert os.path.exists( f""{session_dir}/metrics/grafana/provisioning/dashboards/default.yml"" ) assert os.path.exists( f""{session_dir}/metrics/grafana/provisioning/dashboards"" ""/default_grafana_dashboard.json"" ) assert os.path.exists( f""{session_dir}/metrics/grafana/provisioning/datasources/default.yml"" ) assert os.path.exists(f""{session_dir}/metrics/prometheus/prometheus.yml"") "," Tests that the default dashboard files get created. ",8,24,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_metrics_folder(): with _ray_start(include_dashboard=True) as context: session_dir = context[""session_dir""] assert os.path.exists( f""{session_dir}/metrics/grafana/provisioning/dashboards/default.yml"" ) assert os.path.exists( f""{session_dir}/metrics/grafana/provisioning/dashboards"" ""/default_grafana_dashboard.json"" ) assert os.path.exists( f""{session_dir}/metrics/grafana/provisioning/datasources/default.yml"" ) assert os.path.exists(f""{session_dir}/metrics/prometheus/prometheus.yml"") ``` ###Assistant : Tests that the default dashboard files get created. " 116,"def state_dict(self): state_dict = {} state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['cur_scale'] = self.cur_scale state_dict['cur_iter'] = self.cur_iter if state_dict['dynamic_loss_scale']: state_dict['last_overflow_iter'] = self.last_overflow_iter state_dict['scale_factor'] = self.scale_factor state_dict['scale_window'] = self.scale_window state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict() state_dict['fp32_groups_flat'] = self.fp32_groups_flat state_dict['clip_grad'] = self.clip_grad return state_dict # Refresh fp32 master params from fp16 copies"," Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, ""saved.pth"") ",39,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def state_dict(self): state_dict = {} state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['cur_scale'] = self.cur_scale state_dict['cur_iter'] = self.cur_iter if state_dict['dynamic_loss_scale']: state_dict['last_overflow_iter'] = self.last_overflow_iter state_dict['scale_factor'] = self.scale_factor state_dict['scale_window'] = self.scale_window state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict() state_dict['fp32_groups_flat'] = self.fp32_groups_flat state_dict['clip_grad'] = self.clip_grad return state_dict # Refresh fp32 master params from fp16 copies ``` ###Assistant : Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, ""saved.pth"") " 117,"def _update_mean_variance(n_past, mu, var, X, sample_weight=None): if X.shape[0] == 0: return mu, var # Compute (potentially weighted) mean and variance of new datapoints if sample_weight is not None: n_new = float(sample_weight.sum()) if np.isclose(n_new, 0.0): return mu, var new_mu = np.average(X, axis=0, weights=sample_weight) new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight) else: n_new = X.shape[0] new_var = np.var(X, axis=0) new_mu = np.mean(X, axis=0) if n_past == 0: return new_mu, new_var n_total = float(n_past + n_new) # Combine mean of old and new data, taking into consideration # (weighted) number of observations total_mu = (n_new * new_mu + n_past * mu) / n_total # Combine variance of old and new data, taking into consideration # (weighted) number of observations. 
This is achieved by combining # the sum-of-squared-differences (ssd) old_ssd = n_past * var new_ssd = n_new * new_var total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2 total_var = total_ssd / n_total return total_mu, total_var ","Compute online update of Gaussian mean and variance. Given starting sample count, mean, and variance, a new set of points X, and optionally sample weights, return the updated mean and variance. (NB - each dimension (column) in X is treated as independent -- you get variance, not covariance). Can take scalar mean and variance, or vector mean and variance to simultaneously update a number of independent Gaussians. See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Parameters ---------- n_past : int Number of samples represented in old mean and variance. If sample weights were given, this should contain the sum of sample weights represented in old mean and variance. mu : array-like of shape (number of Gaussians,) Means for Gaussians in original set. var : array-like of shape (number of Gaussians,) Variances for Gaussians in original set. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- total_mu : array-like of shape (number of Gaussians,) Updated mean for each Gaussian over the combined set. total_var : array-like of shape (number of Gaussians,) Updated variance for each Gaussian over the combined set. ",191,162,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _update_mean_variance(n_past, mu, var, X, sample_weight=None): if X.shape[0] == 0: return mu, var # Compute (potentially weighted) mean and variance of new datapoints if sample_weight is not None: n_new = float(sample_weight.sum()) if np.isclose(n_new, 0.0): return mu, var new_mu = np.average(X, axis=0, weights=sample_weight) new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight) else: n_new = X.shape[0] new_var = np.var(X, axis=0) new_mu = np.mean(X, axis=0) if n_past == 0: return new_mu, new_var n_total = float(n_past + n_new) # Combine mean of old and new data, taking into consideration # (weighted) number of observations total_mu = (n_new * new_mu + n_past * mu) / n_total # Combine variance of old and new data, taking into consideration # (weighted) number of observations. This is achieved by combining # the sum-of-squared-differences (ssd) old_ssd = n_past * var new_ssd = n_new * new_var total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2 total_var = total_ssd / n_total return total_mu, total_var ``` ###Assistant : Compute online update of Gaussian mean and variance. Given starting sample count, mean, and variance, a new set of points X, and optionally sample weights, return the updated mean and variance. (NB - each dimension (column) in X is treated as independent -- you get variance, not covariance). Can take scalar mean and variance, or vector mean and variance to simultaneously update a number of independent Gaussians. See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Parameters ---------- n_past : int Number of samples represented in old mean and variance. If sample weights were given, this should contain the sum of sample weights represented in old mean and variance. 
mu : array-like of shape (number of Gaussians,) Means for Gaussians in original set. var : array-like of shape (number of Gaussians,) Variances for Gaussians in original set. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- total_mu : array-like of shape (number of Gaussians,) Updated mean for each Gaussian over the combined set. total_var : array-like of shape (number of Gaussians,) Updated variance for each Gaussian over the combined set. " 118,"def is_prime(n): sympy_deprecation_warning( , deprecated_since_version=""1.11"", active_deprecations_target='deprecated-carmichael-static-methods', ) return isprime(n) "," is_prime is just a wrapper around sympy.ntheory.primetest.isprime so use that directly instead. ",12,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_prime(n): sympy_deprecation_warning( , deprecated_since_version=""1.11"", active_deprecations_target='deprecated-carmichael-static-methods', ) return isprime(n) ``` ###Assistant : is_prime is just a wrapper around sympy.ntheory.primetest.isprime so use that directly instead. " 119,"def standard_b64decode(s): return b64decode(s) _urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_') _urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') ","Decode bytes encoded with the standard Base64 alphabet. Argument s is a bytes-like object or ASCII string to decode. The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded. Characters that are not in the standard alphabet are discarded prior to the padding check. ",52,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def standard_b64decode(s): return b64decode(s) _urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_') _urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') ``` ###Assistant : Decode bytes encoded with the standard Base64 alphabet. Argument s is a bytes-like object or ASCII string to decode. The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded. Characters that are not in the standard alphabet are discarded prior to the padding check. " 120,"def inception_score(self, imgs, batch_size=32, splits=1): N = len(imgs) dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size) up = nn.Upsample( size=(28, 28), mode=""bilinear"", align_corners=False, # This is to reduce user warnings from torch. ).type(torch.FloatTensor) ",Calculate the inception score of the generated images.,8,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inception_score(self, imgs, batch_size=32, splits=1): N = len(imgs) dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size) up = nn.Upsample( size=(28, 28), mode=""bilinear"", align_corners=False, # This is to reduce user warnings from torch. ).type(torch.FloatTensor) ``` ###Assistant : Calculate the inception score of the generated images. 
" 121,"def execute(): frappe.reload_doc(""stock"", ""doctype"", ""shipment"") # update submitted status frappe.db.sql( ) # update cancelled status frappe.db.sql( ) ","UPDATE `tabShipment` SET status = ""Submitted"" WHERE status = ""Draft"" AND docstatus = 1UPDATE `tabShipment` SET status = ""Cancelled"" WHERE status = ""Draft"" AND docstatus = 2",27,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute(): frappe.reload_doc(""stock"", ""doctype"", ""shipment"") # update submitted status frappe.db.sql( ) # update cancelled status frappe.db.sql( ) ``` ###Assistant : UPDATE `tabShipment` SET status = ""Submitted"" WHERE status = ""Draft"" AND docstatus = 1UPDATE `tabShipment` SET status = ""Cancelled"" WHERE status = ""Draft"" AND docstatus = 2 " 122,"def url(self, name): raise NotImplementedError(""subclasses of Storage must provide a url() method"") "," Return an absolute URL where the file's contents can be accessed directly by a web browser. ",16,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def url(self, name): raise NotImplementedError(""subclasses of Storage must provide a url() method"") ``` ###Assistant : Return an absolute URL where the file's contents can be accessed directly by a web browser. " 123,"def load_attributes_from_hdf5_group(group, name): if name in group.attrs: data = [ n.decode(""utf8"") if hasattr(n, ""decode"") else n for n in group.attrs[name] ] else: data = [] chunk_id = 0 while ""%s%d"" % (name, chunk_id) in group.attrs: data.extend( [ n.decode(""utf8"") if hasattr(n, ""decode"") else n for n in group.attrs[""%s%d"" % (name, chunk_id)] ] ) chunk_id += 1 return data ","Loads attributes of the specified name from the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to load. Returns: data: Attributes data. ",51,57,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_attributes_from_hdf5_group(group, name): if name in group.attrs: data = [ n.decode(""utf8"") if hasattr(n, ""decode"") else n for n in group.attrs[name] ] else: data = [] chunk_id = 0 while ""%s%d"" % (name, chunk_id) in group.attrs: data.extend( [ n.decode(""utf8"") if hasattr(n, ""decode"") else n for n in group.attrs[""%s%d"" % (name, chunk_id)] ] ) chunk_id += 1 return data ``` ###Assistant : Loads attributes of the specified name from the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to load. Returns: data: Attributes data. " 124,"def _find_root_block_schema(block_schemas_with_references): return next( ( block_schema for ( block_schema, _, parent_block_schema_id, ) in block_schemas_with_references if parent_block_schema_id is None ), None, ) "," Attempts to find the root block schema from a list of block schemas with references. Returns None if a root block schema is not found. Returns only the first potential root block schema if multiple are found. ",37,21,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _find_root_block_schema(block_schemas_with_references): return next( ( block_schema for ( block_schema, _, parent_block_schema_id, ) in block_schemas_with_references if parent_block_schema_id is None ), None, ) ``` ###Assistant : Attempts to find the root block schema from a list of block schemas with references. Returns None if a root block schema is not found. Returns only the first potential root block schema if multiple are found. " 125,"def feature_embedding(input_feats, out_feat_len): assert input_feats.ndim == 2 assert isinstance(out_feat_len, int) assert out_feat_len >= input_feats.shape[1] num_nodes = input_feats.shape[0] feat_dim = input_feats.shape[1] feat_repeat_times = out_feat_len // feat_dim residue_dim = out_feat_len % feat_dim if residue_dim > 0: embed_wave = np.array([ np.power(1000, 2.0 * (j // 2) / feat_repeat_times + 1) for j in range(feat_repeat_times + 1) ]).reshape((feat_repeat_times + 1, 1, 1)) repeat_feats = np.repeat( np.expand_dims( input_feats, axis=0), feat_repeat_times, axis=0) residue_feats = np.hstack([ input_feats[:, 0:residue_dim], np.zeros( (num_nodes, feat_dim - residue_dim)) ]) residue_feats = np.expand_dims(residue_feats, axis=0) repeat_feats = np.concatenate([repeat_feats, residue_feats], axis=0) embedded_feats = repeat_feats / embed_wave embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2]) embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2]) embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape( (num_nodes, -1))[:, 0:out_feat_len] else: embed_wave = np.array([ np.power(1000, 2.0 * (j // 2) / feat_repeat_times) for j in range(feat_repeat_times) ]).reshape((feat_repeat_times, 1, 1)) repeat_feats = np.repeat( np.expand_dims( input_feats, axis=0), feat_repeat_times, axis=0) embedded_feats = repeat_feats / embed_wave embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2]) embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2]) embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape( (num_nodes, -1)).astype(np.float32) return embedded_feats ","Embed features. This code was partially adapted from https://github.com/GXYM/DRRG licensed under the MIT license. Args: input_feats (ndarray): The input features of shape (N, d), where N is the number of nodes in graph, d is the input feature vector length. out_feat_len (int): The length of output feature vector. Returns: embedded_feats (ndarray): The embedded features. ",54,162,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def feature_embedding(input_feats, out_feat_len): assert input_feats.ndim == 2 assert isinstance(out_feat_len, int) assert out_feat_len >= input_feats.shape[1] num_nodes = input_feats.shape[0] feat_dim = input_feats.shape[1] feat_repeat_times = out_feat_len // feat_dim residue_dim = out_feat_len % feat_dim if residue_dim > 0: embed_wave = np.array([ np.power(1000, 2.0 * (j // 2) / feat_repeat_times + 1) for j in range(feat_repeat_times + 1) ]).reshape((feat_repeat_times + 1, 1, 1)) repeat_feats = np.repeat( np.expand_dims( input_feats, axis=0), feat_repeat_times, axis=0) residue_feats = np.hstack([ input_feats[:, 0:residue_dim], np.zeros( (num_nodes, feat_dim - residue_dim)) ]) residue_feats = np.expand_dims(residue_feats, axis=0) repeat_feats = np.concatenate([repeat_feats, residue_feats], axis=0) embedded_feats = repeat_feats / embed_wave embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2]) embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2]) embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape( (num_nodes, -1))[:, 0:out_feat_len] else: embed_wave = np.array([ np.power(1000, 2.0 * (j // 2) / feat_repeat_times) for j in range(feat_repeat_times) ]).reshape((feat_repeat_times, 1, 1)) repeat_feats = np.repeat( np.expand_dims( input_feats, axis=0), feat_repeat_times, axis=0) embedded_feats = repeat_feats / embed_wave embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2]) embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2]) embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape( (num_nodes, -1)).astype(np.float32) return embedded_feats ``` ###Assistant : Embed features. This code was partially adapted from https://github.com/GXYM/DRRG licensed under the MIT license. Args: input_feats (ndarray): The input features of shape (N, d), where N is the number of nodes in graph, d is the input feature vector length. out_feat_len (int): The length of output feature vector. Returns: embedded_feats (ndarray): The embedded features. " 126,"def decision_function(self, X): check_is_fitted(self) xp, _ = get_namespace(X) X = self._validate_data(X, accept_sparse=""csr"", reset=False) scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return xp.reshape(scores, -1) if scores.shape[1] == 1 else scores "," Predict confidence scores for samples. The confidence score for a sample is proportional to the signed distance of that sample to the hyperplane. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix for which we want to get the confidence scores. Returns ------- scores : ndarray of shape (n_samples,) or (n_samples, n_classes) Confidence scores per `(n_samples, n_classes)` combination. In the binary case, confidence score for `self.classes_[1]` where >0 means this class would be predicted. ",79,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def decision_function(self, X): check_is_fitted(self) xp, _ = get_namespace(X) X = self._validate_data(X, accept_sparse=""csr"", reset=False) scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return xp.reshape(scores, -1) if scores.shape[1] == 1 else scores ``` ###Assistant : Predict confidence scores for samples. The confidence score for a sample is proportional to the signed distance of that sample to the hyperplane. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix for which we want to get the confidence scores. Returns ------- scores : ndarray of shape (n_samples,) or (n_samples, n_classes) Confidence scores per `(n_samples, n_classes)` combination. In the binary case, confidence score for `self.classes_[1]` where >0 means this class would be predicted. " 127,"def get_ranking(pairs): if len(pairs) == 1: return list(pairs[0]) w = get_winner(pairs) # now remove the winner from the list of pairs p_new = np.array([(a, b) for a, b in pairs if a != w]) return [w] + get_ranking(p_new) "," Abuses concordance property to get a (not necessarily unqiue) ranking. The lack of uniqueness is due to the potential existance of multiple equally ranked winners. We have to pick one, which is where the non-uniqueness comes from ",37,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_ranking(pairs): if len(pairs) == 1: return list(pairs[0]) w = get_winner(pairs) # now remove the winner from the list of pairs p_new = np.array([(a, b) for a, b in pairs if a != w]) return [w] + get_ranking(p_new) ``` ###Assistant : Abuses concordance property to get a (not necessarily unqiue) ranking. The lack of uniqueness is due to the potential existance of multiple equally ranked winners. We have to pick one, which is where the non-uniqueness comes from " 128,"def backup_dir(dir, ext="".bak""): # type: (str, str) -> str n = 1 extension = ext while os.path.exists(dir + extension): n += 1 extension = ext + str(n) return dir + extension ","Figure out the name of a directory to back up the given dir to (adding .bak, .bak2, etc)",18,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def backup_dir(dir, ext="".bak""): # type: (str, str) -> str n = 1 extension = ext while os.path.exists(dir + extension): n += 1 extension = ext + str(n) return dir + extension ``` ###Assistant : Figure out the name of a directory to back up the given dir to (adding .bak, .bak2, etc) " 129,"def synchronize(local_filters, remotes, update_remote=True): remote_filters = ray.get( [r.get_filters.remote(flush_after=True) for r in remotes] ) for rf in remote_filters: for k in local_filters: local_filters[k].apply_changes(rf[k], with_buffer=False) if update_remote: copies = {k: v.as_serializable() for k, v in local_filters.items()} remote_copy = ray.put(copies) [r.sync_filters.remote(remote_copy) for r in remotes] ","Aggregates all filters from remote evaluators. Local copy is updated and then broadcasted to all remote evaluators. Args: local_filters: Filters to be synchronized. remotes: Remote evaluators with filters. update_remote: Whether to push updates to remote filters. ",36,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def synchronize(local_filters, remotes, update_remote=True): remote_filters = ray.get( [r.get_filters.remote(flush_after=True) for r in remotes] ) for rf in remote_filters: for k in local_filters: local_filters[k].apply_changes(rf[k], with_buffer=False) if update_remote: copies = {k: v.as_serializable() for k, v in local_filters.items()} remote_copy = ray.put(copies) [r.sync_filters.remote(remote_copy) for r in remotes] ``` ###Assistant : Aggregates all filters from remote evaluators. Local copy is updated and then broadcasted to all remote evaluators. 
Args: local_filters: Filters to be synchronized. remotes: Remote evaluators with filters. update_remote: Whether to push updates to remote filters. " 130,"def test_estimator_empty_instance_dict(estimator): state = estimator.__getstate__() expected = {""_sklearn_version"": sklearn.__version__} assert state == expected # this should not raise pickle.loads(pickle.dumps(BaseEstimator())) ","Check that ``__getstate__`` returns an empty ``dict`` with an empty instance. Python 3.11+ changed behaviour by returning ``None`` instead of raising an ``AttributeError``. Non-regression test for gh-25188. ",27,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_estimator_empty_instance_dict(estimator): state = estimator.__getstate__() expected = {""_sklearn_version"": sklearn.__version__} assert state == expected # this should not raise pickle.loads(pickle.dumps(BaseEstimator())) ``` ###Assistant : Check that ``__getstate__`` returns an empty ``dict`` with an empty instance. Python 3.11+ changed behaviour by returning ``None`` instead of raising an ``AttributeError``. Non-regression test for gh-25188. " 131,"def test_with_variables(self): context = Context({""name"": ""jonathan wells""}) template = expected = self.assertHTMLEqual(expected, Template(template).render(context)) "," {% load wagtailadmin_tags %} {% fragment as my_fragment %}

<p>Hello, {{ name|title }}</p>

{% endfragment %} Text coming after: {{ my_fragment }} Text coming after:

<p>Hello, Jonathan Wells</p>

",28,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_with_variables(self): context = Context({""name"": ""jonathan wells""}) template = expected = self.assertHTMLEqual(expected, Template(template).render(context)) ``` ###Assistant : {% load wagtailadmin_tags %} {% fragment as my_fragment %}

<p>Hello, {{ name|title }}</p>

{% endfragment %} Text coming after: {{ my_fragment }} Text coming after:

<p>Hello, Jonathan Wells</p>

" 132,"def _extract_interpreter(b_module_data): interpreter = None args = [] b_lines = b_module_data.split(b""\n"", 1) if b_lines[0].startswith(b""#!""): b_shebang = b_lines[0].strip() # shlex.split on python-2.6 needs bytes. On python-3.x it needs text cli_split = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict')) # convert args to text cli_split = [to_text(a, errors='surrogate_or_strict') for a in cli_split] interpreter = cli_split[0] args = cli_split[1:] return interpreter, args "," Used to extract shebang expression from binary module data and return a text string with the shebang, or None if no shebang is detected. ",24,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _extract_interpreter(b_module_data): interpreter = None args = [] b_lines = b_module_data.split(b""\n"", 1) if b_lines[0].startswith(b""#!""): b_shebang = b_lines[0].strip() # shlex.split on python-2.6 needs bytes. On python-3.x it needs text cli_split = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict')) # convert args to text cli_split = [to_text(a, errors='surrogate_or_strict') for a in cli_split] interpreter = cli_split[0] args = cli_split[1:] return interpreter, args ``` ###Assistant : Used to extract shebang expression from binary module data and return a text string with the shebang, or None if no shebang is detected. " 133,"def test_resource_exhausted_info(self): # generate some random data to be captured implicitly in training func. from sklearn.datasets import fetch_olivetti_faces a_large_array = [] for i in range(50): a_large_array.append(fetch_olivetti_faces()) ","This is to test if helpful information is displayed when the objects captured in trainable/training function are too large and RESOURCES_EXHAUSTED error of gRPC is triggered.",26,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_resource_exhausted_info(self): # generate some random data to be captured implicitly in training func. from sklearn.datasets import fetch_olivetti_faces a_large_array = [] for i in range(50): a_large_array.append(fetch_olivetti_faces()) ``` ###Assistant : This is to test if helpful information is displayed when the objects captured in trainable/training function are too large and RESOURCES_EXHAUSTED error of gRPC is triggered. " 134,"def testing_session_settings(): with tempfile.TemporaryDirectory() as tmpdir: profile = prefect.settings.Profile( name=""test-session"", settings={ # Set PREFECT_HOME to a temporary directory to avoid clobbering # environments and settings PREFECT_HOME: tmpdir, PREFECT_PROFILES_PATH: ""$PREFECT_HOME/profiles.toml"", # Enable debug logging PREFECT_LOGGING_LEVEL: ""DEBUG"", # Disable shipping logs to the API; # can be enabled by the `enable_orion_handler` mark PREFECT_LOGGING_ORION_ENABLED: False, # Disable services for test runs PREFECT_ORION_ANALYTICS_ENABLED: False, PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED: False, PREFECT_ORION_SERVICES_SCHEDULER_ENABLED: False, }, source=__file__, ) with prefect.settings.use_profile( profile, override_environment_variables=True, include_current_context=False, ) as ctx: yield ctx "," Creates a fixture for the scope of the test session that modifies setting defaults. This ensures that tests are isolated from existing settings, databases, etc. ",25,77,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def testing_session_settings(): with tempfile.TemporaryDirectory() as tmpdir: profile = prefect.settings.Profile( name=""test-session"", settings={ # Set PREFECT_HOME to a temporary directory to avoid clobbering # environments and settings PREFECT_HOME: tmpdir, PREFECT_PROFILES_PATH: ""$PREFECT_HOME/profiles.toml"", # Enable debug logging PREFECT_LOGGING_LEVEL: ""DEBUG"", # Disable shipping logs to the API; # can be enabled by the `enable_orion_handler` mark PREFECT_LOGGING_ORION_ENABLED: False, # Disable services for test runs PREFECT_ORION_ANALYTICS_ENABLED: False, PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED: False, PREFECT_ORION_SERVICES_SCHEDULER_ENABLED: False, }, source=__file__, ) with prefect.settings.use_profile( profile, override_environment_variables=True, include_current_context=False, ) as ctx: yield ctx ``` ###Assistant : Creates a fixture for the scope of the test session that modifies setting defaults. This ensures that tests are isolated from existing settings, databases, etc. " 135,"def bot_start(self, **kwargs) -> None: self.buy_rsi = IntParameter([0, 50], default=30, space='buy') "," Parameters can also be defined here ... ",7,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bot_start(self, **kwargs) -> None: self.buy_rsi = IntParameter([0, 50], default=30, space='buy') ``` ###Assistant : Parameters can also be defined here ... " 136,"def getvalue(self): if callable(getattr(self.stream, ""getvalue"", None)): return self.stream.getvalue() "," Return the fully serialized queryset (or None if the output stream is not seekable). ",14,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getvalue(self): if callable(getattr(self.stream, ""getvalue"", None)): return self.stream.getvalue() ``` ###Assistant : Return the fully serialized queryset (or None if the output stream is not seekable). " 137,"def boxplot(self, X, win=None, env=None, opts=None): X = np.squeeze(X) assert X.ndim == 1 or X.ndim == 2, ""X should be one or two-dimensional"" if X.ndim == 1: X = X[:, None] opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) if opts.get(""legend"") is not None: assert ( len(opts[""legend""]) == X.shape[1] ), ""number of legened labels must match number of columns"" data = [] for k in range(X.shape[1]): _data = { ""y"": X.take(k, 1).tolist(), ""type"": ""box"", } if opts.get(""legend""): _data[""name""] = opts[""legend""][k] else: _data[""name""] = ""column "" + str(k) data.append(_data) return self._send( { ""data"": data, ""win"": win, ""eid"": env, ""layout"": _opts2layout(opts), ""opts"": opts, } ) "," This function draws boxplots of the specified data. It takes as input an `N` or an `NxM` tensor `X` that specifies the `N` data values of which to construct the `M` boxplots. The following plot-specific `opts` are currently supported: - `opts.legend`: labels for each of the columns in `X` ",49,106,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def boxplot(self, X, win=None, env=None, opts=None): X = np.squeeze(X) assert X.ndim == 1 or X.ndim == 2, ""X should be one or two-dimensional"" if X.ndim == 1: X = X[:, None] opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) if opts.get(""legend"") is not None: assert ( len(opts[""legend""]) == X.shape[1] ), ""number of legened labels must match number of columns"" data = [] for k in range(X.shape[1]): _data = { ""y"": X.take(k, 1).tolist(), ""type"": ""box"", } if opts.get(""legend""): _data[""name""] = opts[""legend""][k] else: _data[""name""] = ""column "" + str(k) data.append(_data) return self._send( { ""data"": data, ""win"": win, ""eid"": env, ""layout"": _opts2layout(opts), ""opts"": opts, } ) ``` ###Assistant : This function draws boxplots of the specified data. It takes as input an `N` or an `NxM` tensor `X` that specifies the `N` data values of which to construct the `M` boxplots. The following plot-specific `opts` are currently supported: - `opts.legend`: labels for each of the columns in `X` " 138,"def test_body_after_POST_multipart_related(self): # Ticket #9054 # There are cases in which the multipart data is related instead of # being a binary upload, in which case it should still be accessible # via body. payload_data = b""\r\n"".join([ b'--boundary', b'Content-ID: id; name=""name""', b'', b'value', b'--boundary--' ]) payload = FakePayload(payload_data) request = WSGIRequest({ 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/related; boundary=boundary', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload, }) self.assertEqual(request.POST, {}) self.assertEqual(request.body, payload_data) "," Reading body after parsing multipart that isn't form-data is allowed ",10,65,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_body_after_POST_multipart_related(self): # Ticket #9054 # There are cases in which the multipart data is related instead of # being a binary upload, in which case it should still be accessible # via body. payload_data = b""\r\n"".join([ b'--boundary', b'Content-ID: id; name=""name""', b'', b'value', b'--boundary--' ]) payload = FakePayload(payload_data) request = WSGIRequest({ 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/related; boundary=boundary', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload, }) self.assertEqual(request.POST, {}) self.assertEqual(request.body, payload_data) ``` ###Assistant : Reading body after parsing multipart that isn't form-data is allowed " 139,"def query(self, query, **kwargs) -> Result: try: if self.db_conn: result = self.db_conn.aql.execute(query, **kwargs) return result else: raise AirflowException( f""Failed to execute AQLQuery, error connecting to database: {self.database}"" ) except AQLQueryExecuteError as error: raise AirflowException(f""Failed to execute AQLQuery, error: {str(error)}"") "," Function to create a arangodb session and execute the AQL query in the session. :param query: AQL query :return: Result ",20,39,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def query(self, query, **kwargs) -> Result: try: if self.db_conn: result = self.db_conn.aql.execute(query, **kwargs) return result else: raise AirflowException( f""Failed to execute AQLQuery, error connecting to database: {self.database}"" ) except AQLQueryExecuteError as error: raise AirflowException(f""Failed to execute AQLQuery, error: {str(error)}"") ``` ###Assistant : Function to create a arangodb session and execute the AQL query in the session. :param query: AQL query :return: Result " 140,"def test_post_process_frame(feature_names, target_names): pd = pytest.importorskip(""pandas"") X_original = pd.DataFrame( { ""col_int_as_integer"": [1, 2, 3], ""col_int_as_numeric"": [1, 2, 3], ""col_float_as_real"": [1.0, 2.0, 3.0], ""col_float_as_numeric"": [1.0, 2.0, 3.0], ""col_categorical"": [""a"", ""b"", ""c""], ""col_string"": [""a"", ""b"", ""c""], } ) X, y = _post_process_frame(X_original, feature_names, target_names) assert isinstance(X, pd.DataFrame) if len(target_names) >= 2: assert isinstance(y, pd.DataFrame) elif len(target_names) == 1: assert isinstance(y, pd.Series) else: assert y is None ",Check the behaviour of the post-processing function for splitting a dataframe.,11,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_post_process_frame(feature_names, target_names): pd = pytest.importorskip(""pandas"") X_original = pd.DataFrame( { ""col_int_as_integer"": [1, 2, 3], ""col_int_as_numeric"": [1, 2, 3], ""col_float_as_real"": [1.0, 2.0, 3.0], ""col_float_as_numeric"": [1.0, 2.0, 3.0], ""col_categorical"": [""a"", ""b"", ""c""], ""col_string"": [""a"", ""b"", ""c""], } ) X, y = _post_process_frame(X_original, feature_names, target_names) assert isinstance(X, pd.DataFrame) if len(target_names) >= 2: assert isinstance(y, pd.DataFrame) elif len(target_names) == 1: assert isinstance(y, pd.Series) else: assert y is None ``` ###Assistant : Check the behaviour of the post-processing function for splitting a dataframe. " 141,"def cleanse_setting(self, key, value): try: is_sensitive = self.hidden_settings.search(key) except TypeError: is_sensitive = False if is_sensitive: cleansed = self.cleansed_substitute elif isinstance(value, dict): cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()} elif isinstance(value, list): cleansed = [self.cleanse_setting("""", v) for v in value] elif isinstance(value, tuple): cleansed = tuple([self.cleanse_setting("""", v) for v in value]) else: cleansed = value if callable(cleansed): cleansed = CallableSettingWrapper(cleansed) return cleansed "," Cleanse an individual setting key/value of sensitive content. If the value is a dictionary, recursively cleanse the keys in that dictionary. ",21,64,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def cleanse_setting(self, key, value): try: is_sensitive = self.hidden_settings.search(key) except TypeError: is_sensitive = False if is_sensitive: cleansed = self.cleansed_substitute elif isinstance(value, dict): cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()} elif isinstance(value, list): cleansed = [self.cleanse_setting("""", v) for v in value] elif isinstance(value, tuple): cleansed = tuple([self.cleanse_setting("""", v) for v in value]) else: cleansed = value if callable(cleansed): cleansed = CallableSettingWrapper(cleansed) return cleansed ``` ###Assistant : Cleanse an individual setting key/value of sensitive content. If the value is a dictionary, recursively cleanse the keys in that dictionary. " 142,"def func_dump(func): if os.name == ""nt"": raw_code = marshal.dumps(func.__code__).replace(b""\\"", b""/"") code = codecs.encode(raw_code, ""base64"").decode(""ascii"") else: raw_code = marshal.dumps(func.__code__) code = codecs.encode(raw_code, ""base64"").decode(""ascii"") defaults = func.__defaults__ if func.__closure__: closure = tuple(c.cell_contents for c in func.__closure__) else: closure = None return code, defaults, closure ","Serializes a user defined function. Args: func: the function to serialize. Returns: A tuple `(code, defaults, closure)`. ",17,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def func_dump(func): if os.name == ""nt"": raw_code = marshal.dumps(func.__code__).replace(b""\\"", b""/"") code = codecs.encode(raw_code, ""base64"").decode(""ascii"") else: raw_code = marshal.dumps(func.__code__) code = codecs.encode(raw_code, ""base64"").decode(""ascii"") defaults = func.__defaults__ if func.__closure__: closure = tuple(c.cell_contents for c in func.__closure__) else: closure = None return code, defaults, closure ``` ###Assistant : Serializes a user defined function. Args: func: the function to serialize. Returns: A tuple `(code, defaults, closure)`. " 143,"def cauchy_upper_bound(f): if not f.lev: return dup_cauchy_upper_bound(f.rep, f.dom) else: raise ValueError('univariate polynomial expected') ",Computes the Cauchy upper bound on the roots of ``f``. ,10,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cauchy_upper_bound(f): if not f.lev: return dup_cauchy_upper_bound(f.rep, f.dom) else: raise ValueError('univariate polynomial expected') ``` ###Assistant : Computes the Cauchy upper bound on the roots of ``f``. " 144,"def kernS(s): hit = False quoted = '""' in s or ""'"" in s if '(' in s and not quoted: if s.count('(') != s.count("")""): raise SympifyError('unmatched left parenthesis') # strip all space from s s = ''.join(s.split()) olds = s # now use space to represent a symbol that # will # step 1. turn potential 2-arg Muls into 3-arg versions # 1a. *( -> * *( s = s.replace('*(', '* *(') # 1b. close up exponentials s = s.replace('** *', '**') # 2. handle the implied multiplication of a negated # parenthesized expression in two steps # 2a: -(...) --> -( *(...) target = '-( *(' s = s.replace('-(', target) # 2b: double the matching closing parenthesis # -( *(...) 
--> -( *(...)) i = nest = 0 assert target.endswith('(') # assumption below while True: j = s.find(target, i) if j == -1: break j += len(target) - 1 for j in range(j, len(s)): if s[j] == ""("": nest += 1 elif s[j] == "")"": nest -= 1 if nest == 0: break s = s[:j] + "")"" + s[j:] i = j + 2 # the first char after 2nd ) if ' ' in s: # get a unique kern kern = '_' while kern in s: kern += choice(string.ascii_letters + string.digits) s = s.replace(' ', kern) hit = kern in s else: hit = False for i in range(2): try: expr = sympify(s) break except TypeError: # the kern might cause unknown errors... if hit: s = olds # maybe it didn't like the kern; use un-kerned s hit = False continue expr = sympify(s) # let original error raise if not hit: return expr from .symbol import Symbol rep = {Symbol(kern): 1}","Use a hack to try keep autosimplification from distributing a a number into an Add; this modification does not prevent the 2-arg Mul from becoming an Add, however. Examples ======== >>> from sympy.core.sympify import kernS >>> from sympy.abc import x, y The 2-arg Mul distributes a number (or minus sign) across the terms of an expression, but kernS will prevent that: >>> 2*(x + y), -(x + 1) (2*x + 2*y, -x - 1) >>> kernS('2*(x + y)') 2*(x + y) >>> kernS('-(x + 1)') -(x + 1) If use of the hack fails, the un-hacked string will be passed to sympify... and you get what you get. XXX This hack should not be necessary once issue 4596 has been resolved. ",121,288,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def kernS(s): hit = False quoted = '""' in s or ""'"" in s if '(' in s and not quoted: if s.count('(') != s.count("")""): raise SympifyError('unmatched left parenthesis') # strip all space from s s = ''.join(s.split()) olds = s # now use space to represent a symbol that # will # step 1. turn potential 2-arg Muls into 3-arg versions # 1a. *( -> * *( s = s.replace('*(', '* *(') # 1b. close up exponentials s = s.replace('** *', '**') # 2. handle the implied multiplication of a negated # parenthesized expression in two steps # 2a: -(...) --> -( *(...) target = '-( *(' s = s.replace('-(', target) # 2b: double the matching closing parenthesis # -( *(...) --> -( *(...)) i = nest = 0 assert target.endswith('(') # assumption below while True: j = s.find(target, i) if j == -1: break j += len(target) - 1 for j in range(j, len(s)): if s[j] == ""("": nest += 1 elif s[j] == "")"": nest -= 1 if nest == 0: break s = s[:j] + "")"" + s[j:] i = j + 2 # the first char after 2nd ) if ' ' in s: # get a unique kern kern = '_' while kern in s: kern += choice(string.ascii_letters + string.digits) s = s.replace(' ', kern) hit = kern in s else: hit = False for i in range(2): try: expr = sympify(s) break except TypeError: # the kern might cause unknown errors... if hit: s = olds # maybe it didn't like the kern; use un-kerned s hit = False continue expr = sympify(s) # let original error raise if not hit: return expr from .symbol import Symbol rep = {Symbol(kern): 1} ``` ###Assistant : Use a hack to try keep autosimplification from distributing a a number into an Add; this modification does not prevent the 2-arg Mul from becoming an Add, however. 
Examples ======== >>> from sympy.core.sympify import kernS >>> from sympy.abc import x, y The 2-arg Mul distributes a number (or minus sign) across the terms of an expression, but kernS will prevent that: >>> 2*(x + y), -(x + 1) (2*x + 2*y, -x - 1) >>> kernS('2*(x + y)') 2*(x + y) >>> kernS('-(x + 1)') -(x + 1) If use of the hack fails, the un-hacked string will be passed to sympify... and you get what you get. XXX This hack should not be necessary once issue 4596 has been resolved. " 145,"def test_load_corrupt_file(self, patched_pickle_load): # First load is the schema version patched_pickle_load.side_effect = [DocumentClassifier.FORMAT_VERSION, OSError()] with self.assertRaises(ClassifierModelCorruptError): self.classifier.load() "," GIVEN: - Corrupted classifier pickle file WHEN: - An attempt is made to load the classifier THEN: - The ClassifierModelCorruptError is raised ",22,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_load_corrupt_file(self, patched_pickle_load): # First load is the schema version patched_pickle_load.side_effect = [DocumentClassifier.FORMAT_VERSION, OSError()] with self.assertRaises(ClassifierModelCorruptError): self.classifier.load() ``` ###Assistant : GIVEN: - Corrupted classifier pickle file WHEN: - An attempt is made to load the classifier THEN: - The ClassifierModelCorruptError is raised " 146,"def current_option(self) -> str: return self.device[self.entity_description.current_option_key] ",Return the selected entity option to represent the entity state.,10,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def current_option(self) -> str: return self.device[self.entity_description.current_option_key] ``` ###Assistant : Return the selected entity option to represent the entity state. " 147,"def gen_html(img): html_code = img['html']['structure']['tokens'].copy() to_insert = [i for i, tag in enumerate(html_code) if tag in ('', '>')] for i, cell in zip(to_insert[::-1], img['html']['cells'][::-1]): if cell['tokens']: text = ''.join(cell['tokens']) # skip empty text sp_char_list = ['', '', '\u2028', ' ', '', ''] text_remove_style = skip_char(text, sp_char_list) if len(text_remove_style) == 0: continue html_code.insert(i + 1, text) html_code = ''.join(html_code) html_code = '{}
'.format(html_code) return html_code "," Formats HTML code from tokenized annotation of img ",8,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def gen_html(img): html_code = img['html']['structure']['tokens'].copy() to_insert = [i for i, tag in enumerate(html_code) if tag in ('', '>')] for i, cell in zip(to_insert[::-1], img['html']['cells'][::-1]): if cell['tokens']: text = ''.join(cell['tokens']) # skip empty text sp_char_list = ['', '', '\u2028', ' ', '', ''] text_remove_style = skip_char(text, sp_char_list) if len(text_remove_style) == 0: continue html_code.insert(i + 1, text) html_code = ''.join(html_code) html_code = '{}
'.format(html_code) return html_code ``` ###Assistant : Formats HTML code from tokenized annotation of img " 148,"def key_aliases(self) -> Iterable[str]: for alias in _get_key_aliases(self.key): yield _normalize_key(alias) ","Get the aliases for the key, including the key itself",10,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def key_aliases(self) -> Iterable[str]: for alias in _get_key_aliases(self.key): yield _normalize_key(alias) ``` ###Assistant : Get the aliases for the key, including the key itself " 149,"def test_create_single_available_ip(self): vrf = VRF.objects.create(name='VRF 1') prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/30'), vrf=vrf, is_pool=True) url = reverse('ipam-api:prefix-available-ips', kwargs={'pk': prefix.pk}) self.add_permissions('ipam.view_prefix', 'ipam.add_ipaddress') # Create all four available IPs with individual requests for i in range(1, 5): data = { 'description': 'Test IP {}'.format(i) } response = self.client.post(url, data, format='json', **self.header) self.assertHttpStatus(response, status.HTTP_201_CREATED) self.assertEqual(response.data['vrf']['id'], vrf.pk) self.assertEqual(response.data['description'], data['description']) # Try to create one more IP response = self.client.post(url, {}, format='json', **self.header) self.assertHttpStatus(response, status.HTTP_409_CONFLICT) self.assertIn('detail', response.data) "," Test retrieval of the first available IP address within a parent prefix. ",12,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_create_single_available_ip(self): vrf = VRF.objects.create(name='VRF 1') prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/30'), vrf=vrf, is_pool=True) url = reverse('ipam-api:prefix-available-ips', kwargs={'pk': prefix.pk}) self.add_permissions('ipam.view_prefix', 'ipam.add_ipaddress') # Create all four available IPs with individual requests for i in range(1, 5): data = { 'description': 'Test IP {}'.format(i) } response = self.client.post(url, data, format='json', **self.header) self.assertHttpStatus(response, status.HTTP_201_CREATED) self.assertEqual(response.data['vrf']['id'], vrf.pk) self.assertEqual(response.data['description'], data['description']) # Try to create one more IP response = self.client.post(url, {}, format='json', **self.header) self.assertHttpStatus(response, status.HTTP_409_CONFLICT) self.assertIn('detail', response.data) ``` ###Assistant : Test retrieval of the first available IP address within a parent prefix. " 150,"def climate_adc_t3000_missing_setpoint_fixture(client, climate_adc_t3000_state): data = copy.deepcopy(climate_adc_t3000_state) data[""name""] = f""{data['name']} missing setpoint"" for value in data[""values""][:]: if ( value[""commandClassName""] == ""Humidity Control Setpoint"" and value[""propertyKeyName""] == ""De-humidifier"" ): data[""values""].remove(value) node = Node(client, data) client.driver.controller.nodes[node.node_id] = node return node @pytest.fixture(name=""climate_adc_t3000_missing_mode"")",Mock a climate ADC-T3000 node with missing de-humidify setpoint.,9,38,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def climate_adc_t3000_missing_setpoint_fixture(client, climate_adc_t3000_state): data = copy.deepcopy(climate_adc_t3000_state) data[""name""] = f""{data['name']} missing setpoint"" for value in data[""values""][:]: if ( value[""commandClassName""] == ""Humidity Control Setpoint"" and value[""propertyKeyName""] == ""De-humidifier"" ): data[""values""].remove(value) node = Node(client, data) client.driver.controller.nodes[node.node_id] = node return node @pytest.fixture(name=""climate_adc_t3000_missing_mode"") ``` ###Assistant : Mock a climate ADC-T3000 node with missing de-humidify setpoint. " 151,"def classify_ode(eq, func=None, dict=False, ics=None, *, prep=True, xi=None, eta=None, n=None, **kwargs): r"," Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve` classifications for an ODE. The tuple is ordered so that first item is the classification that :py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In general, classifications at the near the beginning of the list will produce better solutions faster than those near the end, thought there are always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a different classification, use ``dsolve(ODE, func, hint=)``. See also the :py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints you can use. If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will return a dictionary of ``hint:match`` expression terms. This is intended for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that because dictionaries are ordered arbitrarily, this will most likely not be in the same order as the tuple. You can get help on different hints by executing ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint without ``_Integral``. See :py:data:`~sympy.solvers.ode.allhints` or the :py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`. Notes ===== These are remarks on hint names. ``_Integral`` If a classification has ``_Integral`` at the end, it will return the expression with an unevaluated :py:class:`~.Integral`",184,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def classify_ode(eq, func=None, dict=False, ics=None, *, prep=True, xi=None, eta=None, n=None, **kwargs): r ``` ###Assistant : Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve` classifications for an ODE. The tuple is ordered so that first item is the classification that :py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In general, classifications at the near the beginning of the list will produce better solutions faster than those near the end, thought there are always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a different classification, use ``dsolve(ODE, func, hint=)``. See also the :py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints you can use. If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will return a dictionary of ``hint:match`` expression terms. This is intended for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that because dictionaries are ordered arbitrarily, this will most likely not be in the same order as the tuple. You can get help on different hints by executing ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint without ``_Integral``. 
See :py:data:`~sympy.solvers.ode.allhints` or the :py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`. Notes ===== These are remarks on hint names. ``_Integral`` If a classification has ``_Integral`` at the end, it will return the expression with an unevaluated :py:class:`~.Integral` " 152,"def mode(a, axis=0, nan_policy='propagate'): a, axis = _chk_asarray(a, axis) if a.size == 0: return ModeResult(np.array([]), np.array([])) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.mode(a, axis) if a.dtype == object and np.nan in set(a.ravel()): # Fall back to a slower method since np.unique does not work with NaN scores = set(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape, dtype=a.dtype) oldcounts = np.zeros(testshape, dtype=int) for score in scores: template = (a == score) counts = np.sum(template, axis, keepdims=True) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return ModeResult(mostfrequent, oldcounts) ","Return an array of the modal (most common) value in the passed array. If there is more than one such value, only the smallest is returned. The bin-count for the modal bins is also returned. Parameters ---------- a : array_like n-dimensional array of which to find mode(s). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. The following options are available (default is 'propagate'): * 'propagate': returns nan * 'raise': throws an error * 'omit': performs the calculations ignoring nan values Returns ------- mode : ndarray Array of modal values. count : ndarray Array of counts for each mode. Examples -------- >>> a = np.array([[6, 8, 3, 0], ... [3, 2, 1, 7], ... [8, 1, 8, 4], ... [5, 3, 0, 5], ... [4, 7, 5, 9]]) >>> from scipy import stats >>> stats.mode(a) ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]])) To get mode of whole array, specify ``axis=None``: >>> stats.mode(a, axis=None) ModeResult(mode=array([3]), count=array([3])) ",183,108,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mode(a, axis=0, nan_policy='propagate'): a, axis = _chk_asarray(a, axis) if a.size == 0: return ModeResult(np.array([]), np.array([])) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.mode(a, axis) if a.dtype == object and np.nan in set(a.ravel()): # Fall back to a slower method since np.unique does not work with NaN scores = set(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape, dtype=a.dtype) oldcounts = np.zeros(testshape, dtype=int) for score in scores: template = (a == score) counts = np.sum(template, axis, keepdims=True) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return ModeResult(mostfrequent, oldcounts) ``` ###Assistant : Return an array of the modal (most common) value in the passed array. If there is more than one such value, only the smallest is returned. 
The bin-count for the modal bins is also returned. Parameters ---------- a : array_like n-dimensional array of which to find mode(s). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. The following options are available (default is 'propagate'): * 'propagate': returns nan * 'raise': throws an error * 'omit': performs the calculations ignoring nan values Returns ------- mode : ndarray Array of modal values. count : ndarray Array of counts for each mode. Examples -------- >>> a = np.array([[6, 8, 3, 0], ... [3, 2, 1, 7], ... [8, 1, 8, 4], ... [5, 3, 0, 5], ... [4, 7, 5, 9]]) >>> from scipy import stats >>> stats.mode(a) ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]])) To get mode of whole array, specify ``axis=None``: >>> stats.mode(a, axis=None) ModeResult(mode=array([3]), count=array([3])) " 153,"def wait_for_instance(self) -> AnsibleCoreCI: core_ci = self.get_instance() core_ci.wait() return core_ci ",Wait for an AnsibleCoreCI VM instance to become ready.,9,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wait_for_instance(self) -> AnsibleCoreCI: core_ci = self.get_instance() core_ci.wait() return core_ci ``` ###Assistant : Wait for an AnsibleCoreCI VM instance to become ready. " 154,"def spherical_bessel_fn(n, x=None, polys=False): if n < 0: dup = dup_spherical_bessel_fn_minus(-int(n), ZZ) else: dup = dup_spherical_bessel_fn(int(n), ZZ) poly = DMP(dup, ZZ) if x is not None: poly = Poly.new(poly, 1/x) else: poly = PurePoly.new(poly, 1/Dummy('x')) return poly if polys else poly.as_expr() "," Coefficients for the spherical Bessel functions. Those are only needed in the jn() function. The coefficients are calculated from: fn(0, z) = 1/z fn(1, z) = 1/z**2 fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z) Parameters ========== n : int `n` decides the degree of polynomial x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression. Examples ======== >>> from sympy.polys.orthopolys import spherical_bessel_fn as fn >>> from sympy import Symbol >>> z = Symbol(""z"") >>> fn(1, z) z**(-2) >>> fn(2, z) -1/z + 3/z**3 >>> fn(3, z) -6/z**2 + 15/z**4 >>> fn(4, z) 1/z - 45/z**3 + 105/z**5 ",107,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def spherical_bessel_fn(n, x=None, polys=False): if n < 0: dup = dup_spherical_bessel_fn_minus(-int(n), ZZ) else: dup = dup_spherical_bessel_fn(int(n), ZZ) poly = DMP(dup, ZZ) if x is not None: poly = Poly.new(poly, 1/x) else: poly = PurePoly.new(poly, 1/Dummy('x')) return poly if polys else poly.as_expr() ``` ###Assistant : Coefficients for the spherical Bessel functions. Those are only needed in the jn() function. The coefficients are calculated from: fn(0, z) = 1/z fn(1, z) = 1/z**2 fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z) Parameters ========== n : int `n` decides the degree of polynomial x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression. 
Examples ======== >>> from sympy.polys.orthopolys import spherical_bessel_fn as fn >>> from sympy import Symbol >>> z = Symbol(""z"") >>> fn(1, z) z**(-2) >>> fn(2, z) -1/z + 3/z**3 >>> fn(3, z) -6/z**2 + 15/z**4 >>> fn(4, z) 1/z - 45/z**3 + 105/z**5 " 155,"def raft_large(*, pretrained=False, progress=True, **kwargs): return _raft( arch=""raft_large"", pretrained=pretrained, progress=progress, # Feature encoder feature_encoder_layers=(64, 64, 96, 128, 256), feature_encoder_block=ResidualBlock, feature_encoder_norm_layer=InstanceNorm2d, # Context encoder context_encoder_layers=(64, 64, 96, 128, 256), context_encoder_block=ResidualBlock, context_encoder_norm_layer=BatchNorm2d, # Correlation block corr_block_num_levels=4, corr_block_radius=4, # Motion encoder motion_encoder_corr_layers=(256, 192), motion_encoder_flow_layers=(128, 64), motion_encoder_out_channels=128, # Recurrent block recurrent_block_hidden_state_size=128, recurrent_block_kernel_size=((1, 5), (5, 1)), recurrent_block_padding=((0, 2), (2, 0)), # Flow head flow_head_hidden_size=256, # Mask predictor use_mask_predictor=True, **kwargs, ) ","RAFT model from `RAFT: Recurrent All Pairs Field Transforms for Optical Flow `_. Args: pretrained (bool): Whether to use weights that have been pre-trained on :class:`~torchvsion.datasets.FlyingChairs` + :class:`~torchvsion.datasets.FlyingThings3D` with two fine-tuning steps: - one on :class:`~torchvsion.datasets.Sintel` + :class:`~torchvsion.datasets.FlyingThings3D` - one on :class:`~torchvsion.datasets.KittiFlow`. This corresponds to the ``C+T+S/K`` strategy in the paper. progress (bool): If True, displays a progress bar of the download to stderr. Returns: nn.Module: The model. ",68,65,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def raft_large(*, pretrained=False, progress=True, **kwargs): return _raft( arch=""raft_large"", pretrained=pretrained, progress=progress, # Feature encoder feature_encoder_layers=(64, 64, 96, 128, 256), feature_encoder_block=ResidualBlock, feature_encoder_norm_layer=InstanceNorm2d, # Context encoder context_encoder_layers=(64, 64, 96, 128, 256), context_encoder_block=ResidualBlock, context_encoder_norm_layer=BatchNorm2d, # Correlation block corr_block_num_levels=4, corr_block_radius=4, # Motion encoder motion_encoder_corr_layers=(256, 192), motion_encoder_flow_layers=(128, 64), motion_encoder_out_channels=128, # Recurrent block recurrent_block_hidden_state_size=128, recurrent_block_kernel_size=((1, 5), (5, 1)), recurrent_block_padding=((0, 2), (2, 0)), # Flow head flow_head_hidden_size=256, # Mask predictor use_mask_predictor=True, **kwargs, ) ``` ###Assistant : RAFT model from `RAFT: Recurrent All Pairs Field Transforms for Optical Flow `_. Args: pretrained (bool): Whether to use weights that have been pre-trained on :class:`~torchvsion.datasets.FlyingChairs` + :class:`~torchvsion.datasets.FlyingThings3D` with two fine-tuning steps: - one on :class:`~torchvsion.datasets.Sintel` + :class:`~torchvsion.datasets.FlyingThings3D` - one on :class:`~torchvsion.datasets.KittiFlow`. This corresponds to the ``C+T+S/K`` strategy in the paper. progress (bool): If True, displays a progress bar of the download to stderr. Returns: nn.Module: The model. 
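A minimal usage sketch, assuming the builder is exposed as torchvision.models.optical_flow.raft_large and that pretrained weights can be downloaded in the current environment:

```Python
import torch
from torchvision.models.optical_flow import raft_large  # import path assumed

model = raft_large(pretrained=True, progress=False).eval()

# Two dummy frames; RAFT expects spatial sizes divisible by 8, and inputs are
# normally normalized to the [-1, 1] range before being passed in.
img1 = torch.rand(1, 3, 224, 224) * 2 - 1
img2 = torch.rand(1, 3, 224, 224) * 2 - 1

with torch.no_grad():
    # The model returns a list of iteratively refined flow fields;
    # the last element is the final estimate of shape (N, 2, H, W).
    flows = model(img1, img2)

print(flows[-1].shape)  # torch.Size([1, 2, 224, 224])
```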
" 156,"def _get_prettyprint_usage(self, console, executor_name, usage_kind=None): from rich.panel import Panel from rich.syntax import Syntax flow_plain = f flow_docker = f flow_sandbox = f panels = [ Panel( Syntax( p[0], 'python', theme='monokai', word_wrap=True, ), title=p[1], width=80, expand=False, ) for p in [ (flow_plain, 'Use via source'), (flow_docker, 'Use in Docker'), (flow_sandbox, 'Use in Sandbox'), ] ] if usage_kind == 'docker': console.print(panels[2]) elif usage_kind == 'source': console.print(panels[1]) else: console.print(*reversed(panels)) ","from jina import Flow f = Flow().add(uses='jinahub://{executor_name}') from jina import Flow f = Flow().add(uses='jinahub+docker://{executor_name}') from jina import Flow f = Flow().add(uses='jinahub+sandbox://{executor_name}') ",21,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_prettyprint_usage(self, console, executor_name, usage_kind=None): from rich.panel import Panel from rich.syntax import Syntax flow_plain = f flow_docker = f flow_sandbox = f panels = [ Panel( Syntax( p[0], 'python', theme='monokai', word_wrap=True, ), title=p[1], width=80, expand=False, ) for p in [ (flow_plain, 'Use via source'), (flow_docker, 'Use in Docker'), (flow_sandbox, 'Use in Sandbox'), ] ] if usage_kind == 'docker': console.print(panels[2]) elif usage_kind == 'source': console.print(panels[1]) else: console.print(*reversed(panels)) ``` ###Assistant : from jina import Flow f = Flow().add(uses='jinahub://{executor_name}') from jina import Flow f = Flow().add(uses='jinahub+docker://{executor_name}') from jina import Flow f = Flow().add(uses='jinahub+sandbox://{executor_name}') " 157,"def encoding(self, val): self._encoding = val if hasattr(self, ""GET""): del self.GET if hasattr(self, ""_post""): del self._post "," Set the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, remove and recreate it on the next access (so that it is decoded correctly). ",31,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def encoding(self, val): self._encoding = val if hasattr(self, ""GET""): del self.GET if hasattr(self, ""_post""): del self._post ``` ###Assistant : Set the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, remove and recreate it on the next access (so that it is decoded correctly). " 158,"def scale(self, factor, scale_tips=False, **kwargs): r if self.get_length() == 0: return self if scale_tips: super().scale(factor, **kwargs) self._set_stroke_width_from_length() return self has_tip = self.has_tip() has_start_tip = self.has_start_tip() if has_tip or has_start_tip: old_tips = self.pop_tips() super().scale(factor, **kwargs) self._set_stroke_width_from_length() if has_tip: self.add_tip(tip=old_tips[0]) if has_start_tip: self.add_tip(tip=old_tips[1], at_start=True) return self ","Scale an arrow, but keep stroke width and arrow tip size fixed. 
See Also -------- :meth:`~.Mobject.scale` Examples -------- :: >>> arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0) >>> scaled_arrow = arrow.scale(2) >>> np.round(scaled_arrow.get_start_and_end(), 8) + 0 array([[-2., -2., 0.], [ 2., 2., 0.]]) >>> arrow.tip.length == scaled_arrow.tip.length True Manually scaling the object using the default method :meth:`~.Mobject.scale` does not have the same properties:: >>> new_arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0) >>> another_scaled_arrow = VMobject.scale(new_arrow, 2) >>> another_scaled_arrow.tip.length == arrow.tip.length False ",85,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def scale(self, factor, scale_tips=False, **kwargs): r if self.get_length() == 0: return self if scale_tips: super().scale(factor, **kwargs) self._set_stroke_width_from_length() return self has_tip = self.has_tip() has_start_tip = self.has_start_tip() if has_tip or has_start_tip: old_tips = self.pop_tips() super().scale(factor, **kwargs) self._set_stroke_width_from_length() if has_tip: self.add_tip(tip=old_tips[0]) if has_start_tip: self.add_tip(tip=old_tips[1], at_start=True) return self ``` ###Assistant : Scale an arrow, but keep stroke width and arrow tip size fixed. See Also -------- :meth:`~.Mobject.scale` Examples -------- :: >>> arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0) >>> scaled_arrow = arrow.scale(2) >>> np.round(scaled_arrow.get_start_and_end(), 8) + 0 array([[-2., -2., 0.], [ 2., 2., 0.]]) >>> arrow.tip.length == scaled_arrow.tip.length True Manually scaling the object using the default method :meth:`~.Mobject.scale` does not have the same properties:: >>> new_arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0) >>> another_scaled_arrow = VMobject.scale(new_arrow, 2) >>> another_scaled_arrow.tip.length == arrow.tip.length False " 159,"def effective(file, line, frame): possibles = Breakpoint.bplist[file, line] for b in possibles: if not b.enabled: continue if not checkfuncname(b, frame): continue # Count every hit when bp is enabled b.hits += 1 if not b.cond: # If unconditional, and ignoring go on to next, else break if b.ignore > 0: b.ignore -= 1 continue else: # breakpoint and marker that it's ok to delete if temporary return (b, True) else: # Conditional bp. # Ignore count applies only to those bpt hits where the # condition evaluates to true. try: val = eval(b.cond, frame.f_globals, frame.f_locals) if val: if b.ignore > 0: b.ignore -= 1 # continue else: return (b, True) # else: # continue except: # if eval fails, most conservative thing is to stop on # breakpoint regardless of ignore count. Don't delete # temporary, as another hint to user. return (b, False) return (None, None) # -------------------- testing -------------------- ","Determine which breakpoint for this file:line is to be acted upon. Called only if we know there is a breakpoint at this location. Return the breakpoint that was triggered and a boolean that indicates if it is ok to delete a temporary breakpoint. Return (None, None) if there is no matching breakpoint. ",52,151,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def effective(file, line, frame): possibles = Breakpoint.bplist[file, line] for b in possibles: if not b.enabled: continue if not checkfuncname(b, frame): continue # Count every hit when bp is enabled b.hits += 1 if not b.cond: # If unconditional, and ignoring go on to next, else break if b.ignore > 0: b.ignore -= 1 continue else: # breakpoint and marker that it's ok to delete if temporary return (b, True) else: # Conditional bp. # Ignore count applies only to those bpt hits where the # condition evaluates to true. try: val = eval(b.cond, frame.f_globals, frame.f_locals) if val: if b.ignore > 0: b.ignore -= 1 # continue else: return (b, True) # else: # continue except: # if eval fails, most conservative thing is to stop on # breakpoint regardless of ignore count. Don't delete # temporary, as another hint to user. return (b, False) return (None, None) # -------------------- testing -------------------- ``` ###Assistant : Determine which breakpoint for this file:line is to be acted upon. Called only if we know there is a breakpoint at this location. Return the breakpoint that was triggered and a boolean that indicates if it is ok to delete a temporary breakpoint. Return (None, None) if there is no matching breakpoint. " 160,"def _log_gauss_mass(a, b): a, b = jnp.array(a), jnp.array(b) a, b = jnp.broadcast_arrays(a, b) # Note: Docstring carried over from scipy # Calculations in right tail are inaccurate, so we'll exploit the # symmetry and work only in the left tail case_left = b <= 0 case_right = a > 0 case_central = ~(case_left | case_right) def mass_case_left(a, b): return _log_diff(log_ndtr(b), log_ndtr(a)) def mass_case_right(a, b): return mass_case_left(-b, -a) def mass_case_central(a, b): # Note: Docstring carried over from scipy # Previously, this was implemented as: # left_mass = mass_case_left(a, 0) # right_mass = mass_case_right(0, b) # return _log_sum(left_mass, right_mass) # Catastrophic cancellation occurs as np.exp(log_mass) approaches 1. # Correct for this with an alternative formulation. # We're not concerned with underflow here: if only one term # underflows, it was insignificant; if both terms underflow, # the result can't accurately be represented in logspace anyway # because sc.log1p(x) ~ x for small x. return jnp.log1p(-ndtr(a) - ndtr(-b)) out = jnp.select( [case_left, case_right, case_central], [mass_case_left(a, b), mass_case_right(a, b), mass_case_central(a, b)] ) return out @_wraps(osp_stats.truncnorm.logpdf, update_doc=False)",Log of Gaussian probability mass within an interval,8,172,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _log_gauss_mass(a, b): a, b = jnp.array(a), jnp.array(b) a, b = jnp.broadcast_arrays(a, b) # Note: Docstring carried over from scipy # Calculations in right tail are inaccurate, so we'll exploit the # symmetry and work only in the left tail case_left = b <= 0 case_right = a > 0 case_central = ~(case_left | case_right) def mass_case_left(a, b): return _log_diff(log_ndtr(b), log_ndtr(a)) def mass_case_right(a, b): return mass_case_left(-b, -a) def mass_case_central(a, b): # Note: Docstring carried over from scipy # Previously, this was implemented as: # left_mass = mass_case_left(a, 0) # right_mass = mass_case_right(0, b) # return _log_sum(left_mass, right_mass) # Catastrophic cancellation occurs as np.exp(log_mass) approaches 1. # Correct for this with an alternative formulation. 
# We're not concerned with underflow here: if only one term # underflows, it was insignificant; if both terms underflow, # the result can't accurately be represented in logspace anyway # because sc.log1p(x) ~ x for small x. return jnp.log1p(-ndtr(a) - ndtr(-b)) out = jnp.select( [case_left, case_right, case_central], [mass_case_left(a, b), mass_case_right(a, b), mass_case_central(a, b)] ) return out @_wraps(osp_stats.truncnorm.logpdf, update_doc=False) ``` ###Assistant : Log of Gaussian probability mass within an interval " 161,"def test_fx_validator_integration(tmpdir): not_supported = { None: ""`self.trainer` reference is not registered"", ""on_before_accelerator_backend_setup"": ""You can't"", ""setup"": ""You can't"", ""configure_sharded_model"": ""You can't"", ""on_configure_sharded_model"": ""You can't"", ""configure_optimizers"": ""You can't"", ""on_fit_start"": ""You can't"", ""on_pretrain_routine_start"": ""You can't"", ""on_pretrain_routine_end"": ""You can't"", ""on_train_dataloader"": ""You can't"", ""train_dataloader"": ""You can't"", ""on_val_dataloader"": ""You can't"", ""val_dataloader"": ""You can't"", ""on_validation_end"": ""You can't"", ""on_train_end"": ""You can't"", ""on_fit_end"": ""You can't"", ""teardown"": ""You can't"", ""on_sanity_check_start"": ""You can't"", ""on_sanity_check_end"": ""You can't"", ""prepare_data"": ""You can't"", ""configure_callbacks"": ""You can't"", ""on_validation_model_eval"": ""You can't"", ""on_validation_model_train"": ""You can't"", ""lr_scheduler_step"": ""You can't"", ""summarize"": ""not managed by the `Trainer"", } model = HookedModel(not_supported) with pytest.warns(UserWarning, match=not_supported[None]): model.log(""foo"", 1) callback = HookedCallback(not_supported) trainer = Trainer( default_root_dir=tmpdir, max_epochs=2, limit_train_batches=1, limit_val_batches=1, limit_test_batches=1, limit_predict_batches=1, callbacks=callback, ) with pytest.deprecated_call(match=""on_train_dataloader` is deprecated in v1.5""): trainer.fit(model) not_supported.update( { # `lightning_module` ref is now present from the `fit` call ""on_before_accelerator_backend_setup"": ""You can't"", ""on_test_dataloader"": ""You can't"", ""test_dataloader"": ""You can't"", ""on_test_model_eval"": ""You can't"", ""on_test_model_train"": ""You can't"", ""on_test_end"": ""You can't"", } ) with pytest.deprecated_call(match=""on_test_dataloader` is deprecated in v1.5""): trainer.test(model, verbose=False) not_supported.update({k: ""result collection is not registered yet"" for k in not_supported}) not_supported.update( { ""on_predict_dataloader"": ""result collection is not registered yet"", ""predict_dataloader"": ""result collection is not registered yet"", ""on_predict_model_eval"": ""result collection is not registered yet"", ""on_predict_start"": ""result collection is not registered yet"", ""on_predict_epoch_start"": ""result collection is not registered yet"", ""on_predict_batch_start"": ""result collection is not registered yet"", ""predict_step"": ""result collection is not registered yet"", ""on_predict_batch_end"": ""result collection is not registered yet"", ""on_predict_epoch_end"": ""result collection is not registered yet"", ""on_predict_end"": ""result collection is not registered yet"", } ) with pytest.deprecated_call(match=""on_predict_dataloader` is deprecated in v1.5""): trainer.predict(model) @RunIf(min_gpus=2)",Tries to log inside all `LightningModule` and `Callback` hooks to check any expected errors.,14,249,Python," ###User : Below is a 
Python method which does a task. Create a documentation for the below code : ```Python def test_fx_validator_integration(tmpdir): not_supported = { None: ""`self.trainer` reference is not registered"", ""on_before_accelerator_backend_setup"": ""You can't"", ""setup"": ""You can't"", ""configure_sharded_model"": ""You can't"", ""on_configure_sharded_model"": ""You can't"", ""configure_optimizers"": ""You can't"", ""on_fit_start"": ""You can't"", ""on_pretrain_routine_start"": ""You can't"", ""on_pretrain_routine_end"": ""You can't"", ""on_train_dataloader"": ""You can't"", ""train_dataloader"": ""You can't"", ""on_val_dataloader"": ""You can't"", ""val_dataloader"": ""You can't"", ""on_validation_end"": ""You can't"", ""on_train_end"": ""You can't"", ""on_fit_end"": ""You can't"", ""teardown"": ""You can't"", ""on_sanity_check_start"": ""You can't"", ""on_sanity_check_end"": ""You can't"", ""prepare_data"": ""You can't"", ""configure_callbacks"": ""You can't"", ""on_validation_model_eval"": ""You can't"", ""on_validation_model_train"": ""You can't"", ""lr_scheduler_step"": ""You can't"", ""summarize"": ""not managed by the `Trainer"", } model = HookedModel(not_supported) with pytest.warns(UserWarning, match=not_supported[None]): model.log(""foo"", 1) callback = HookedCallback(not_supported) trainer = Trainer( default_root_dir=tmpdir, max_epochs=2, limit_train_batches=1, limit_val_batches=1, limit_test_batches=1, limit_predict_batches=1, callbacks=callback, ) with pytest.deprecated_call(match=""on_train_dataloader` is deprecated in v1.5""): trainer.fit(model) not_supported.update( { # `lightning_module` ref is now present from the `fit` call ""on_before_accelerator_backend_setup"": ""You can't"", ""on_test_dataloader"": ""You can't"", ""test_dataloader"": ""You can't"", ""on_test_model_eval"": ""You can't"", ""on_test_model_train"": ""You can't"", ""on_test_end"": ""You can't"", } ) with pytest.deprecated_call(match=""on_test_dataloader` is deprecated in v1.5""): trainer.test(model, verbose=False) not_supported.update({k: ""result collection is not registered yet"" for k in not_supported}) not_supported.update( { ""on_predict_dataloader"": ""result collection is not registered yet"", ""predict_dataloader"": ""result collection is not registered yet"", ""on_predict_model_eval"": ""result collection is not registered yet"", ""on_predict_start"": ""result collection is not registered yet"", ""on_predict_epoch_start"": ""result collection is not registered yet"", ""on_predict_batch_start"": ""result collection is not registered yet"", ""predict_step"": ""result collection is not registered yet"", ""on_predict_batch_end"": ""result collection is not registered yet"", ""on_predict_epoch_end"": ""result collection is not registered yet"", ""on_predict_end"": ""result collection is not registered yet"", } ) with pytest.deprecated_call(match=""on_predict_dataloader` is deprecated in v1.5""): trainer.predict(model) @RunIf(min_gpus=2) ``` ###Assistant : Tries to log inside all `LightningModule` and `Callback` hooks to check any expected errors. " 162,"def enable_application_mode() -> Callable[[], None]: terminal_in = sys.stdin terminal_out = sys.stdout current_console_mode_in = _get_console_mode(terminal_in) current_console_mode_out = _get_console_mode(terminal_out) ","Enable application mode. Returns: Callable[[], None]: A callable that will restore terminal to previous state. ",15,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def enable_application_mode() -> Callable[[], None]: terminal_in = sys.stdin terminal_out = sys.stdout current_console_mode_in = _get_console_mode(terminal_in) current_console_mode_out = _get_console_mode(terminal_out) ``` ###Assistant : Enable application mode. Returns: Callable[[], None]: A callable that will restore terminal to previous state. " 163,"def get_attributes(self) -> dict[str, str]: return _attributes( message=self.message, type=self.type, ) ",Return a dictionary of attributes for this instance.,8,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_attributes(self) -> dict[str, str]: return _attributes( message=self.message, type=self.type, ) ``` ###Assistant : Return a dictionary of attributes for this instance. " 164,"def check_handle_timedout(self) -> None: for trade in Trade.get_open_order_trades(): try: if not trade.open_order_id: continue order = self.exchange.fetch_order(trade.open_order_id, trade.pair) except (ExchangeError): logger.info('Cannot query order for %s due to %s', trade, traceback.format_exc()) continue fully_cancelled = self.update_trade_state(trade, trade.open_order_id, order) if (order['side'] == 'buy' and (order['status'] == 'open' or fully_cancelled) and ( fully_cancelled or self.strategy.ft_check_timed_out( 'buy', trade, order, datetime.now(timezone.utc)) )): self.handle_cancel_enter(trade, order, constants.CANCEL_REASON['TIMEOUT']) elif (order['side'] == 'sell' and (order['status'] == 'open' or fully_cancelled) and ( fully_cancelled or self.strategy.ft_check_timed_out( 'sell', trade, order, datetime.now(timezone.utc))) ): self.handle_cancel_exit(trade, order, constants.CANCEL_REASON['TIMEOUT']) canceled_count = trade.get_exit_order_count() max_timeouts = self.config.get('unfilledtimeout', {}).get('exit_timeout_count', 0) if max_timeouts > 0 and canceled_count >= max_timeouts: logger.warning(f'Emergencyselling trade {trade}, as the sell order ' f'timed out {max_timeouts} times.') try: self.execute_trade_exit( trade, order.get('price'), sell_reason=SellCheckTuple(sell_type=SellType.EMERGENCY_SELL)) except DependencyException as exception: logger.warning(f'Unable to emergency sell trade {trade.pair}: {exception}') "," Check if any orders are timed out and cancel if necessary :param timeoutvalue: Number of minutes until order is considered timed out :return: None ",24,125,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def check_handle_timedout(self) -> None: for trade in Trade.get_open_order_trades(): try: if not trade.open_order_id: continue order = self.exchange.fetch_order(trade.open_order_id, trade.pair) except (ExchangeError): logger.info('Cannot query order for %s due to %s', trade, traceback.format_exc()) continue fully_cancelled = self.update_trade_state(trade, trade.open_order_id, order) if (order['side'] == 'buy' and (order['status'] == 'open' or fully_cancelled) and ( fully_cancelled or self.strategy.ft_check_timed_out( 'buy', trade, order, datetime.now(timezone.utc)) )): self.handle_cancel_enter(trade, order, constants.CANCEL_REASON['TIMEOUT']) elif (order['side'] == 'sell' and (order['status'] == 'open' or fully_cancelled) and ( fully_cancelled or self.strategy.ft_check_timed_out( 'sell', trade, order, datetime.now(timezone.utc))) ): self.handle_cancel_exit(trade, order, constants.CANCEL_REASON['TIMEOUT']) canceled_count = trade.get_exit_order_count() max_timeouts = self.config.get('unfilledtimeout', {}).get('exit_timeout_count', 0) if max_timeouts > 0 and canceled_count >= max_timeouts: logger.warning(f'Emergencyselling trade {trade}, as the sell order ' f'timed out {max_timeouts} times.') try: self.execute_trade_exit( trade, order.get('price'), sell_reason=SellCheckTuple(sell_type=SellType.EMERGENCY_SELL)) except DependencyException as exception: logger.warning(f'Unable to emergency sell trade {trade.pair}: {exception}') ``` ###Assistant : Check if any orders are timed out and cancel if necessary :param timeoutvalue: Number of minutes until order is considered timed out :return: None " 165,"def test_user_misery_denominator(self): ProjectTransactionThreshold.objects.create( project=self.project, organization=self.project.organization, threshold=600, metric=TransactionMetric.LCP.value, ) lcps = [ 400, 400, 300, 3000, 3000, 3000, ] for idx, lcp in enumerate(lcps): data = self.load_data( timestamp=before_now(minutes=(10 + idx)), ) data[""event_id""] = f""{idx}"" * 32 data[""transaction""] = ""/misery/new/"" data[""user""] = {""email"": f""{idx}@example.com""} data[""measurements""] = { ""lcp"": {""value"": lcp}, } self.store_event(data, project_id=self.project.id) # Shouldn't count towards misery data = self.load_data(timestamp=self.ten_mins_ago, duration=timedelta(milliseconds=0)) data[""transaction""] = ""/misery/new/"" data[""user""] = {""email"": ""7@example.com""} data[""measurements""] = {} self.store_event(data, project_id=self.project.id) query = { ""field"": [ ""transaction"", ""user_misery()"", ], ""query"": ""event.type:transaction"", ""project"": [self.project.id], ""sort"": ""-user_misery"", } response = self.do_request( query, ) assert response.status_code == 200, response.content assert len(response.data[""data""]) == 1 data = response.data[""data""] # (3 frustrated + 5.8875) / (6 + 117.75) assert abs(data[0][""user_misery()""] - 0.071818) < 0.0001 ","This is to test against a bug where the denominator of misery(total unique users) was wrong This is because the total unique users for a LCP misery should only count users that have had a txn with lcp, and not count all transactions (ie. uniq_if(transaction has lcp) not just uniq()) ",50,119,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_user_misery_denominator(self): ProjectTransactionThreshold.objects.create( project=self.project, organization=self.project.organization, threshold=600, metric=TransactionMetric.LCP.value, ) lcps = [ 400, 400, 300, 3000, 3000, 3000, ] for idx, lcp in enumerate(lcps): data = self.load_data( timestamp=before_now(minutes=(10 + idx)), ) data[""event_id""] = f""{idx}"" * 32 data[""transaction""] = ""/misery/new/"" data[""user""] = {""email"": f""{idx}@example.com""} data[""measurements""] = { ""lcp"": {""value"": lcp}, } self.store_event(data, project_id=self.project.id) # Shouldn't count towards misery data = self.load_data(timestamp=self.ten_mins_ago, duration=timedelta(milliseconds=0)) data[""transaction""] = ""/misery/new/"" data[""user""] = {""email"": ""7@example.com""} data[""measurements""] = {} self.store_event(data, project_id=self.project.id) query = { ""field"": [ ""transaction"", ""user_misery()"", ], ""query"": ""event.type:transaction"", ""project"": [self.project.id], ""sort"": ""-user_misery"", } response = self.do_request( query, ) assert response.status_code == 200, response.content assert len(response.data[""data""]) == 1 data = response.data[""data""] # (3 frustrated + 5.8875) / (6 + 117.75) assert abs(data[0][""user_misery()""] - 0.071818) < 0.0001 ``` ###Assistant : This is to test against a bug where the denominator of misery(total unique users) was wrong This is because the total unique users for a LCP misery should only count users that have had a txn with lcp, and not count all transactions (ie. uniq_if(transaction has lcp) not just uniq()) " 166,"def _determine_base_url(document, page_url): # type: (HTMLElement, str) -> str for base in document.findall("".//base""): href = base.get(""href"") if href is not None: return href return page_url ","Determine the HTML document's base URL. This looks for a ```` tag in the HTML document. If present, its href attribute denotes the base URL of anchor tags in the document. If there is no such tag (or if it does not have a valid href attribute), the HTML file's URL is used as the base URL. :param document: An HTML document representation. The current implementation expects the result of ``html5lib.parse()``. :param page_url: The URL of the HTML document. ",79,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _determine_base_url(document, page_url): # type: (HTMLElement, str) -> str for base in document.findall("".//base""): href = base.get(""href"") if href is not None: return href return page_url ``` ###Assistant : Determine the HTML document's base URL. This looks for a ```` tag in the HTML document. If present, its href attribute denotes the base URL of anchor tags in the document. If there is no such tag (or if it does not have a valid href attribute), the HTML file's URL is used as the base URL. :param document: An HTML document representation. The current implementation expects the result of ``html5lib.parse()``. :param page_url: The URL of the HTML document. " 167,"def pairwise_distances(self, U, V): return self._distance_block.eval(feed_dict={self._features_batch1: U, self._features_batch2: V}) #---------------------------------------------------------------------------- ",Evaluate pairwise distances between two batches of feature vectors.,9,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def pairwise_distances(self, U, V): return self._distance_block.eval(feed_dict={self._features_batch1: U, self._features_batch2: V}) #---------------------------------------------------------------------------- ``` ###Assistant : Evaluate pairwise distances between two batches of feature vectors. " 168,"def upgrade(): op.create_table( 'project', sa.Column('id', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_integration_name_company_id') ) conn = op.get_bind() session = sa.orm.Session(bind=conn) project_record = db.Project(name='mindsdb') session.add(project_record) session.commit() with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.add_column(sa.Column('project_id', sa.Integer())) batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id']) conn.execute(sa.sql.text(), project_id=project_record.id) with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.alter_column( 'project_id', existing_type=sa.INTEGER(), nullable=False ) with op.batch_alter_table('view', schema=None) as batch_op: batch_op.add_column(sa.Column('project_id', sa.Integer())) batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id']) conn.execute(sa.sql.text(), project_id=project_record.id) with op.batch_alter_table('view', schema=None) as batch_op: batch_op.alter_column( 'project_id', existing_type=sa.INTEGER(), nullable=False ) views = conn.execute().fetchall() for row in views: conn.execute( text(), { 'name': f""{row['name']}_view"", 'view_id': row['id'] } ) view_integration = session.query(db.Integration).filter_by(name='views').first() if view_integration is not None: session.delete(view_integration) session.commit() "," update predictor set project_id = :project_id update view set project_id = :project_id select id, name from view where exists (select 1 from predictor where view.name = predictor.name) update view set name = :name where id = :view_id ",37,110,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def upgrade(): op.create_table( 'project', sa.Column('id', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_integration_name_company_id') ) conn = op.get_bind() session = sa.orm.Session(bind=conn) project_record = db.Project(name='mindsdb') session.add(project_record) session.commit() with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.add_column(sa.Column('project_id', sa.Integer())) batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id']) conn.execute(sa.sql.text(), project_id=project_record.id) with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.alter_column( 'project_id', existing_type=sa.INTEGER(), nullable=False ) with op.batch_alter_table('view', schema=None) as batch_op: batch_op.add_column(sa.Column('project_id', sa.Integer())) batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id']) conn.execute(sa.sql.text(), project_id=project_record.id) with op.batch_alter_table('view', schema=None) as batch_op: batch_op.alter_column( 'project_id', existing_type=sa.INTEGER(), nullable=False ) views = conn.execute().fetchall() for row in views: conn.execute( text(), { 'name': f""{row['name']}_view"", 'view_id': row['id'] } ) view_integration = session.query(db.Integration).filter_by(name='views').first() if view_integration is not None: session.delete(view_integration) session.commit() ``` ###Assistant : update predictor set project_id = :project_id update view set project_id = :project_id select id, name from view where exists (select 1 from predictor where view.name = predictor.name) update view set name = :name where id = :view_id " 169,"def clear(self) -> None: self._patterns: List[PatternType] = [] self.matcher: Matcher = Matcher(self.nlp.vocab, validate=self.validate) self.phrase_matcher: PhraseMatcher = PhraseMatcher( self.nlp.vocab, attr=self.phrase_matcher_attr, validate=self.validate, ) ","Reset all patterns. RETURNS: None DOCS: https://spacy.io/api/spanruler#clear ",7,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clear(self) -> None: self._patterns: List[PatternType] = [] self.matcher: Matcher = Matcher(self.nlp.vocab, validate=self.validate) self.phrase_matcher: PhraseMatcher = PhraseMatcher( self.nlp.vocab, attr=self.phrase_matcher_attr, validate=self.validate, ) ``` ###Assistant : Reset all patterns. 
RETURNS: None DOCS: https://spacy.io/api/spanruler#clear " 170,"def _from_module(self, module, object): if module is None: return True elif inspect.getmodule(object) is not None: return module is inspect.getmodule(object) elif inspect.isfunction(object): return module.__dict__ is object.__globals__ elif inspect.ismethoddescriptor(object): if hasattr(object, '__objclass__'): obj_mod = object.__objclass__.__module__ elif hasattr(object, '__module__'): obj_mod = object.__module__ else: return True # [XX] no easy way to tell otherwise return module.__name__ == obj_mod elif inspect.isclass(object): return module.__name__ == object.__module__ elif hasattr(object, '__module__'): return module.__name__ == object.__module__ elif isinstance(object, property): return True # [XX] no way not be sure. else: raise ValueError(""object must be a class or function"") "," Return true if the given object is defined in the given module. ",12,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _from_module(self, module, object): if module is None: return True elif inspect.getmodule(object) is not None: return module is inspect.getmodule(object) elif inspect.isfunction(object): return module.__dict__ is object.__globals__ elif inspect.ismethoddescriptor(object): if hasattr(object, '__objclass__'): obj_mod = object.__objclass__.__module__ elif hasattr(object, '__module__'): obj_mod = object.__module__ else: return True # [XX] no easy way to tell otherwise return module.__name__ == obj_mod elif inspect.isclass(object): return module.__name__ == object.__module__ elif hasattr(object, '__module__'): return module.__name__ == object.__module__ elif isinstance(object, property): return True # [XX] no way not be sure. else: raise ValueError(""object must be a class or function"") ``` ###Assistant : Return true if the given object is defined in the given module. 
" 171,"def cut_ansi_string_into_parts(string_with_ansi_codes): color_codes_english = ['Black', 'Red', 'Green', 'Yellow', 'Blue', 'Magenta', 'Cyan', 'White', 'Reset'] color_codes = [""30m"", ""31m"", ""32m"", ""33m"", ""34m"", ""35m"", ""36m"", ""37m"", ""0m""] effect_codes_english = ['Italic', 'Underline', 'Slow Blink', 'Rapid Blink', 'Crossed Out'] effect_codes = [""3m"", ""4m"", ""5m"", ""6m"", ""9m""] background_codes = [""40m"", ""41m"", ""42m"", ""43m"", ""44m"", ""45m"", ""46m"", ""47m""] background_codes_english = [""Black"", ""Red"", ""Green"", ""Yellow"", ""Blue"", ""Magenta"", ""Cyan"", ""White""] ansi_codes = color_codes + effect_codes tuple_list = [] string_list = string_with_ansi_codes.split(""\u001b["") if (len(string_list)) == 1: string_list = string_with_ansi_codes.split(""\033["") for teststring in string_list: if teststring == string_with_ansi_codes: tuple_list += [(teststring, None, None, None)] break if any(code in teststring for code in ansi_codes): static_string = None color_used = None effect_used = None background_used = None for color in range(0, len(color_codes)): if teststring.startswith(color_codes[color]): working_thread = teststring.split(color_codes[color]) ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]') static_string = ansi_strip.sub('', working_thread[1]) color_used = color_codes_english[color] for effect in range(0, len(effect_codes)): if teststring.startswith(effect_codes[effect]): working_thread = teststring.split(effect_codes[effect]) ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]') static_string = ansi_strip.sub('', working_thread[1]) effect_used = effect_codes_english[effect] for background in range(0, len(background_codes)): if teststring.startswith(background_codes[background]): working_thread = teststring.split(background_codes[background]) ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]') static_string = ansi_strip.sub('', working_thread[1]) background_used = background_codes_english[background] try: if not tuple_list[len(tuple_list) - 1][0]: if not tuple_list[len(tuple_list) - 1][1] == None: color_used = tuple_list[len(tuple_list) - 1][1] if not tuple_list[len(tuple_list) - 1][2] == None: background_used = tuple_list[len(tuple_list) - 1][2] if not tuple_list[len(tuple_list) - 1][3] == None: effect_used = tuple_list[len(tuple_list) - 1][3] tuple_list += [(static_string, color_used, background_used, effect_used)] else: tuple_list += [(static_string, color_used, background_used, effect_used)] except Exception: tuple_list += [(static_string, color_used, background_used, effect_used)] new_tuple_list = [] for x in range(0, len(tuple_list)): if tuple_list[x][0]: new_tuple_list += [[tuple_list[x][0], tuple_list[x][1], tuple_list[x][2], tuple_list[x][3]]] return new_tuple_list "," Converts a string with ambedded ANSI Color Codes and parses it to create a list of tuples describing pieces of the input string. :param string_with_ansi_codes: :return: [(sty, str, str, str), ...] A list of tuples. Each tuple has format: (text, text color, background color, effects) ",45,258,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def cut_ansi_string_into_parts(string_with_ansi_codes): color_codes_english = ['Black', 'Red', 'Green', 'Yellow', 'Blue', 'Magenta', 'Cyan', 'White', 'Reset'] color_codes = [""30m"", ""31m"", ""32m"", ""33m"", ""34m"", ""35m"", ""36m"", ""37m"", ""0m""] effect_codes_english = ['Italic', 'Underline', 'Slow Blink', 'Rapid Blink', 'Crossed Out'] effect_codes = [""3m"", ""4m"", ""5m"", ""6m"", ""9m""] background_codes = [""40m"", ""41m"", ""42m"", ""43m"", ""44m"", ""45m"", ""46m"", ""47m""] background_codes_english = [""Black"", ""Red"", ""Green"", ""Yellow"", ""Blue"", ""Magenta"", ""Cyan"", ""White""] ansi_codes = color_codes + effect_codes tuple_list = [] string_list = string_with_ansi_codes.split(""\u001b["") if (len(string_list)) == 1: string_list = string_with_ansi_codes.split(""\033["") for teststring in string_list: if teststring == string_with_ansi_codes: tuple_list += [(teststring, None, None, None)] break if any(code in teststring for code in ansi_codes): static_string = None color_used = None effect_used = None background_used = None for color in range(0, len(color_codes)): if teststring.startswith(color_codes[color]): working_thread = teststring.split(color_codes[color]) ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]') static_string = ansi_strip.sub('', working_thread[1]) color_used = color_codes_english[color] for effect in range(0, len(effect_codes)): if teststring.startswith(effect_codes[effect]): working_thread = teststring.split(effect_codes[effect]) ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]') static_string = ansi_strip.sub('', working_thread[1]) effect_used = effect_codes_english[effect] for background in range(0, len(background_codes)): if teststring.startswith(background_codes[background]): working_thread = teststring.split(background_codes[background]) ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]') static_string = ansi_strip.sub('', working_thread[1]) background_used = background_codes_english[background] try: if not tuple_list[len(tuple_list) - 1][0]: if not tuple_list[len(tuple_list) - 1][1] == None: color_used = tuple_list[len(tuple_list) - 1][1] if not tuple_list[len(tuple_list) - 1][2] == None: background_used = tuple_list[len(tuple_list) - 1][2] if not tuple_list[len(tuple_list) - 1][3] == None: effect_used = tuple_list[len(tuple_list) - 1][3] tuple_list += [(static_string, color_used, background_used, effect_used)] else: tuple_list += [(static_string, color_used, background_used, effect_used)] except Exception: tuple_list += [(static_string, color_used, background_used, effect_used)] new_tuple_list = [] for x in range(0, len(tuple_list)): if tuple_list[x][0]: new_tuple_list += [[tuple_list[x][0], tuple_list[x][1], tuple_list[x][2], tuple_list[x][3]]] return new_tuple_list ``` ###Assistant : Converts a string with ambedded ANSI Color Codes and parses it to create a list of tuples describing pieces of the input string. :param string_with_ansi_codes: :return: [(sty, str, str, str), ...] A list of tuples. 
Each tuple has format: (text, text color, background color, effects) " 172,"def interpolate(self, f=None, f_step=DEFAULT_STEP, pol_order=1, f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX): # Remove None values i = 0 while i < len(self.raw): if self.raw[i] is None: self.raw = np.delete(self.raw, i) self.frequency = np.delete(self.frequency, i) else: i += 1 # Interpolation functions keys = 'raw error error_smoothed equalization equalized_raw equalized_smoothed target'.split() interpolators = dict() log_f = np.log10(self.frequency) for key in keys: if len(self.__dict__[key]): interpolators[key] = InterpolatedUnivariateSpline(log_f, self.__dict__[key], k=pol_order) if f is None: self.frequency = self.generate_frequencies(f_min=f_min, f_max=f_max, f_step=f_step) else: self.frequency = np.array(f) # Prevent log10 from exploding by replacing zero frequency with small value zero_freq_fix = False if self.frequency[0] == 0: self.frequency[0] = 0.001 zero_freq_fix = True # Run interpolators log_f = np.log10(self.frequency) for key in keys: if len(self.__dict__[key]) and key in interpolators: self.__dict__[key] = interpolators[key](log_f) if zero_freq_fix: # Restore zero frequency self.frequency[0] = 0 # Everything but the interpolated data is affected by interpolating, reset them self.reset(**{key: False for key in keys}) ",Interpolates missing values from previous and next value. Resets all but raw data.,13,147,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def interpolate(self, f=None, f_step=DEFAULT_STEP, pol_order=1, f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX): # Remove None values i = 0 while i < len(self.raw): if self.raw[i] is None: self.raw = np.delete(self.raw, i) self.frequency = np.delete(self.frequency, i) else: i += 1 # Interpolation functions keys = 'raw error error_smoothed equalization equalized_raw equalized_smoothed target'.split() interpolators = dict() log_f = np.log10(self.frequency) for key in keys: if len(self.__dict__[key]): interpolators[key] = InterpolatedUnivariateSpline(log_f, self.__dict__[key], k=pol_order) if f is None: self.frequency = self.generate_frequencies(f_min=f_min, f_max=f_max, f_step=f_step) else: self.frequency = np.array(f) # Prevent log10 from exploding by replacing zero frequency with small value zero_freq_fix = False if self.frequency[0] == 0: self.frequency[0] = 0.001 zero_freq_fix = True # Run interpolators log_f = np.log10(self.frequency) for key in keys: if len(self.__dict__[key]) and key in interpolators: self.__dict__[key] = interpolators[key](log_f) if zero_freq_fix: # Restore zero frequency self.frequency[0] = 0 # Everything but the interpolated data is affected by interpolating, reset them self.reset(**{key: False for key in keys}) ``` ###Assistant : Interpolates missing values from previous and next value. Resets all but raw data. 
" 173,"def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() device = self.param_groups[0]['params'][0].device one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] trust_coeff = group['trust_coeff'] eps = group['eps'] for p in group['params']: if p.grad is None: continue grad = p.grad # apply LARS LR adaptation, LARC clipping, weight decay # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py if weight_decay != 0 or group['always_adapt']: w_norm = p.norm(2.0) g_norm = grad.norm(2.0) trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) # FIXME nested where required since logical and/or not working in PT XLA trust_ratio = torch.where( w_norm > 0, torch.where(g_norm > 0, trust_ratio, one_tensor), one_tensor, ) if group['trust_clip']: trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) grad.add_(p, alpha=weight_decay) grad.mul_(trust_ratio) # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 if momentum != 0: param_state = self.state[p] if 'momentum_buffer' not in param_state: buf = param_state['momentum_buffer'] = torch.clone(grad).detach() else: buf = param_state['momentum_buffer'] buf.mul_(momentum).add_(grad, alpha=1. - dampening) if nesterov: grad = grad.add(buf, alpha=momentum) else: grad = buf p.add_(grad, alpha=-group['lr']) return loss","Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. ",19,182,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() device = self.param_groups[0]['params'][0].device one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] trust_coeff = group['trust_coeff'] eps = group['eps'] for p in group['params']: if p.grad is None: continue grad = p.grad # apply LARS LR adaptation, LARC clipping, weight decay # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py if weight_decay != 0 or group['always_adapt']: w_norm = p.norm(2.0) g_norm = grad.norm(2.0) trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) # FIXME nested where required since logical and/or not working in PT XLA trust_ratio = torch.where( w_norm > 0, torch.where(g_norm > 0, trust_ratio, one_tensor), one_tensor, ) if group['trust_clip']: trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) grad.add_(p, alpha=weight_decay) grad.mul_(trust_ratio) # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 if momentum != 0: param_state = self.state[p] if 'momentum_buffer' not in param_state: buf = param_state['momentum_buffer'] = torch.clone(grad).detach() else: buf = param_state['momentum_buffer'] buf.mul_(momentum).add_(grad, alpha=1. - dampening) if nesterov: grad = grad.add(buf, alpha=momentum) else: grad = buf p.add_(grad, alpha=-group['lr']) return loss ``` ###Assistant : Performs a single optimization step. 
Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. " 174,"async def _async_force_resync(self, *_): self._forced_resync = None await self._async_force_refresh_state() ",Force a resync after an update since the hub may have stale state.,13,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def _async_force_resync(self, *_): self._forced_resync = None await self._async_force_refresh_state() ``` ###Assistant : Force a resync after an update since the hub may have stale state. " 175,"def set_color_codes(palette=""deep""): if palette == ""reset"": colors = [ (0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, 0., .75), (.75, .75, 0.), (0., .75, .75), (0., 0., 0.) ] elif not isinstance(palette, str): err = ""set_color_codes requires a named seaborn palette"" raise TypeError(err) elif palette in SEABORN_PALETTES: if not palette.endswith(""6""): palette = palette + ""6"" colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)] else: err = f""Cannot set colors with palette '{palette}'"" raise ValueError(err) for code, color in zip(""bgrmyck"", colors): rgb = mpl.colors.colorConverter.to_rgb(color) mpl.colors.colorConverter.colors[code] = rgb mpl.colors.colorConverter.cache[code] = rgb ","Change how matplotlib color shorthands are interpreted. Calling this will change how shorthand codes like ""b"" or ""g"" are interpreted by matplotlib in subsequent plots. Parameters ---------- palette : {deep, muted, pastel, dark, bright, colorblind} Named seaborn palette to use as the source of colors. See Also -------- set : Color codes can be set through the high-level seaborn style manager. set_palette : Color codes can also be set through the function that sets the matplotlib color cycle. ",78,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_color_codes(palette=""deep""): if palette == ""reset"": colors = [ (0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, 0., .75), (.75, .75, 0.), (0., .75, .75), (0., 0., 0.) ] elif not isinstance(palette, str): err = ""set_color_codes requires a named seaborn palette"" raise TypeError(err) elif palette in SEABORN_PALETTES: if not palette.endswith(""6""): palette = palette + ""6"" colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)] else: err = f""Cannot set colors with palette '{palette}'"" raise ValueError(err) for code, color in zip(""bgrmyck"", colors): rgb = mpl.colors.colorConverter.to_rgb(color) mpl.colors.colorConverter.colors[code] = rgb mpl.colors.colorConverter.cache[code] = rgb ``` ###Assistant : Change how matplotlib color shorthands are interpreted. Calling this will change how shorthand codes like ""b"" or ""g"" are interpreted by matplotlib in subsequent plots. Parameters ---------- palette : {deep, muted, pastel, dark, bright, colorblind} Named seaborn palette to use as the source of colors. See Also -------- set : Color codes can be set through the high-level seaborn style manager. set_palette : Color codes can also be set through the function that sets the matplotlib color cycle. " 176,"def test_mapped_literal_length_increase_at_runtime_adds_additional_tis(dag_maker, session): from airflow.models import Variable Variable.set(key='arg1', value=[1, 2, 3]) ","Test that when the length of mapped literal increases at runtime, additional ti is added",15,11,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_mapped_literal_length_increase_at_runtime_adds_additional_tis(dag_maker, session): from airflow.models import Variable Variable.set(key='arg1', value=[1, 2, 3]) ``` ###Assistant : Test that when the length of mapped literal increases at runtime, additional ti is added " 177,"def create_png_thumbnail_file(self, thumb_dir): thumb_file = Path(thumb_dir) / Path(f""{self.doc.pk:07}.png"") thumb_file.write_text(""this is a dummy png file"") return thumb_file "," Creates a dummy PNG thumbnail file in the given directory, based on the database Document ",15,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_png_thumbnail_file(self, thumb_dir): thumb_file = Path(thumb_dir) / Path(f""{self.doc.pk:07}.png"") thumb_file.write_text(""this is a dummy png file"") return thumb_file ``` ###Assistant : Creates a dummy PNG thumbnail file in the given directory, based on the database Document " 178,"def test_parse_html(self): assert validate(parse_html(), '"perfectly"valid
HTML').tag == ""html"" with self.assertRaises(ValueError) as cm: validate(parse_html(), None) assert_validationerror(cm.exception, ) "," ValidationError: Unable to parse HTML: can only parse strings (None) ",10,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_parse_html(self): assert validate(parse_html(), '"perfectly"valid
HTML').tag == ""html"" with self.assertRaises(ValueError) as cm: validate(parse_html(), None) assert_validationerror(cm.exception, ) ``` ###Assistant : ValidationError: Unable to parse HTML: can only parse strings (None) " 179,"def _check_feature_names_in(estimator, input_features=None, *, generate_names=True): feature_names_in_ = getattr(estimator, ""feature_names_in_"", None) n_features_in_ = getattr(estimator, ""n_features_in_"", None) if input_features is not None: input_features = np.asarray(input_features, dtype=object) if feature_names_in_ is not None and not np.array_equal( feature_names_in_, input_features ): raise ValueError(""input_features is not equal to feature_names_in_"") if n_features_in_ is not None and len(input_features) != n_features_in_: raise ValueError( ""input_features should have length equal to number of "" f""features ({n_features_in_}), got {len(input_features)}"" ) return input_features if feature_names_in_ is not None: return feature_names_in_ if not generate_names: return # Generates feature names if `n_features_in_` is defined if n_features_in_ is None: raise ValueError(""Unable to generate feature names without n_features_in_"") return np.asarray([f""x{i}"" for i in range(n_features_in_)], dtype=object) ","Check `input_features` and generate names if needed. Commonly used in :term:`get_feature_names_out`. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then names are generated: `[x0, x1, ..., x(n_features_in_)]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. generate_names : bool, default=True Whether to generate names when `input_features` is `None` and `estimator.feature_names_in_` is not defined. This is useful for transformers that validates `input_features` but do not require them in :term:`get_feature_names_out` e.g. `PCA`. Returns ------- feature_names_in : ndarray of str or `None` Feature names in. ",110,107,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_feature_names_in(estimator, input_features=None, *, generate_names=True): feature_names_in_ = getattr(estimator, ""feature_names_in_"", None) n_features_in_ = getattr(estimator, ""n_features_in_"", None) if input_features is not None: input_features = np.asarray(input_features, dtype=object) if feature_names_in_ is not None and not np.array_equal( feature_names_in_, input_features ): raise ValueError(""input_features is not equal to feature_names_in_"") if n_features_in_ is not None and len(input_features) != n_features_in_: raise ValueError( ""input_features should have length equal to number of "" f""features ({n_features_in_}), got {len(input_features)}"" ) return input_features if feature_names_in_ is not None: return feature_names_in_ if not generate_names: return # Generates feature names if `n_features_in_` is defined if n_features_in_ is None: raise ValueError(""Unable to generate feature names without n_features_in_"") return np.asarray([f""x{i}"" for i in range(n_features_in_)], dtype=object) ``` ###Assistant : Check `input_features` and generate names if needed. Commonly used in :term:`get_feature_names_out`. Parameters ---------- input_features : array-like of str or None, default=None Input features. 
- If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then names are generated: `[x0, x1, ..., x(n_features_in_)]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. generate_names : bool, default=True Whether to generate names when `input_features` is `None` and `estimator.feature_names_in_` is not defined. This is useful for transformers that validates `input_features` but do not require them in :term:`get_feature_names_out` e.g. `PCA`. Returns ------- feature_names_in : ndarray of str or `None` Feature names in. " 180,"def all_pairs_bellman_ford_path(G, weight=""weight""): path = single_source_bellman_ford_path # TODO This can be trivially parallelized. for n in G: yield (n, path(G, n, weight=weight)) ","Compute shortest paths between all nodes in a weighted graph. Parameters ---------- G : NetworkX graph weight : string or function (default=""weight"") If this is a string, then edge weights will be accessed via the edge attribute with this key (that is, the weight of the edge joining `u` to `v` will be ``G.edges[u, v][weight]``). If no such edge attribute exists, the weight of the edge is assumed to be one. If this is a function, the weight of an edge is the value returned by the function. The function must accept exactly three positional arguments: the two endpoints of an edge and the dictionary of edge attributes for that edge. The function must return a number. Returns ------- distance : dictionary Dictionary, keyed by source and target, of shortest paths. Examples -------- >>> G = nx.path_graph(5) >>> path = dict(nx.all_pairs_bellman_ford_path(G)) >>> path[0][4] [0, 1, 2, 3, 4] Notes ----- Edge weight attributes must be numerical. Distances are calculated as sums of weighted edges traversed. See Also -------- floyd_warshall, all_pairs_dijkstra_path ",170,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def all_pairs_bellman_ford_path(G, weight=""weight""): path = single_source_bellman_ford_path # TODO This can be trivially parallelized. for n in G: yield (n, path(G, n, weight=weight)) ``` ###Assistant : Compute shortest paths between all nodes in a weighted graph. Parameters ---------- G : NetworkX graph weight : string or function (default=""weight"") If this is a string, then edge weights will be accessed via the edge attribute with this key (that is, the weight of the edge joining `u` to `v` will be ``G.edges[u, v][weight]``). If no such edge attribute exists, the weight of the edge is assumed to be one. If this is a function, the weight of an edge is the value returned by the function. The function must accept exactly three positional arguments: the two endpoints of an edge and the dictionary of edge attributes for that edge. The function must return a number. Returns ------- distance : dictionary Dictionary, keyed by source and target, of shortest paths. Examples -------- >>> G = nx.path_graph(5) >>> path = dict(nx.all_pairs_bellman_ford_path(G)) >>> path[0][4] [0, 1, 2, 3, 4] Notes ----- Edge weight attributes must be numerical. Distances are calculated as sums of weighted edges traversed. 
See Also -------- floyd_warshall, all_pairs_dijkstra_path " 181,"def update_cost_in_all_boms_in_test(): log = enqueue_update_cost() # create BOM Update Log while log.status != ""Completed"": resume_bom_cost_update_jobs() # run cron job until complete log.reload() return log "," Utility to run 'Update Cost' job in tests without Cron job until fully complete. ",14,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_cost_in_all_boms_in_test(): log = enqueue_update_cost() # create BOM Update Log while log.status != ""Completed"": resume_bom_cost_update_jobs() # run cron job until complete log.reload() return log ``` ###Assistant : Utility to run 'Update Cost' job in tests without Cron job until fully complete. " 182,"def clear(self) -> None: self.row_count = 0 self._clear_caches() self._y_offsets.clear() self.data.clear() self.rows.clear() self._line_no = 0 self._require_update_dimensions = True self.refresh() ","Clear the table. Args: columns (bool, optional): Also clear the columns. Defaults to False. ",14,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clear(self) -> None: self.row_count = 0 self._clear_caches() self._y_offsets.clear() self.data.clear() self.rows.clear() self._line_no = 0 self._require_update_dimensions = True self.refresh() ``` ###Assistant : Clear the table. Args: columns (bool, optional): Also clear the columns. Defaults to False. " 183,"def copyFile(source_path, dest_path): while 1: try: shutil.copyfile(source_path, dest_path) except PermissionError as e: if e.errno != errno.EACCES: raise general.warning(""Problem copying file %s:"" % e) try: reply = raw_input(""Retry? (YES/no) "") or ""yes"" except EOFError: reply = ""no"" if reply.upper() == ""YES"": continue raise break ","Improved version of shutil.copy This handles errors with a chance to correct them, e.g. on Windows, files might be locked by running program or virus checkers. ",26,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def copyFile(source_path, dest_path): while 1: try: shutil.copyfile(source_path, dest_path) except PermissionError as e: if e.errno != errno.EACCES: raise general.warning(""Problem copying file %s:"" % e) try: reply = raw_input(""Retry? (YES/no) "") or ""yes"" except EOFError: reply = ""no"" if reply.upper() == ""YES"": continue raise break ``` ###Assistant : Improved version of shutil.copy This handles errors with a chance to correct them, e.g. on Windows, files might be locked by running program or virus checkers. " 184,"def test_iforest(global_random_seed): X_train = np.array([[0, 1], [1, 2]]) X_test = np.array([[2, 1], [1, 1]]) grid = ParameterGrid( {""n_estimators"": [3], ""max_samples"": [0.5, 1.0, 3], ""bootstrap"": [True, False]} ) with ignore_warnings(): for params in grid: IsolationForest(random_state=global_random_seed, **params).fit( X_train ).predict(X_test) ",Check Isolation Forest for various parameter settings.,7,37,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_iforest(global_random_seed): X_train = np.array([[0, 1], [1, 2]]) X_test = np.array([[2, 1], [1, 1]]) grid = ParameterGrid( {""n_estimators"": [3], ""max_samples"": [0.5, 1.0, 3], ""bootstrap"": [True, False]} ) with ignore_warnings(): for params in grid: IsolationForest(random_state=global_random_seed, **params).fit( X_train ).predict(X_test) ``` ###Assistant : Check Isolation Forest for various parameter settings. " 185,"def invalidate_caches(self): _register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter, machinery.PathFinder, machinery.WindowsRegistryFinder) ","An optional method for clearing the finder's cache, if any. This method is used by importlib.invalidate_caches(). ",16,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def invalidate_caches(self): _register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter, machinery.PathFinder, machinery.WindowsRegistryFinder) ``` ###Assistant : An optional method for clearing the finder's cache, if any. This method is used by importlib.invalidate_caches(). " 186,"def reap(instance=None, status='failed', excluded_uuids=[]): me = instance if me is None: try: me = Instance.objects.me() except RuntimeError as e: logger.warning(f'Local instance is not registered, not running reaper: {e}') return workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id jobs = UnifiedJob.objects.filter( Q(status='running') & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id) ).exclude(celery_task_id__in=excluded_uuids) for j in jobs: reap_job(j, status) "," Reap all jobs in running for this instance. ",8,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reap(instance=None, status='failed', excluded_uuids=[]): me = instance if me is None: try: me = Instance.objects.me() except RuntimeError as e: logger.warning(f'Local instance is not registered, not running reaper: {e}') return workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id jobs = UnifiedJob.objects.filter( Q(status='running') & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id) ).exclude(celery_task_id__in=excluded_uuids) for j in jobs: reap_job(j, status) ``` ###Assistant : Reap all jobs in running for this instance. " 187,"def closure(self, rel, depth=-1): from nltk.util import acyclic_breadth_first for synset in acyclic_breadth_first(self, rel, depth): if synset != self: yield synset from nltk.util import acyclic_depth_first as acyclic_tree from nltk.util import unweighted_minimum_spanning_tree as mst # Also add this shortcut? 
# from nltk.util import unweighted_minimum_spanning_digraph as umsd "," Return the transitive closure of source under the rel relationship, breadth-first, discarding cycles: >>> from nltk.corpus import wordnet as wn >>> computer = wn.synset('computer.n.01') >>> topic = lambda s:s.topic_domains() >>> print(list(computer.closure(topic))) [Synset('computer_science.n.01')] UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2 Include redundant paths (but only once), avoiding duplicate searches (from 'animal.n.01' to 'entity.n.01'): >>> dog = wn.synset('dog.n.01') >>> hyp = lambda s:s.hypernyms() >>> print(list(dog.closure(hyp))) [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\ Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\ Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\ Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\ Synset('physical_entity.n.01'), Synset('entity.n.01')] UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7 ",88,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def closure(self, rel, depth=-1): from nltk.util import acyclic_breadth_first for synset in acyclic_breadth_first(self, rel, depth): if synset != self: yield synset from nltk.util import acyclic_depth_first as acyclic_tree from nltk.util import unweighted_minimum_spanning_tree as mst # Also add this shortcut? # from nltk.util import unweighted_minimum_spanning_digraph as umsd ``` ###Assistant : Return the transitive closure of source under the rel relationship, breadth-first, discarding cycles: >>> from nltk.corpus import wordnet as wn >>> computer = wn.synset('computer.n.01') >>> topic = lambda s:s.topic_domains() >>> print(list(computer.closure(topic))) [Synset('computer_science.n.01')] UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2 Include redundant paths (but only once), avoiding duplicate searches (from 'animal.n.01' to 'entity.n.01'): >>> dog = wn.synset('dog.n.01') >>> hyp = lambda s:s.hypernyms() >>> print(list(dog.closure(hyp))) [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\ Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\ Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\ Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\ Synset('physical_entity.n.01'), Synset('entity.n.01')] UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7 " 188,"def _get_save_args(self) -> Tuple[int, ...]: filetype = self.config[""format""] args: Tuple[int, ...] = tuple() if filetype == ""jpg"" and self.config[""jpg_quality""] > 0: args = (cv2.IMWRITE_JPEG_QUALITY, # pylint: disable=no-member self.config[""jpg_quality""]) if filetype == ""png"" and self.config[""png_compress_level""] > -1: args = (cv2.IMWRITE_PNG_COMPRESSION, # pylint: disable=no-member self.config[""png_compress_level""]) logger.debug(args) return args "," Obtain the save parameters for the file format. Returns ------- tuple The OpenCV specific arguments for the selected file format ",20,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_save_args(self) -> Tuple[int, ...]: filetype = self.config[""format""] args: Tuple[int, ...] 
= tuple() if filetype == ""jpg"" and self.config[""jpg_quality""] > 0: args = (cv2.IMWRITE_JPEG_QUALITY, # pylint: disable=no-member self.config[""jpg_quality""]) if filetype == ""png"" and self.config[""png_compress_level""] > -1: args = (cv2.IMWRITE_PNG_COMPRESSION, # pylint: disable=no-member self.config[""png_compress_level""]) logger.debug(args) return args ``` ###Assistant : Obtain the save parameters for the file format. Returns ------- tuple The OpenCV specific arguments for the selected file format " 189,"def set_constrained_layout_pads(self, **kwargs): if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine): self.get_layout_engine().set(**kwargs) "," Set padding for ``constrained_layout``. Tip: The parameters can be passed from a dictionary by using ``fig.set_constrained_layout(**pad_dict)``. See :doc:`/tutorials/intermediate/constrainedlayout_guide`. Parameters ---------- w_pad : float, default: :rc:`figure.constrained_layout.w_pad` Width padding in inches. This is the pad around Axes and is meant to make sure there is enough room for fonts to look good. Defaults to 3 pts = 0.04167 inches h_pad : float, default: :rc:`figure.constrained_layout.h_pad` Height padding in inches. Defaults to 3 pts. wspace : float, default: :rc:`figure.constrained_layout.wspace` Width padding between subplots, expressed as a fraction of the subplot width. The total padding ends up being w_pad + wspace. hspace : float, default: :rc:`figure.constrained_layout.hspace` Height padding between subplots, expressed as a fraction of the subplot width. The total padding ends up being h_pad + hspace. ",122,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_constrained_layout_pads(self, **kwargs): if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine): self.get_layout_engine().set(**kwargs) ``` ###Assistant : Set padding for ``constrained_layout``. Tip: The parameters can be passed from a dictionary by using ``fig.set_constrained_layout(**pad_dict)``. See :doc:`/tutorials/intermediate/constrainedlayout_guide`. Parameters ---------- w_pad : float, default: :rc:`figure.constrained_layout.w_pad` Width padding in inches. This is the pad around Axes and is meant to make sure there is enough room for fonts to look good. Defaults to 3 pts = 0.04167 inches h_pad : float, default: :rc:`figure.constrained_layout.h_pad` Height padding in inches. Defaults to 3 pts. wspace : float, default: :rc:`figure.constrained_layout.wspace` Width padding between subplots, expressed as a fraction of the subplot width. The total padding ends up being w_pad + wspace. hspace : float, default: :rc:`figure.constrained_layout.hspace` Height padding between subplots, expressed as a fraction of the subplot width. The total padding ends up being h_pad + hspace. " 190,"def track_current_candle(self): if self.dd.current_candle > self.current_candle: self.get_corr_dataframes = True self.pair_it = 0 self.current_candle = self.dd.current_candle # Following methods which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example. "," Checks if the latest candle appended by the datadrawer is equivalent to the latest candle seen by FreqAI. If not, it asks to refresh the cached corr_dfs, and resets the pair counter. ",32,32,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def track_current_candle(self): if self.dd.current_candle > self.current_candle: self.get_corr_dataframes = True self.pair_it = 0 self.current_candle = self.dd.current_candle # Following methods which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example. ``` ###Assistant : Checks if the latest candle appended by the datadrawer is equivalent to the latest candle seen by FreqAI. If not, it asks to refresh the cached corr_dfs, and resets the pair counter. " 191,"def find(self, req): dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) return dist ","Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. ",64,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find(self, req): dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) return dist ``` ###Assistant : Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. " 192,"def current() -> dict | None: try: ContextStack.top(_FROZEN_CONTEXT_KEY) sample: Sample = {} for ctx in ContextStack.stack(_FROZEN_CONTEXT_KEY): if not isinstance(ctx, dict): raise TypeError(f'Expect architecture to be a dict, found: {ctx}') sample.update(ctx) return sample except NoContextError: return None ","Retrieve the current frozen context. If multiple layers have been found, they would be merged from bottom to top. Returns ------- The sample in frozen context. If no sample is found, return none. ",33,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def current() -> dict | None: try: ContextStack.top(_FROZEN_CONTEXT_KEY) sample: Sample = {} for ctx in ContextStack.stack(_FROZEN_CONTEXT_KEY): if not isinstance(ctx, dict): raise TypeError(f'Expect architecture to be a dict, found: {ctx}') sample.update(ctx) return sample except NoContextError: return None ``` ###Assistant : Retrieve the current frozen context. If multiple layers have been found, they would be merged from bottom to top. Returns ------- The sample in frozen context. If no sample is found, return none. 
" 193,"def test_rept_child() -> None: rows = 10_000 cols = 7 rept_row_count = 5 # these times and sizes are based on the above constants # and Madhavas MacBook Pro 2019 expected_rept_mem_size = 4.010650634765625 expected_rept_ser_size = 7.4926300048828125 macbook_pro_2019_ser_time = 0.18791760900000032 macbook_pro_2019_de_time = 0.1726598199999998 sept = make_sept(rows=rows, cols=cols) rept_rows = [sept.copy() for i in range(rept_row_count)] rept = REPT(rows=rept_rows) start = timeit.default_timer() ser = sy.serialize(rept, to_bytes=True) end = timeit.default_timer() time_ser = end - start start = timeit.default_timer() de = sy.deserialize(ser, from_bytes=True) end = timeit.default_timer() time_de = end - start assert rept == de current_rept_mem_size = size(rept) mem_diff = (current_rept_mem_size / expected_rept_mem_size * 100) - 100 current_rept_bytes_size = size(ser) bytes_diff = (current_rept_bytes_size / expected_rept_ser_size * 100) - 100 ser_time_diff = (time_ser / macbook_pro_2019_ser_time * 100) - 100 de_time_diff = (time_de / macbook_pro_2019_de_time * 100) - 100 print(""REPT Stats"") print(""=========="") print(""In-memory size of REPT"", size(rept)) print(""Serialized size of REPT"", size(ser)) print(f""Serializing {rept_row_count}x{rows}x{cols} took {time_ser} secs"") print(f""Deserializing {rept_row_count}x{rows}x{cols} took {time_de} secs"") print(""Current Results"") print(""==============="") print(f""In-memory size delta: {mem_diff}%"") print(f""Serialized size delta: {bytes_diff}%"") print(f""Serializing time delta: {ser_time_diff}%"") print(f""Deserializing time delta: {de_time_diff}%"") # we want to assert that our calculated values are smaller than the old values with # some tolerance assert (current_rept_mem_size - expected_rept_mem_size) < 1e-3 assert (current_rept_bytes_size - expected_rept_ser_size) < 2e-2 # TODO: make time benchmarks stable (probably can't run in parallel) # assert (time_ser - macbook_pro_2019_ser_time) < 2e-1 # assert (time_de - macbook_pro_2019_de_time) < 2e-1 ",We need to benchmark both the size and time to serialize and deserialize REPTs,14,230,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_rept_child() -> None: rows = 10_000 cols = 7 rept_row_count = 5 # these times and sizes are based on the above constants # and Madhavas MacBook Pro 2019 expected_rept_mem_size = 4.010650634765625 expected_rept_ser_size = 7.4926300048828125 macbook_pro_2019_ser_time = 0.18791760900000032 macbook_pro_2019_de_time = 0.1726598199999998 sept = make_sept(rows=rows, cols=cols) rept_rows = [sept.copy() for i in range(rept_row_count)] rept = REPT(rows=rept_rows) start = timeit.default_timer() ser = sy.serialize(rept, to_bytes=True) end = timeit.default_timer() time_ser = end - start start = timeit.default_timer() de = sy.deserialize(ser, from_bytes=True) end = timeit.default_timer() time_de = end - start assert rept == de current_rept_mem_size = size(rept) mem_diff = (current_rept_mem_size / expected_rept_mem_size * 100) - 100 current_rept_bytes_size = size(ser) bytes_diff = (current_rept_bytes_size / expected_rept_ser_size * 100) - 100 ser_time_diff = (time_ser / macbook_pro_2019_ser_time * 100) - 100 de_time_diff = (time_de / macbook_pro_2019_de_time * 100) - 100 print(""REPT Stats"") print(""=========="") print(""In-memory size of REPT"", size(rept)) print(""Serialized size of REPT"", size(ser)) print(f""Serializing {rept_row_count}x{rows}x{cols} took {time_ser} secs"") print(f""Deserializing {rept_row_count}x{rows}x{cols} took {time_de} secs"") print(""Current Results"") print(""==============="") print(f""In-memory size delta: {mem_diff}%"") print(f""Serialized size delta: {bytes_diff}%"") print(f""Serializing time delta: {ser_time_diff}%"") print(f""Deserializing time delta: {de_time_diff}%"") # we want to assert that our calculated values are smaller than the old values with # some tolerance assert (current_rept_mem_size - expected_rept_mem_size) < 1e-3 assert (current_rept_bytes_size - expected_rept_ser_size) < 2e-2 # TODO: make time benchmarks stable (probably can't run in parallel) # assert (time_ser - macbook_pro_2019_ser_time) < 2e-1 # assert (time_de - macbook_pro_2019_de_time) < 2e-1 ``` ###Assistant : We need to benchmark both the size and time to serialize and deserialize REPTs " 194,"def _ecm_one_factor(n, B1=10000, B2=100000, max_curve=200): n = as_int(n) if B1 % 2 != 0 or B2 % 2 != 0: raise ValueError(""The Bounds should be an even integer"") sieve.extend(B2) if isprime(n): return n from sympy.functions.elementary.miscellaneous import sqrt from sympy.polys.polytools import gcd curve = 0 D = int(sqrt(B2)) beta = [0]*(D + 1) S = [0]*(D + 1) k = 1 for p in sieve.primerange(1, B1 + 1): k *= pow(p, integer_log(B1, p)[0]) while(curve <= max_curve): curve += 1 #Suyama's Paramatrization sigma = rgen.randint(6, n - 1) u = (sigma*sigma - 5) % n v = (4*sigma) % n diff = v - u u_3 = pow(u, 3, n) try: C = (pow(diff, 3, n)*(3*u + v)*mod_inverse(4*u_3*v, n) - 2) % n except ValueError: #If the mod_inverse(4*u_3*v, n) doesn't exist return gcd(4*u_3*v, n) a24 = (C + 2)*mod_inverse(4, n) % n Q = Point(u_3, pow(v, 3, n), a24, n) Q = Q.mont_ladder(k) g = gcd(Q.z_cord, n) #Stage 1 factor if g != 1 and g != n: return g #Stage 1 failure. 
Q.z = 0, Try another curve elif g == n: continue #Stage 2 - Improved Standard Continuation S[1] = Q.double() S[2] = S[1].double() beta[1] = (S[1].x_cord*S[1].z_cord) % n beta[2] = (S[2].x_cord*S[2].z_cord) % n for d in range(3, D + 1): S[d] = S[d - 1].add(S[1], S[d - 2]) beta[d] = (S[d].x_cord*S[d].z_cord) % n g = 1 B = B1 - 1 T = Q.mont_ladder(B - 2*D) R = Q.mont_ladder(B) for r in range(B, B2, 2*D): alpha = (R.x_cord*R.z_cord) % n for q in sieve.primerange(r + 2, r + 2*D + 1): delta = (q - r) // 2 f = (R.x_cord - S[d].x_cord)*(R.z_cord + S[d].z_cord) -\ alpha + beta[delta] g = (g*f) % n #Swap T, R = R, R.add(S[D], T) g = gcd(n, g) #Stage 2 Factor found if g != 1 and g != n: return g #ECM failed, Increase the bounds raise ValueError(""Increase the bounds"") ","Returns one factor of n using Lenstra's 2 Stage Elliptic curve Factorization with Suyama's Parameterization. Here Montgomery arithmetic is used for fast computation of addition and doubling of points in elliptic curve. This ECM method considers elliptic curves in Montgomery form (E : b*y**2*z = x**3 + a*x**2*z + x*z**2) and involves elliptic curve operations (mod N), where the elements in Z are reduced (mod N). Since N is not a prime, E over FF(N) is not really an elliptic curve but we can still do point additions and doubling as if FF(N) was a field. Stage 1 : The basic algorithm involves taking a random point (P) on an elliptic curve in FF(N). The compute k*P using Montgomery ladder algorithm. Let q be an unknown factor of N. Then the order of the curve E, |E(FF(q))|, might be a smooth number that divides k. Then we have k = l * |E(FF(q))| for some l. For any point belonging to the curve E, |E(FF(q))|*P = O, hence k*P = l*|E(FF(q))|*P. Thus kP.z_cord = 0 (mod q), and the unknownn factor of N (q) can be recovered by taking gcd(kP.z_cord, N). Stage 2 : This is a continuation of Stage 1 if k*P != O. The idea utilize the fact that even if kP != 0, the value of k might miss just one large prime divisor of |E(FF(q))|. In this case we only need to compute the scalar multiplication by p to get p*k*P = O. Here a second bound B2 restrict the size of possible values of p. Parameters ========== n : Number to be Factored B1 : Stage 1 Bound B2 : Stage 2 Bound max_curve : Maximum number of curves generated References ========== .. [1] Carl Pomerance and Richard Crandall ""Prime Numbers: A Computational Perspective"" (2nd Ed.), page 344 ",303,319,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _ecm_one_factor(n, B1=10000, B2=100000, max_curve=200): n = as_int(n) if B1 % 2 != 0 or B2 % 2 != 0: raise ValueError(""The Bounds should be an even integer"") sieve.extend(B2) if isprime(n): return n from sympy.functions.elementary.miscellaneous import sqrt from sympy.polys.polytools import gcd curve = 0 D = int(sqrt(B2)) beta = [0]*(D + 1) S = [0]*(D + 1) k = 1 for p in sieve.primerange(1, B1 + 1): k *= pow(p, integer_log(B1, p)[0]) while(curve <= max_curve): curve += 1 #Suyama's Paramatrization sigma = rgen.randint(6, n - 1) u = (sigma*sigma - 5) % n v = (4*sigma) % n diff = v - u u_3 = pow(u, 3, n) try: C = (pow(diff, 3, n)*(3*u + v)*mod_inverse(4*u_3*v, n) - 2) % n except ValueError: #If the mod_inverse(4*u_3*v, n) doesn't exist return gcd(4*u_3*v, n) a24 = (C + 2)*mod_inverse(4, n) % n Q = Point(u_3, pow(v, 3, n), a24, n) Q = Q.mont_ladder(k) g = gcd(Q.z_cord, n) #Stage 1 factor if g != 1 and g != n: return g #Stage 1 failure. 
Q.z = 0, Try another curve elif g == n: continue #Stage 2 - Improved Standard Continuation S[1] = Q.double() S[2] = S[1].double() beta[1] = (S[1].x_cord*S[1].z_cord) % n beta[2] = (S[2].x_cord*S[2].z_cord) % n for d in range(3, D + 1): S[d] = S[d - 1].add(S[1], S[d - 2]) beta[d] = (S[d].x_cord*S[d].z_cord) % n g = 1 B = B1 - 1 T = Q.mont_ladder(B - 2*D) R = Q.mont_ladder(B) for r in range(B, B2, 2*D): alpha = (R.x_cord*R.z_cord) % n for q in sieve.primerange(r + 2, r + 2*D + 1): delta = (q - r) // 2 f = (R.x_cord - S[d].x_cord)*(R.z_cord + S[d].z_cord) -\ alpha + beta[delta] g = (g*f) % n #Swap T, R = R, R.add(S[D], T) g = gcd(n, g) #Stage 2 Factor found if g != 1 and g != n: return g #ECM failed, Increase the bounds raise ValueError(""Increase the bounds"") ``` ###Assistant : Returns one factor of n using Lenstra's 2 Stage Elliptic curve Factorization with Suyama's Parameterization. Here Montgomery arithmetic is used for fast computation of addition and doubling of points in elliptic curve. This ECM method considers elliptic curves in Montgomery form (E : b*y**2*z = x**3 + a*x**2*z + x*z**2) and involves elliptic curve operations (mod N), where the elements in Z are reduced (mod N). Since N is not a prime, E over FF(N) is not really an elliptic curve but we can still do point additions and doubling as if FF(N) was a field. Stage 1 : The basic algorithm involves taking a random point (P) on an elliptic curve in FF(N). The compute k*P using Montgomery ladder algorithm. Let q be an unknown factor of N. Then the order of the curve E, |E(FF(q))|, might be a smooth number that divides k. Then we have k = l * |E(FF(q))| for some l. For any point belonging to the curve E, |E(FF(q))|*P = O, hence k*P = l*|E(FF(q))|*P. Thus kP.z_cord = 0 (mod q), and the unknownn factor of N (q) can be recovered by taking gcd(kP.z_cord, N). Stage 2 : This is a continuation of Stage 1 if k*P != O. The idea utilize the fact that even if kP != 0, the value of k might miss just one large prime divisor of |E(FF(q))|. In this case we only need to compute the scalar multiplication by p to get p*k*P = O. Here a second bound B2 restrict the size of possible values of p. Parameters ========== n : Number to be Factored B1 : Stage 1 Bound B2 : Stage 2 Bound max_curve : Maximum number of curves generated References ========== .. [1] Carl Pomerance and Richard Crandall ""Prime Numbers: A Computational Perspective"" (2nd Ed.), page 344 " 195,"async def test_unique_id(hass): await setup_test_entity( hass, { ""unique"": { ""command_open"": ""echo open"", ""command_close"": ""echo close"", ""command_stop"": ""echo stop"", ""unique_id"": ""unique"", }, ""not_unique_1"": { ""command_open"": ""echo open"", ""command_close"": ""echo close"", ""command_stop"": ""echo stop"", ""unique_id"": ""not-so-unique-anymore"", }, ""not_unique_2"": { ""command_open"": ""echo open"", ""command_close"": ""echo close"", ""command_stop"": ""echo stop"", ""unique_id"": ""not-so-unique-anymore"", }, }, ) assert len(hass.states.async_all()) == 2 ent_reg = entity_registry.async_get(hass) assert len(ent_reg.entities) == 2 assert ent_reg.async_get_entity_id(""cover"", ""command_line"", ""unique"") is not None assert ( ent_reg.async_get_entity_id(""cover"", ""command_line"", ""not-so-unique-anymore"") is not None ) ",Test unique_id option and if it only creates one cover per id.,12,78,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_unique_id(hass): await setup_test_entity( hass, { ""unique"": { ""command_open"": ""echo open"", ""command_close"": ""echo close"", ""command_stop"": ""echo stop"", ""unique_id"": ""unique"", }, ""not_unique_1"": { ""command_open"": ""echo open"", ""command_close"": ""echo close"", ""command_stop"": ""echo stop"", ""unique_id"": ""not-so-unique-anymore"", }, ""not_unique_2"": { ""command_open"": ""echo open"", ""command_close"": ""echo close"", ""command_stop"": ""echo stop"", ""unique_id"": ""not-so-unique-anymore"", }, }, ) assert len(hass.states.async_all()) == 2 ent_reg = entity_registry.async_get(hass) assert len(ent_reg.entities) == 2 assert ent_reg.async_get_entity_id(""cover"", ""command_line"", ""unique"") is not None assert ( ent_reg.async_get_entity_id(""cover"", ""command_line"", ""not-so-unique-anymore"") is not None ) ``` ###Assistant : Test unique_id option and if it only creates one cover per id. " 196,"def mock_ssl_context(): with patch( ""homeassistant.components.mqtt.config_flow.SSLContext"" ) as mock_context, patch( ""homeassistant.components.mqtt.config_flow.load_pem_private_key"" ) as mock_key_check, patch( ""homeassistant.components.mqtt.config_flow.load_pem_x509_certificate"" ) as mock_cert_check: yield { ""context"": mock_context, ""load_pem_x509_certificate"": mock_cert_check, ""load_pem_private_key"": mock_key_check, } @pytest.fixture",Mock the SSL context used to load the cert chain and to load verify locations.,15,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mock_ssl_context(): with patch( ""homeassistant.components.mqtt.config_flow.SSLContext"" ) as mock_context, patch( ""homeassistant.components.mqtt.config_flow.load_pem_private_key"" ) as mock_key_check, patch( ""homeassistant.components.mqtt.config_flow.load_pem_x509_certificate"" ) as mock_cert_check: yield { ""context"": mock_context, ""load_pem_x509_certificate"": mock_cert_check, ""load_pem_private_key"": mock_key_check, } @pytest.fixture ``` ###Assistant : Mock the SSL context used to load the cert chain and to load verify locations. " 197,"def test_task_states_for_dag_run_when_dag_run_not_exists(self): with pytest.raises(DagRunNotFound): default_date2 = timezone.datetime(2016, 1, 9) task_command.task_states_for_dag_run( self.parser.parse_args( [ 'tasks', 'states-for-dag-run', 'not_exists_dag', default_date2.isoformat(), '--output', ""json"", ] ) ) "," task_states_for_dag_run should return an AirflowException when invalid dag id is passed ",11,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_task_states_for_dag_run_when_dag_run_not_exists(self): with pytest.raises(DagRunNotFound): default_date2 = timezone.datetime(2016, 1, 9) task_command.task_states_for_dag_run( self.parser.parse_args( [ 'tasks', 'states-for-dag-run', 'not_exists_dag', default_date2.isoformat(), '--output', ""json"", ] ) ) ``` ###Assistant : task_states_for_dag_run should return an AirflowException when invalid dag id is passed " 198,"def test_title_present(self): response = self.get(4) self.assertContains(response, ""Christmas"", 3) "," The page title should appear three times. Once in the header, and two times in the field listing (as the actual title and as the draft title) ",27,8,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_title_present(self): response = self.get(4) self.assertContains(response, ""Christmas"", 3) ``` ###Assistant : The page title should appear three times. Once in the header, and two times in the field listing (as the actual title and as the draft title) " 199,"def _set_position(self, pos, which='both'): if not isinstance(pos, mtransforms.BboxBase): pos = mtransforms.Bbox.from_bounds(*pos) for ax in self._twinned_axes.get_siblings(self): if which in ('both', 'active'): ax._position.set(pos) if which in ('both', 'original'): ax._originalPosition.set(pos) self.stale = True "," Private version of set_position. Call this internally to get the same functionality of `set_position`, but not to take the axis out of the constrained_layout hierarchy. ",25,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_position(self, pos, which='both'): if not isinstance(pos, mtransforms.BboxBase): pos = mtransforms.Bbox.from_bounds(*pos) for ax in self._twinned_axes.get_siblings(self): if which in ('both', 'active'): ax._position.set(pos) if which in ('both', 'original'): ax._originalPosition.set(pos) self.stale = True ``` ###Assistant : Private version of set_position. Call this internally to get the same functionality of `set_position`, but not to take the axis out of the constrained_layout hierarchy. " 200,"def hvac_modes(self) -> list[HVACMode]: hvac_state = HVAC_MODES_MAPPING[self._climate.get_hvac_state()] return [HVACMode.AUTO, hvac_state] ","Return the list of available hvac operation modes. HEAT and COOL mode are exclusive. End user has to enable a mode manually within the Somfy application. So only one mode can be displayed. Auto mode is a scheduler. ",38,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def hvac_modes(self) -> list[HVACMode]: hvac_state = HVAC_MODES_MAPPING[self._climate.get_hvac_state()] return [HVACMode.AUTO, hvac_state] ``` ###Assistant : Return the list of available hvac operation modes. HEAT and COOL mode are exclusive. End user has to enable a mode manually within the Somfy application. So only one mode can be displayed. Auto mode is a scheduler. " 201,"def _assert_expected_task_states(self, dagrun, expected_states): tis = dagrun.get_task_instances() for ti in tis: try: expected_state = expected_states[ti.task_id] except KeyError: raise ValueError(f""Invalid task id {ti.task_id} found!"") else: assert ti.state == expected_state all_downstream_skipped_states = { ""short_circuit"": State.SUCCESS, ""op1"": State.SKIPPED, ""op2"": State.SKIPPED, } all_success_states = {""short_circuit"": State.SUCCESS, ""op1"": State.SUCCESS, ""op2"": State.SUCCESS} ",Helper function that asserts `TaskInstances` of a given `task_id` are in a given state.,14,46,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _assert_expected_task_states(self, dagrun, expected_states): tis = dagrun.get_task_instances() for ti in tis: try: expected_state = expected_states[ti.task_id] except KeyError: raise ValueError(f""Invalid task id {ti.task_id} found!"") else: assert ti.state == expected_state all_downstream_skipped_states = { ""short_circuit"": State.SUCCESS, ""op1"": State.SKIPPED, ""op2"": State.SKIPPED, } all_success_states = {""short_circuit"": State.SUCCESS, ""op1"": State.SUCCESS, ""op2"": State.SUCCESS} ``` ###Assistant : Helper function that asserts `TaskInstances` of a given `task_id` are in a given state. " 202,"def set_customer_info(fieldname, customer, value=""""): if fieldname == ""loyalty_program"": frappe.db.set_value(""Customer"", customer, ""loyalty_program"", value) contact = frappe.get_cached_value(""Customer"", customer, ""customer_primary_contact"") if not contact: contact = frappe.db.sql( , (customer), as_dict=1, ) contact = contact[0].get(""parent"") if contact else None if not contact: new_contact = frappe.new_doc(""Contact"") new_contact.is_primary_contact = 1 new_contact.first_name = customer new_contact.set(""links"", [{""link_doctype"": ""Customer"", ""link_name"": customer}]) new_contact.save() contact = new_contact.name frappe.db.set_value(""Customer"", customer, ""customer_primary_contact"", contact) contact_doc = frappe.get_doc(""Contact"", contact) if fieldname == ""email_id"": contact_doc.set(""email_ids"", [{""email_id"": value, ""is_primary"": 1}]) frappe.db.set_value(""Customer"", customer, ""email_id"", value) elif fieldname == ""mobile_no"": contact_doc.set(""phone_nos"", [{""phone"": value, ""is_primary_mobile_no"": 1}]) frappe.db.set_value(""Customer"", customer, ""mobile_no"", value) contact_doc.save() @frappe.whitelist()"," SELECT parent FROM `tabDynamic Link` WHERE parenttype = 'Contact' AND parentfield = 'links' AND link_doctype = 'Customer' AND link_name = %s ",21,91,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def set_customer_info(fieldname, customer, value=""""): if fieldname == ""loyalty_program"": frappe.db.set_value(""Customer"", customer, ""loyalty_program"", value) contact = frappe.get_cached_value(""Customer"", customer, ""customer_primary_contact"") if not contact: contact = frappe.db.sql( , (customer), as_dict=1, ) contact = contact[0].get(""parent"") if contact else None if not contact: new_contact = frappe.new_doc(""Contact"") new_contact.is_primary_contact = 1 new_contact.first_name = customer new_contact.set(""links"", [{""link_doctype"": ""Customer"", ""link_name"": customer}]) new_contact.save() contact = new_contact.name frappe.db.set_value(""Customer"", customer, ""customer_primary_contact"", contact) contact_doc = frappe.get_doc(""Contact"", contact) if fieldname == ""email_id"": contact_doc.set(""email_ids"", [{""email_id"": value, ""is_primary"": 1}]) frappe.db.set_value(""Customer"", customer, ""email_id"", value) elif fieldname == ""mobile_no"": contact_doc.set(""phone_nos"", [{""phone"": value, ""is_primary_mobile_no"": 1}]) frappe.db.set_value(""Customer"", customer, ""mobile_no"", value) contact_doc.save() @frappe.whitelist() ``` ###Assistant : SELECT parent FROM `tabDynamic Link` WHERE parenttype = 'Contact' AND parentfield = 'links' AND link_doctype = 'Customer' AND link_name = %s " 203,"def test_unpublish_not_include_children_view_post(self): # Post to the unpublish page response = self.client.post( reverse(""wagtailadmin_pages:unpublish"", args=(self.test_page.id,)), {} ) # Should be redirected to explorer page self.assertRedirects( response, reverse(""wagtailadmin_explore"", args=(self.root_page.id,)) ) # Check that the page was unpublished self.assertFalse(SimplePage.objects.get(id=self.test_page.id).live) # Check that the descendant pages were not unpublished self.assertTrue(SimplePage.objects.get(id=self.test_child_page.id).live) self.assertTrue(SimplePage.objects.get(id=self.test_another_child_page.id).live) "," This posts to the unpublish view and checks that the page was unpublished but its descendants were not ",18,46,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_unpublish_not_include_children_view_post(self): # Post to the unpublish page response = self.client.post( reverse(""wagtailadmin_pages:unpublish"", args=(self.test_page.id,)), {} ) # Should be redirected to explorer page self.assertRedirects( response, reverse(""wagtailadmin_explore"", args=(self.root_page.id,)) ) # Check that the page was unpublished self.assertFalse(SimplePage.objects.get(id=self.test_page.id).live) # Check that the descendant pages were not unpublished self.assertTrue(SimplePage.objects.get(id=self.test_child_page.id).live) self.assertTrue(SimplePage.objects.get(id=self.test_another_child_page.id).live) ``` ###Assistant : This posts to the unpublish view and checks that the page was unpublished but its descendants were not " 204,"def _get_data_from_filepath(self, filepath_or_buffer): # if it is a string but the file does not exist, it might be a JSON string filepath_or_buffer = stringify_path(filepath_or_buffer) if ( not isinstance(filepath_or_buffer, str) or is_url(filepath_or_buffer) or is_fsspec_url(filepath_or_buffer) or file_exists(filepath_or_buffer) ): self.handles = get_handle( filepath_or_buffer, ""r"", encoding=self.encoding, compression=self.compression, storage_options=self.storage_options, errors=self.encoding_errors, ) filepath_or_buffer = self.handles.handle elif ( isinstance(filepath_or_buffer, str) and filepath_or_buffer.lower().endswith( ("".json"",) + tuple(f"".json{c}"" for c in _extension_to_compression) ) and not file_exists(filepath_or_buffer) ): raise FileNotFoundError(f""File {filepath_or_buffer} does not exist"") return filepath_or_buffer "," The function read_json accepts three input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO) 3. JSON string This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. It raises FileNotFoundError if the input is a string ending in one of .json, .json.gz, .json.bz2, etc. but no such file exists. ",64,75,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_data_from_filepath(self, filepath_or_buffer): # if it is a string but the file does not exist, it might be a JSON string filepath_or_buffer = stringify_path(filepath_or_buffer) if ( not isinstance(filepath_or_buffer, str) or is_url(filepath_or_buffer) or is_fsspec_url(filepath_or_buffer) or file_exists(filepath_or_buffer) ): self.handles = get_handle( filepath_or_buffer, ""r"", encoding=self.encoding, compression=self.compression, storage_options=self.storage_options, errors=self.encoding_errors, ) filepath_or_buffer = self.handles.handle elif ( isinstance(filepath_or_buffer, str) and filepath_or_buffer.lower().endswith( ("".json"",) + tuple(f"".json{c}"" for c in _extension_to_compression) ) and not file_exists(filepath_or_buffer) ): raise FileNotFoundError(f""File {filepath_or_buffer} does not exist"") return filepath_or_buffer ``` ###Assistant : The function read_json accepts three input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO) 3. JSON string This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. It raises FileNotFoundError if the input is a string ending in one of .json, .json.gz, .json.bz2, etc. but no such file exists. 
" 205,"def _run_pip(args, additional_paths=None): # Run the bootstraping in a subprocess to avoid leaking any state that happens # after pip has executed. Particulary, this avoids the case when pip holds onto # the files in *additional_paths*, preventing us to remove them at the end of the # invocation. code = f return subprocess.run([sys.executable, '-W', 'ignore::DeprecationWarning', ""-c"", code], check=True).returncode "," import runpy import sys sys.path = {additional_paths or []} + sys.path sys.argv[1:] = {args} runpy.run_module(""pip"", run_name=""__main__"", alter_sys=True) ",17,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _run_pip(args, additional_paths=None): # Run the bootstraping in a subprocess to avoid leaking any state that happens # after pip has executed. Particulary, this avoids the case when pip holds onto # the files in *additional_paths*, preventing us to remove them at the end of the # invocation. code = f return subprocess.run([sys.executable, '-W', 'ignore::DeprecationWarning', ""-c"", code], check=True).returncode ``` ###Assistant : import runpy import sys sys.path = {additional_paths or []} + sys.path sys.argv[1:] = {args} runpy.run_module(""pip"", run_name=""__main__"", alter_sys=True) " 206,"def test_pprint_heap_allocated_type(): module_name = ""xxlimited"" if sys.version_info < (3, 10) else ""xxlimited_35"" expected_output = ( ""xxlimited.Null"" if sys.version_info < (3, 11) else ""xxlimited_35.Null"" ) xxlimited = pytest.importorskip(module_name) output = pretty.pretty(xxlimited.Null) assert output == expected_output "," Test that pprint works for heap allocated types. ",8,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pprint_heap_allocated_type(): module_name = ""xxlimited"" if sys.version_info < (3, 10) else ""xxlimited_35"" expected_output = ( ""xxlimited.Null"" if sys.version_info < (3, 11) else ""xxlimited_35.Null"" ) xxlimited = pytest.importorskip(module_name) output = pretty.pretty(xxlimited.Null) assert output == expected_output ``` ###Assistant : Test that pprint works for heap allocated types. " 207,"def test_orderby_percentile_with_many_fields_transactions_unsupported_fields(self): response = self.get_response( self.organization.slug, field=[ ""p50(sentry.transactions.measurements.lcp)"", ""sum(user_misery)"", ], statsPeriod=""1h"", interval=""1h"", datasource=""snuba"", groupBy=[""project_id"", ""transaction""], orderBy=""p50(sentry.transactions.measurements.lcp)"", ) assert response.status_code == 400 assert ( response.json()[""detail""] == ""Multi-field select order by queries is not supported for metric user_misery"" ) "," Test that contains a field in the `select` that is performance related but currently not supported should return a 400 ",20,37,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_orderby_percentile_with_many_fields_transactions_unsupported_fields(self): response = self.get_response( self.organization.slug, field=[ ""p50(sentry.transactions.measurements.lcp)"", ""sum(user_misery)"", ], statsPeriod=""1h"", interval=""1h"", datasource=""snuba"", groupBy=[""project_id"", ""transaction""], orderBy=""p50(sentry.transactions.measurements.lcp)"", ) assert response.status_code == 400 assert ( response.json()[""detail""] == ""Multi-field select order by queries is not supported for metric user_misery"" ) ``` ###Assistant : Test that contains a field in the `select` that is performance related but currently not supported should return a 400 " 208,"def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTarget]) -> t.Dict[str, t.Set[IntegrationTarget]] targets_dict = dict((target.name, target) for target in integration_targets) target_dependencies = analyze_integration_target_dependencies(integration_targets) dependency_map = {} # type: t.Dict[str, t.Set[IntegrationTarget]] invalid_targets = set() for dependency, dependents in target_dependencies.items(): dependency_target = targets_dict.get(dependency) if not dependency_target: invalid_targets.add(dependency) continue for dependent in dependents: if dependent not in dependency_map: dependency_map[dependent] = set() dependency_map[dependent].add(dependency_target) if invalid_targets: raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets))) return dependency_map ",Analyze the given list of integration test targets and return a dictionary expressing target names and the targets on which they depend.,22,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTarget]) -> t.Dict[str, t.Set[IntegrationTarget]] targets_dict = dict((target.name, target) for target in integration_targets) target_dependencies = analyze_integration_target_dependencies(integration_targets) dependency_map = {} # type: t.Dict[str, t.Set[IntegrationTarget]] invalid_targets = set() for dependency, dependents in target_dependencies.items(): dependency_target = targets_dict.get(dependency) if not dependency_target: invalid_targets.add(dependency) continue for dependent in dependents: if dependent not in dependency_map: dependency_map[dependent] = set() dependency_map[dependent].add(dependency_target) if invalid_targets: raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets))) return dependency_map ``` ###Assistant : Analyze the given list of integration test targets and return a dictionary expressing target names and the targets on which they depend. " 209,"def text(self) -> str: if self.error is None: return str(self) return self.error.databaseText() ","Get a short text description of the error. This is a string suitable to show to the user as error message. ",21,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def text(self) -> str: if self.error is None: return str(self) return self.error.databaseText() ``` ###Assistant : Get a short text description of the error. This is a string suitable to show to the user as error message. 
" 210,"def test_recover_start_from_replica_actor_names(serve_instance): # Test failed to deploy with total of 2 replicas, # but first constructor call fails.","Test controller is able to recover starting -> running replicas from actor names. ",13,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_recover_start_from_replica_actor_names(serve_instance): # Test failed to deploy with total of 2 replicas, # but first constructor call fails. ``` ###Assistant : Test controller is able to recover starting -> running replicas from actor names. " 211,"def associated_entities(self) -> list[dict[str, Any]]: ha_entity_registry = self.device.gateway.ha_entity_registry zha_device_registry = self.device.gateway.device_registry return [ GroupEntityReference( ha_entity_registry.async_get(entity_ref.reference_id).name, ha_entity_registry.async_get(entity_ref.reference_id).original_name, entity_ref.reference_id, )._asdict() for entity_ref in zha_device_registry.get(self.device.ieee) if list(entity_ref.cluster_channels.values())[ 0 ].cluster.endpoint.endpoint_id == self.endpoint_id ] ",Return the list of entities that were derived from this endpoint.,11,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def associated_entities(self) -> list[dict[str, Any]]: ha_entity_registry = self.device.gateway.ha_entity_registry zha_device_registry = self.device.gateway.device_registry return [ GroupEntityReference( ha_entity_registry.async_get(entity_ref.reference_id).name, ha_entity_registry.async_get(entity_ref.reference_id).original_name, entity_ref.reference_id, )._asdict() for entity_ref in zha_device_registry.get(self.device.ieee) if list(entity_ref.cluster_channels.values())[ 0 ].cluster.endpoint.endpoint_id == self.endpoint_id ] ``` ###Assistant : Return the list of entities that were derived from this endpoint. " 212,"def getheader(self, name, default=None): if self.headers is None: raise ResponseNotReady() headers = self.headers.get_all(name) or default if isinstance(headers, str) or not hasattr(headers, '__iter__'): return headers else: return ', '.join(headers) ","Returns the value of the header matching *name*. If there are multiple matching headers, the values are combined into a single string separated by commas and spaces. If no matching header is found, returns *default* or None if the *default* is not specified. If the headers are unknown, raises http.client.ResponseNotReady. ",50,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getheader(self, name, default=None): if self.headers is None: raise ResponseNotReady() headers = self.headers.get_all(name) or default if isinstance(headers, str) or not hasattr(headers, '__iter__'): return headers else: return ', '.join(headers) ``` ###Assistant : Returns the value of the header matching *name*. If there are multiple matching headers, the values are combined into a single string separated by commas and spaces. If no matching header is found, returns *default* or None if the *default* is not specified. If the headers are unknown, raises http.client.ResponseNotReady. 
" 213,"def test_generate_pipeline_code_2(): pipeline = [ 'KNeighborsClassifier', [ 'CombineDFs', [ 'GradientBoostingClassifier', 'input_matrix', 38.0, 5, 5, 5, 0.05, 0.5], [ 'CombineDFs', [ 'MinMaxScaler', 'input_matrix' ], ['ZeroCount', [ 'MaxAbsScaler', 'input_matrix' ] ] ] ], 18, 'uniform', 2 ] expected_code = assert expected_code == generate_pipeline_code(pipeline, tpot_obj.operators) ","Assert that generate_pipeline_code() returns the correct code given a specific pipeline with two CombineDFs.make_pipeline( make_union( StackingEstimator(estimator=GradientBoostingClassifier(learning_rate=38.0, max_depth=5, max_features=5, min_samples_leaf=5, min_samples_split=0.05, n_estimators=0.5)), make_union( MinMaxScaler(), make_pipeline( MaxAbsScaler(), ZeroCount() ) ) ), KNeighborsClassifier(n_neighbors=18, p=""uniform"", weights=2) )",33,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_generate_pipeline_code_2(): pipeline = [ 'KNeighborsClassifier', [ 'CombineDFs', [ 'GradientBoostingClassifier', 'input_matrix', 38.0, 5, 5, 5, 0.05, 0.5], [ 'CombineDFs', [ 'MinMaxScaler', 'input_matrix' ], ['ZeroCount', [ 'MaxAbsScaler', 'input_matrix' ] ] ] ], 18, 'uniform', 2 ] expected_code = assert expected_code == generate_pipeline_code(pipeline, tpot_obj.operators) ``` ###Assistant : Assert that generate_pipeline_code() returns the correct code given a specific pipeline with two CombineDFs.make_pipeline( make_union( StackingEstimator(estimator=GradientBoostingClassifier(learning_rate=38.0, max_depth=5, max_features=5, min_samples_leaf=5, min_samples_split=0.05, n_estimators=0.5)), make_union( MinMaxScaler(), make_pipeline( MaxAbsScaler(), ZeroCount() ) ) ), KNeighborsClassifier(n_neighbors=18, p=""uniform"", weights=2) ) " 214,"def _script_names(dist, script_name, is_gui): # type: (Distribution, str, bool) -> List[str] if dist_in_usersite(dist): bin_dir = get_bin_user() else: bin_dir = get_bin_prefix() exe_name = os.path.join(bin_dir, script_name) paths_to_remove = [exe_name] if WINDOWS: paths_to_remove.append(exe_name + '.exe') paths_to_remove.append(exe_name + '.exe.manifest') if is_gui: paths_to_remove.append(exe_name + '-script.pyw') else: paths_to_remove.append(exe_name + '-script.py') return paths_to_remove ","Create the fully qualified name of the files created by {console,gui}_scripts for the given ``dist``. Returns the list of file names ",21,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _script_names(dist, script_name, is_gui): # type: (Distribution, str, bool) -> List[str] if dist_in_usersite(dist): bin_dir = get_bin_user() else: bin_dir = get_bin_prefix() exe_name = os.path.join(bin_dir, script_name) paths_to_remove = [exe_name] if WINDOWS: paths_to_remove.append(exe_name + '.exe') paths_to_remove.append(exe_name + '.exe.manifest') if is_gui: paths_to_remove.append(exe_name + '-script.pyw') else: paths_to_remove.append(exe_name + '-script.py') return paths_to_remove ``` ###Assistant : Create the fully qualified name of the files created by {console,gui}_scripts for the given ``dist``. Returns the list of file names " 215,"def bbox_center(boxes): boxes_cx = (boxes[..., 0] + boxes[..., 2]) / 2 boxes_cy = (boxes[..., 1] + boxes[..., 3]) / 2 return paddle.stack([boxes_cx, boxes_cy], axis=-1) ","Get bbox centers from boxes. Args: boxes (Tensor): boxes with shape (..., 4), ""xmin, ymin, xmax, ymax"" format. 
Returns: Tensor: boxes centers with shape (..., 2), ""cx, cy"" format. ",29,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bbox_center(boxes): boxes_cx = (boxes[..., 0] + boxes[..., 2]) / 2 boxes_cy = (boxes[..., 1] + boxes[..., 3]) / 2 return paddle.stack([boxes_cx, boxes_cy], axis=-1) ``` ###Assistant : Get bbox centers from boxes. Args: boxes (Tensor): boxes with shape (..., 4), ""xmin, ymin, xmax, ymax"" format. Returns: Tensor: boxes centers with shape (..., 2), ""cx, cy"" format. " 216,"def predict(self, input): input_names = self.predictor.get_input_names() input_tensor = self.predictor.get_input_handle(input_names[0]) output_names = self.predictor.get_output_names() output_tensor = self.predictor.get_output_handle(output_names[0]) # preprocess self.recognize_times.preprocess_time_s.start() if type(input) == str: inputs = self.preprocess_video(input) else: inputs = self.preprocess_frames(input) self.recognize_times.preprocess_time_s.end() inputs = np.expand_dims( inputs, axis=0).repeat( self.batch_size, axis=0).copy() input_tensor.copy_from_cpu(inputs) # model prediction self.recognize_times.inference_time_s.start() self.predictor.run() self.recognize_times.inference_time_s.end() output = output_tensor.copy_to_cpu() # postprocess self.recognize_times.postprocess_time_s.start() classes, scores = self.postprocess(output) self.recognize_times.postprocess_time_s.end() return classes, scores "," Args: input (str) or (list): video file path or image data list Returns: results (dict): ",15,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def predict(self, input): input_names = self.predictor.get_input_names() input_tensor = self.predictor.get_input_handle(input_names[0]) output_names = self.predictor.get_output_names() output_tensor = self.predictor.get_output_handle(output_names[0]) # preprocess self.recognize_times.preprocess_time_s.start() if type(input) == str: inputs = self.preprocess_video(input) else: inputs = self.preprocess_frames(input) self.recognize_times.preprocess_time_s.end() inputs = np.expand_dims( inputs, axis=0).repeat( self.batch_size, axis=0).copy() input_tensor.copy_from_cpu(inputs) # model prediction self.recognize_times.inference_time_s.start() self.predictor.run() self.recognize_times.inference_time_s.end() output = output_tensor.copy_to_cpu() # postprocess self.recognize_times.postprocess_time_s.start() classes, scores = self.postprocess(output) self.recognize_times.postprocess_time_s.end() return classes, scores ``` ###Assistant : Args: input (str) or (list): video file path or image data list Returns: results (dict): " 217,"def deep_deconstruct(self, obj): if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return {key: self.deep_deconstruct(value) for key, value in obj.items()} elif isinstance(obj, functools.partial): return ( obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords), ) elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, ""deconstruct""): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field 
which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], {key: self.deep_deconstruct(value) for key, value in kwargs.items()}, ) else: return obj "," Recursive deconstruction for a field and its arguments. Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly. ",22,121,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deep_deconstruct(self, obj): if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return {key: self.deep_deconstruct(value) for key, value in obj.items()} elif isinstance(obj, functools.partial): return ( obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords), ) elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, ""deconstruct""): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], {key: self.deep_deconstruct(value) for key, value in kwargs.items()}, ) else: return obj ``` ###Assistant : Recursive deconstruction for a field and its arguments. Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly. " 218,"def test_cancellation(self): deferred: ""Deferred[str]"" = Deferred() wrapper_deferred = stop_cancellation(deferred) # Cancel the new `Deferred`. wrapper_deferred.cancel() self.assertTrue(wrapper_deferred.called) self.failureResultOf(wrapper_deferred, CancelledError) self.assertFalse( deferred.called, ""Original `Deferred` was unexpectedly cancelled."" ) # Now make the inner `Deferred` fail. # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed # in logs. deferred.errback(ValueError(""abc"")) self.assertIsNone(deferred.result, ""`Failure` was not consumed"") ",Test that cancellation of the new `Deferred` leaves the original running.,11,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_cancellation(self): deferred: ""Deferred[str]"" = Deferred() wrapper_deferred = stop_cancellation(deferred) # Cancel the new `Deferred`. wrapper_deferred.cancel() self.assertTrue(wrapper_deferred.called) self.failureResultOf(wrapper_deferred, CancelledError) self.assertFalse( deferred.called, ""Original `Deferred` was unexpectedly cancelled."" ) # Now make the inner `Deferred` fail. # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed # in logs. deferred.errback(ValueError(""abc"")) self.assertIsNone(deferred.result, ""`Failure` was not consumed"") ``` ###Assistant : Test that cancellation of the new `Deferred` leaves the original running. 
" 219,"def getgeneratorlocals(generator): if not isgenerator(generator): raise TypeError(""{!r} is not a Python generator"".format(generator)) frame = getattr(generator, ""gi_frame"", None) if frame is not None: return generator.gi_frame.f_locals else: return {} # ------------------------------------------------ coroutine introspection CORO_CREATED = 'CORO_CREATED' CORO_RUNNING = 'CORO_RUNNING' CORO_SUSPENDED = 'CORO_SUSPENDED' CORO_CLOSED = 'CORO_CLOSED' "," Get the mapping of generator local variables to their current values. A dict is returned, with the keys the local variable names and values the bound values.",27,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getgeneratorlocals(generator): if not isgenerator(generator): raise TypeError(""{!r} is not a Python generator"".format(generator)) frame = getattr(generator, ""gi_frame"", None) if frame is not None: return generator.gi_frame.f_locals else: return {} # ------------------------------------------------ coroutine introspection CORO_CREATED = 'CORO_CREATED' CORO_RUNNING = 'CORO_RUNNING' CORO_SUSPENDED = 'CORO_SUSPENDED' CORO_CLOSED = 'CORO_CLOSED' ``` ###Assistant : Get the mapping of generator local variables to their current values. A dict is returned, with the keys the local variable names and values the bound values. " 220,"async def test_last_bin_contains_end_date(client, route): response = await client.post( f""/{route}/history"", json=dict( history_start=str(dt), history_end=str(dt.add(days=1, minutes=30)), history_interval_seconds=timedelta(days=1).total_seconds(), ), ) assert response.status_code == status.HTTP_200_OK parsed = pydantic.parse_obj_as(List[responses.HistoryResponse], response.json()) assert len(parsed) == 2 assert parsed[0].interval_start == dt assert parsed[0].interval_end == dt.add(days=1) assert parsed[1].interval_start == dt.add(days=1) assert parsed[1].interval_end == dt.add(days=2) ","The last bin contains the end date, so its own end could be after the history end",17,44,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_last_bin_contains_end_date(client, route): response = await client.post( f""/{route}/history"", json=dict( history_start=str(dt), history_end=str(dt.add(days=1, minutes=30)), history_interval_seconds=timedelta(days=1).total_seconds(), ), ) assert response.status_code == status.HTTP_200_OK parsed = pydantic.parse_obj_as(List[responses.HistoryResponse], response.json()) assert len(parsed) == 2 assert parsed[0].interval_start == dt assert parsed[0].interval_end == dt.add(days=1) assert parsed[1].interval_start == dt.add(days=1) assert parsed[1].interval_end == dt.add(days=2) ``` ###Assistant : The last bin contains the end date, so its own end could be after the history end " 221,"def _validate_attributes(self): # Run config if not isinstance(self.run_config, RunConfig): raise ValueError( f""`run_config` should be an instance of `ray.air.RunConfig`, "" f""found {type(self.run_config)} with value `{self.run_config}`."" ) # Scaling config # Todo: move to ray.air.ScalingConfig if not isinstance(self.scaling_config, dict): raise ValueError( f""`scaling_config` should be an instance of `dict`, "" f""found {type(self.scaling_config)} with value `{self.scaling_config}`."" ) # Datasets if not isinstance(self.datasets, dict): raise ValueError( f""`datasets` should be a dict mapping from a string to "" f""`ray.data.Dataset` objects, "" f""found {type(self.datasets)} with value `{self.datasets}`."" ) elif any( not isinstance(ds, ray.data.Dataset) and not callable(ds) for ds in self.datasets.values() ): raise ValueError( f""At least one value in the `datasets` dict is not a "" f""`ray.data.Dataset`: {self.datasets}"" ) # Preprocessor if self.preprocessor is not None and not isinstance( self.preprocessor, ray.data.Preprocessor ): raise ValueError( f""`preprocessor` should be an instance of `ray.data.Preprocessor`, "" f""found {type(self.preprocessor)} with value `{self.preprocessor}`."" ) if self.resume_from_checkpoint is not None and not isinstance( self.resume_from_checkpoint, ray.air.Checkpoint ): raise ValueError( f""`resume_from_checkpoint` should be an instance of "" f""`ray.air.Checkpoint`, found {type(self.resume_from_checkpoint)} "" f""with value `{self.resume_from_checkpoint}`."" ) ",Called on __init()__ to validate trainer attributes.,7,168,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _validate_attributes(self): # Run config if not isinstance(self.run_config, RunConfig): raise ValueError( f""`run_config` should be an instance of `ray.air.RunConfig`, "" f""found {type(self.run_config)} with value `{self.run_config}`."" ) # Scaling config # Todo: move to ray.air.ScalingConfig if not isinstance(self.scaling_config, dict): raise ValueError( f""`scaling_config` should be an instance of `dict`, "" f""found {type(self.scaling_config)} with value `{self.scaling_config}`."" ) # Datasets if not isinstance(self.datasets, dict): raise ValueError( f""`datasets` should be a dict mapping from a string to "" f""`ray.data.Dataset` objects, "" f""found {type(self.datasets)} with value `{self.datasets}`."" ) elif any( not isinstance(ds, ray.data.Dataset) and not callable(ds) for ds in self.datasets.values() ): raise ValueError( f""At least one value in the `datasets` dict is not a "" f""`ray.data.Dataset`: {self.datasets}"" ) # Preprocessor if self.preprocessor is not None and not isinstance( self.preprocessor, ray.data.Preprocessor ): raise ValueError( f""`preprocessor` should be an instance of `ray.data.Preprocessor`, "" f""found {type(self.preprocessor)} with value `{self.preprocessor}`."" ) if self.resume_from_checkpoint is not None and not isinstance( self.resume_from_checkpoint, ray.air.Checkpoint ): raise ValueError( f""`resume_from_checkpoint` should be an instance of "" f""`ray.air.Checkpoint`, found {type(self.resume_from_checkpoint)} "" f""with value `{self.resume_from_checkpoint}`."" ) ``` ###Assistant : Called on __init()__ to validate trainer attributes. " 222,"def longitude(self) -> float | None: if ( self.extra_state_attributes is not None and ATTR_LONGITUDE in self.extra_state_attributes ): longitude: float = self.extra_state_attributes[ATTR_LONGITUDE] return longitude return None ",Return longitude if provided in extra_state_attributes or None.,8,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def longitude(self) -> float | None: if ( self.extra_state_attributes is not None and ATTR_LONGITUDE in self.extra_state_attributes ): longitude: float = self.extra_state_attributes[ATTR_LONGITUDE] return longitude return None ``` ###Assistant : Return longitude if provided in extra_state_attributes or None. " 223,"def user_cache_dir(self) -> str: path = os.path.normpath(get_win_folder(""CSIDL_LOCAL_APPDATA"")) return self._append_parts(path, opinion_value=""Cache"") "," :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g. ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version`` ",16,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def user_cache_dir(self) -> str: path = os.path.normpath(get_win_folder(""CSIDL_LOCAL_APPDATA"")) return self._append_parts(path, opinion_value=""Cache"") ``` ###Assistant : :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g. 
``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version`` " 224,"def get_project_config(project, full_config=True, project_keys=None): with sentry_sdk.push_scope() as scope: scope.set_tag(""project"", project.id) with metrics.timer(""relay.config.get_project_config.duration""): return _get_project_config(project, full_config=full_config, project_keys=project_keys) ","Constructs the ProjectConfig information. :param project: The project to load configuration for. Ensure that organization is bound on this object; otherwise it will be loaded from the database. :param full_config: True if only the full config is required, False if only the restricted (for external relays) is required (default True, i.e. full configuration) :param project_keys: Pre-fetched project keys for performance. However, if no project keys are provided it is assumed that the config does not need to contain auth information (this is the case when used in python's StoreView) :return: a ProjectConfig object for the given project ",97,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_project_config(project, full_config=True, project_keys=None): with sentry_sdk.push_scope() as scope: scope.set_tag(""project"", project.id) with metrics.timer(""relay.config.get_project_config.duration""): return _get_project_config(project, full_config=full_config, project_keys=project_keys) ``` ###Assistant : Constructs the ProjectConfig information. :param project: The project to load configuration for. Ensure that organization is bound on this object; otherwise it will be loaded from the database. :param full_config: True if only the full config is required, False if only the restricted (for external relays) is required (default True, i.e. full configuration) :param project_keys: Pre-fetched project keys for performance. However, if no project keys are provided it is assumed that the config does not need to contain auth information (this is the case when used in python's StoreView) :return: a ProjectConfig object for the given project " 225,"def _query_kg(self, sparql_query): try: response = self.knowledge_graph.query(sparql_query=sparql_query) # unpack different answer styles if isinstance(response, list): if len(response) == 0: result = """" else: result = [] for x in response: for k, v in x.items(): result.append(v[""value""]) elif isinstance(response, bool): result = str(response) elif ""count"" in response[0]: result = str(int(response[0][""count""][""value""])) else: result = """" except Exception: result = """" return result, sparql_query "," Execute a single SPARQL query on the knowledge graph to retrieve an answer and unpack different answer styles for boolean queries, count queries, and list queries. :param sparql_query: SPARQL query that shall be executed on the knowledge graph ",38,61,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _query_kg(self, sparql_query): try: response = self.knowledge_graph.query(sparql_query=sparql_query) # unpack different answer styles if isinstance(response, list): if len(response) == 0: result = """" else: result = [] for x in response: for k, v in x.items(): result.append(v[""value""]) elif isinstance(response, bool): result = str(response) elif ""count"" in response[0]: result = str(int(response[0][""count""][""value""])) else: result = """" except Exception: result = """" return result, sparql_query ``` ###Assistant : Execute a single SPARQL query on the knowledge graph to retrieve an answer and unpack different answer styles for boolean queries, count queries, and list queries. :param sparql_query: SPARQL query that shall be executed on the knowledge graph " 226,"def _path_importer_cache(cls, path): if path == '': try: path = _os.getcwd() except FileNotFoundError: # Don't cache the failure as the cwd can easily change to # a valid directory later on. return None try: finder = sys.path_importer_cache[path] except KeyError: finder = cls._path_hooks(path) sys.path_importer_cache[path] = finder return finder ","Get the finder for the path entry from sys.path_importer_cache. If the path entry is not in the cache, find the appropriate finder and cache it. If no finder is available, store None. ",32,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _path_importer_cache(cls, path): if path == '': try: path = _os.getcwd() except FileNotFoundError: # Don't cache the failure as the cwd can easily change to # a valid directory later on. return None try: finder = sys.path_importer_cache[path] except KeyError: finder = cls._path_hooks(path) sys.path_importer_cache[path] = finder return finder ``` ###Assistant : Get the finder for the path entry from sys.path_importer_cache. If the path entry is not in the cache, find the appropriate finder and cache it. If no finder is available, store None. " 227,"def test_publish_parts(self): import docutils self.assertNotEqual( docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE, ""cmsreference"" ) source = ""reST, `interpreted text`, default role."" markup = ""
<p>reST, <cite>interpreted text</cite>, default role.</p>
\n"" parts = docutils.core.publish_parts(source=source, writer_name=""html4css1"") self.assertEqual(parts[""fragment""], markup) "," Django shouldn't break the default role for interpreted text when ``publish_parts`` is used directly, by setting it to ``cmsreference`` (#6681). ",20,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_publish_parts(self): import docutils self.assertNotEqual( docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE, ""cmsreference"" ) source = ""reST, `interpreted text`, default role."" markup = ""
<p>reST, <cite>interpreted text</cite>, default role.</p>
\n"" parts = docutils.core.publish_parts(source=source, writer_name=""html4css1"") self.assertEqual(parts[""fragment""], markup) ``` ###Assistant : Django shouldn't break the default role for interpreted text when ``publish_parts`` is used directly, by setting it to ``cmsreference`` (#6681). " 228,"def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id depth_map = setup_info.depth_map # Try at ""B"" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map[""B""], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, [""b6"", ""b5"", ""b4"", ""2"", ""b3"", ""b2"", ""b1""] ) # Try at ""A"" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map[""A""], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] # Event ""2"" has a depth of 2 but is not included here because we only # know the approximate depth of 5 from our event ""3"". self.assertListEqual(backfill_event_ids, [""b3"", ""b2"", ""b1""]) "," Test to make sure only backfill points that are older and come before the `current_depth` are returned. ",17,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id depth_map = setup_info.depth_map # Try at ""B"" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map[""B""], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, [""b6"", ""b5"", ""b4"", ""2"", ""b3"", ""b2"", ""b1""] ) # Try at ""A"" backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id, depth_map[""A""], limit=100) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] # Event ""2"" has a depth of 2 but is not included here because we only # know the approximate depth of 5 from our event ""3"". self.assertListEqual(backfill_event_ids, [""b3"", ""b2"", ""b1""]) ``` ###Assistant : Test to make sure only backfill points that are older and come before the `current_depth` are returned. " 229,"def theano_code(expr, cache=None, **kwargs): sympy_deprecation_warning( , deprecated_since_version=""1.8"", active_deprecations_target='theanocode-deprecated') if not theano: raise ImportError(""theano is required for theano_code"") if cache is None: cache = global_cache return TheanoPrinter(cache=cache, settings={}).doprint(expr, **kwargs) "," Convert a SymPy expression into a Theano graph variable. .. deprecated:: 1.8 ``sympy.printing.theanocode`` is deprecated. Theano has been renamed to Aesara. Use ``sympy.printing.aesaracode`` instead. See :ref:`theanocode-deprecated` for more information. Parameters ========== expr : sympy.core.expr.Expr SymPy expression object to convert. cache : dict Cached Theano variables (see :class:`TheanoPrinter.cache `). Defaults to the module-level global cache. dtypes : dict Passed to :meth:`.TheanoPrinter.doprint`. broadcastables : dict Passed to :meth:`.TheanoPrinter.doprint`. Returns ======= theano.gof.graph.Variable A variable corresponding to the expression's value in a Theano symbolic expression graph. sympy.printing.theanocode is deprecated. Theano has been renamed to Aesara. 
Use sympy.printing.aesaracode instead.",94,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def theano_code(expr, cache=None, **kwargs): sympy_deprecation_warning( , deprecated_since_version=""1.8"", active_deprecations_target='theanocode-deprecated') if not theano: raise ImportError(""theano is required for theano_code"") if cache is None: cache = global_cache return TheanoPrinter(cache=cache, settings={}).doprint(expr, **kwargs) ``` ###Assistant : Convert a SymPy expression into a Theano graph variable. .. deprecated:: 1.8 ``sympy.printing.theanocode`` is deprecated. Theano has been renamed to Aesara. Use ``sympy.printing.aesaracode`` instead. See :ref:`theanocode-deprecated` for more information. Parameters ========== expr : sympy.core.expr.Expr SymPy expression object to convert. cache : dict Cached Theano variables (see :class:`TheanoPrinter.cache `). Defaults to the module-level global cache. dtypes : dict Passed to :meth:`.TheanoPrinter.doprint`. broadcastables : dict Passed to :meth:`.TheanoPrinter.doprint`. Returns ======= theano.gof.graph.Variable A variable corresponding to the expression's value in a Theano symbolic expression graph. sympy.printing.theanocode is deprecated. Theano has been renamed to Aesara. Use sympy.printing.aesaracode instead. " 230,"def _validate_axes_lengths(self): if self._row_lengths_cache is not None and len(self.index) > 0: # An empty frame can have 0 rows but a nonempty index. If the frame # does have rows, the number of rows must equal the size of the # index. num_rows = sum(self._row_lengths_cache) if num_rows > 0: ErrorMessage.catch_bugs_and_request_email( num_rows != len(self._index_cache), f""Row lengths: {num_rows} != {len(self._index_cache)}"", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._row_lengths_cache), f""Row lengths cannot be negative: {self._row_lengths_cache}"", ) if self._column_widths_cache is not None and len(self.columns) > 0: # An empty frame can have 0 column but a nonempty column index. If # the frame does have columns, the number of columns must equal the # size of the columns. num_columns = sum(self._column_widths_cache) if num_columns > 0: ErrorMessage.catch_bugs_and_request_email( num_columns != len(self._columns_cache), f""Column widths: {num_columns} != {len(self._columns_cache)}"", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._column_widths_cache), f""Column widths cannot be negative: {self._column_widths_cache}"", ) ",Validate that labels are split correctly if split is known.,10,147,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _validate_axes_lengths(self): if self._row_lengths_cache is not None and len(self.index) > 0: # An empty frame can have 0 rows but a nonempty index. If the frame # does have rows, the number of rows must equal the size of the # index. num_rows = sum(self._row_lengths_cache) if num_rows > 0: ErrorMessage.catch_bugs_and_request_email( num_rows != len(self._index_cache), f""Row lengths: {num_rows} != {len(self._index_cache)}"", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._row_lengths_cache), f""Row lengths cannot be negative: {self._row_lengths_cache}"", ) if self._column_widths_cache is not None and len(self.columns) > 0: # An empty frame can have 0 column but a nonempty column index. 
If # the frame does have columns, the number of columns must equal the # size of the columns. num_columns = sum(self._column_widths_cache) if num_columns > 0: ErrorMessage.catch_bugs_and_request_email( num_columns != len(self._columns_cache), f""Column widths: {num_columns} != {len(self._columns_cache)}"", ) ErrorMessage.catch_bugs_and_request_email( any(val < 0 for val in self._column_widths_cache), f""Column widths cannot be negative: {self._column_widths_cache}"", ) ``` ###Assistant : Validate that labels are split correctly if split is known. " 231,"def test_get_name_capability_sid(): cap_sid = ""S-1-15-3-1024-1065365936-1281604716-3511738428-1654721687-432734479-3232135806-4053264122-3456934681"" sid_obj = win32security.ConvertStringSidToSid(cap_sid) assert salt.utils.win_dacl.get_name(sid_obj) is None "," Test get_name with a compatibility SID. Should return `None` as we want to ignore these SIDs ",16,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_name_capability_sid(): cap_sid = ""S-1-15-3-1024-1065365936-1281604716-3511738428-1654721687-432734479-3232135806-4053264122-3456934681"" sid_obj = win32security.ConvertStringSidToSid(cap_sid) assert salt.utils.win_dacl.get_name(sid_obj) is None ``` ###Assistant : Test get_name with a compatibility SID. Should return `None` as we want to ignore these SIDs " 232,"def entry_points(group=None): eps = importlib.metadata.entry_points() if group: try: return eps.select(group=group) except AttributeError: return eps.get(group, []) return eps ","Returns an iterable of entrypoints. For compatibility with Python 3.8/3.9. In 3.10 the return type changed from a dict to an ``importlib.metadata.EntryPoints``. This compatibility utility can be removed once Python 3.10 is the minimum. ",34,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def entry_points(group=None): eps = importlib.metadata.entry_points() if group: try: return eps.select(group=group) except AttributeError: return eps.get(group, []) return eps ``` ###Assistant : Returns an iterable of entrypoints. For compatibility with Python 3.8/3.9. In 3.10 the return type changed from a dict to an ``importlib.metadata.EntryPoints``. This compatibility utility can be removed once Python 3.10 is the minimum. " 233,"def elliptic_curve(self) -> Optional[str]: key = self._private_key() if isinstance(key, EllipticCurvePrivateKey): return key.curve.name return None "," :returns: If the private key is an elliptic key, the name of its curve. :rtype: str ",16,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def elliptic_curve(self) -> Optional[str]: key = self._private_key() if isinstance(key, EllipticCurvePrivateKey): return key.curve.name return None ``` ###Assistant : :returns: If the private key is an elliptic key, the name of its curve. :rtype: str " 234,"def page_type_display_name(self): if not self.specific_class or self.is_root(): return """" else: return self.specific_class.get_verbose_name() "," A human-readable version of this page's type ",7,12,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def page_type_display_name(self): if not self.specific_class or self.is_root(): return """" else: return self.specific_class.get_verbose_name() ``` ###Assistant : A human-readable version of this page's type " 235,"def save(self, fname, **kwargs) -> Plot: # TODO expose important keyword arugments in our signature? self.plot().save(fname, **kwargs) return self "," Render the plot and write it to a buffer or file on disk. Parameters ---------- fname : str, path, or buffer Location on disk to save the figure, or a buffer to write into. Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`. ",41,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save(self, fname, **kwargs) -> Plot: # TODO expose important keyword arugments in our signature? self.plot().save(fname, **kwargs) return self ``` ###Assistant : Render the plot and write it to a buffer or file on disk. Parameters ---------- fname : str, path, or buffer Location on disk to save the figure, or a buffer to write into. Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`. " 236,"def get_install_candidate(self, link_evaluator, link): # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate] is_candidate, result = link_evaluator.evaluate_link(link) if not is_candidate: if result: self._log_skipped_link(link, reason=result) return None return InstallationCandidate( name=link_evaluator.project_name, link=link, version=result, ) "," If the link is a candidate for install, convert it to an InstallationCandidate and return it. Otherwise, return None. ",19,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_install_candidate(self, link_evaluator, link): # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate] is_candidate, result = link_evaluator.evaluate_link(link) if not is_candidate: if result: self._log_skipped_link(link, reason=result) return None return InstallationCandidate( name=link_evaluator.project_name, link=link, version=result, ) ``` ###Assistant : If the link is a candidate for install, convert it to an InstallationCandidate and return it. Otherwise, return None. " 237,"def bar(self, x=None, y=None, **kwargs) -> PlotAccessor: return self(kind=""bar"", x=x, y=y, **kwargs) "," Vertical bar plot. A bar plot is a plot that presents categorical data with rectangular bars with lengths proportional to the values that they represent. A bar plot shows comparisons among discrete categories. One axis of the plot shows the specific categories being compared, and the other axis represents a measured value. ",52,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bar(self, x=None, y=None, **kwargs) -> PlotAccessor: return self(kind=""bar"", x=x, y=y, **kwargs) ``` ###Assistant : Vertical bar plot. A bar plot is a plot that presents categorical data with rectangular bars with lengths proportional to the values that they represent. A bar plot shows comparisons among discrete categories. One axis of the plot shows the specific categories being compared, and the other axis represents a measured value. 
" 238,"def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): input_shape = shape(y_pred) num_samples, num_steps = input_shape[0], input_shape[1] y_pred = tf.math.log( tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon() ) input_length = tf.cast(input_length, tf.int32) if greedy: (decoded, log_prob) = tf.nn.ctc_greedy_decoder( inputs=y_pred, sequence_length=input_length ) else: (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder( inputs=y_pred, sequence_length=input_length, beam_width=beam_width, top_paths=top_paths, ) decoded_dense = [] for st in decoded: st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps)) decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) return (decoded_dense, log_prob) # HIGH ORDER FUNCTIONS @keras_export(""keras.backend.map_fn"") @doc_controls.do_not_generate_docs","Decodes the output of a softmax. Can use either greedy search (also known as best path) or a constrained dictionary search. Args: y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, )` containing the sequence length for each batch item in `y_pred`. greedy: perform much faster best-path search if `true`. This does not use a dictionary. beam_width: if `greedy` is `false`: a beam search decoder will be used with a beam of this width. top_paths: if `greedy` is `false`, how many of the most probable paths will be returned. Returns: Tuple: List: if `greedy` is `true`, returns a list of one element that contains the decoded sequence. If `false`, returns the `top_paths` most probable decoded sequences. Each decoded sequence has shape (samples, time_steps). Important: blank labels are returned as `-1`. Tensor `(top_paths, )` that contains the log probability of each decoded sequence. ",149,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): input_shape = shape(y_pred) num_samples, num_steps = input_shape[0], input_shape[1] y_pred = tf.math.log( tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon() ) input_length = tf.cast(input_length, tf.int32) if greedy: (decoded, log_prob) = tf.nn.ctc_greedy_decoder( inputs=y_pred, sequence_length=input_length ) else: (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder( inputs=y_pred, sequence_length=input_length, beam_width=beam_width, top_paths=top_paths, ) decoded_dense = [] for st in decoded: st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps)) decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1)) return (decoded_dense, log_prob) # HIGH ORDER FUNCTIONS @keras_export(""keras.backend.map_fn"") @doc_controls.do_not_generate_docs ``` ###Assistant : Decodes the output of a softmax. Can use either greedy search (also known as best path) or a constrained dictionary search. Args: y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, )` containing the sequence length for each batch item in `y_pred`. greedy: perform much faster best-path search if `true`. This does not use a dictionary. beam_width: if `greedy` is `false`: a beam search decoder will be used with a beam of this width. top_paths: if `greedy` is `false`, how many of the most probable paths will be returned. 
Returns: Tuple: List: if `greedy` is `true`, returns a list of one element that contains the decoded sequence. If `false`, returns the `top_paths` most probable decoded sequences. Each decoded sequence has shape (samples, time_steps). Important: blank labels are returned as `-1`. Tensor `(top_paths, )` that contains the log probability of each decoded sequence. " 239,"def display_time_updates(bar): threading.Thread(target=_show_time_updates, args=(bar,)).start() "," Start displaying the progress `bar` in a notebook. Parameters ---------- bar : tqdm.tqdm The progress bar wrapper to display in a notebook cell. ",23,4,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def display_time_updates(bar): threading.Thread(target=_show_time_updates, args=(bar,)).start() ``` ###Assistant : Start displaying the progress `bar` in a notebook. Parameters ---------- bar : tqdm.tqdm The progress bar wrapper to display in a notebook cell. " 240,"def remove(name=None, pkgs=None, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug(""Removing these fileset(s)/rpm package(s) %s: %s"", name, targets) errors = [] # Get a list of the currently installed pkgs. old = list_pkgs() # Remove the fileset or rpm package(s) for target in targets: try: named, versionpkg, rpmpkg = _check_pkg(target) except CommandExecutionError as exc: if exc.info: errors.append(exc.info[""errors""]) continue if rpmpkg: # assume use dnf or yum cmdflags = "" -y remove "" if pathlib.Path(""/opt/freeware/bin/dnf"").is_file(): cmdexe = ""/opt/freeware/bin/dnf"" elif pathlib.Path(""/opt/freeware/bin/yum"").is_file(): cmdexe = ""/opt/freeware/bin/yum"" elif pathlib.Path(""/usr/bin/yum"").is_file(): cmdexe = ""/usr/bin/yum"" else: cmdexe = ""/usr/bin/rpm"" cmdflags = "" -e "" cmd = [cmdexe, cmdflags, named] out = __salt__[""cmd.run_all""](cmd, python_shell=False) else: cmd = [""/usr/sbin/installp"", ""-u"", named] out = __salt__[""cmd.run_all""](cmd, python_shell=False) # Get a list of the packages after the uninstall __context__.pop(""pkg.list_pkgs"", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( ""Problems encountered removing filesets(s)/package(s)"", info={""changes"": ret, ""errors"": errors}, ) return ret "," Remove specified fileset(s)/rpm package(s). name The name of the fileset or rpm package to be deleted. .. versionadded:: 3005 preference to install rpm packages are to use in the following order: /opt/freeware/bin/dnf /opt/freeware/bin/yum /usr/bin/yum /usr/bin/rpm Multiple Package Options: pkgs A list of filesets and/or rpm packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove salt '*' pkg.remove tcsh salt '*' pkg.remove xlC.rte salt '*' pkg.remove Firefox.base.adt salt '*' pkg.remove pkgs='[""foo"", ""bar""]' ",101,157,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def remove(name=None, pkgs=None, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug(""Removing these fileset(s)/rpm package(s) %s: %s"", name, targets) errors = [] # Get a list of the currently installed pkgs. 
old = list_pkgs() # Remove the fileset or rpm package(s) for target in targets: try: named, versionpkg, rpmpkg = _check_pkg(target) except CommandExecutionError as exc: if exc.info: errors.append(exc.info[""errors""]) continue if rpmpkg: # assume use dnf or yum cmdflags = "" -y remove "" if pathlib.Path(""/opt/freeware/bin/dnf"").is_file(): cmdexe = ""/opt/freeware/bin/dnf"" elif pathlib.Path(""/opt/freeware/bin/yum"").is_file(): cmdexe = ""/opt/freeware/bin/yum"" elif pathlib.Path(""/usr/bin/yum"").is_file(): cmdexe = ""/usr/bin/yum"" else: cmdexe = ""/usr/bin/rpm"" cmdflags = "" -e "" cmd = [cmdexe, cmdflags, named] out = __salt__[""cmd.run_all""](cmd, python_shell=False) else: cmd = [""/usr/sbin/installp"", ""-u"", named] out = __salt__[""cmd.run_all""](cmd, python_shell=False) # Get a list of the packages after the uninstall __context__.pop(""pkg.list_pkgs"", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( ""Problems encountered removing filesets(s)/package(s)"", info={""changes"": ret, ""errors"": errors}, ) return ret ``` ###Assistant : Remove specified fileset(s)/rpm package(s). name The name of the fileset or rpm package to be deleted. .. versionadded:: 3005 preference to install rpm packages are to use in the following order: /opt/freeware/bin/dnf /opt/freeware/bin/yum /usr/bin/yum /usr/bin/rpm Multiple Package Options: pkgs A list of filesets and/or rpm packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove salt '*' pkg.remove tcsh salt '*' pkg.remove xlC.rte salt '*' pkg.remove Firefox.base.adt salt '*' pkg.remove pkgs='[""foo"", ""bar""]' " 241,"def _from_ordinalf(x, tz=None): tz = _get_tzinfo(tz) dt = (np.datetime64(get_epoch()) + np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us')) if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'): raise ValueError(f'Date ordinal {x} converts to {dt} (using ' f'epoch {get_epoch()}), but Matplotlib dates must be ' 'between year 0001 and 9999.') # convert from datetime64 to datetime: dt = dt.tolist() # datetime64 is always UTC: dt = dt.replace(tzinfo=dateutil.tz.gettz('UTC')) # but maybe we are working in a different timezone so move. dt = dt.astimezone(tz) # fix round off errors if np.abs(x) > 70 * 365: # if x is big, round off to nearest twenty microseconds. # This avoids floating point roundoff error ms = round(dt.microsecond / 20) * 20 if ms == 1000000: dt = dt.replace(microsecond=0) + datetime.timedelta(seconds=1) else: dt = dt.replace(microsecond=ms) return dt # a version of _from_ordinalf that can operate on numpy arrays _from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf, otypes=""O"") # a version of dateutil.parser.parse that can operate on numpy arrays _dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse) "," Convert Gregorian float of the date, preserving hours, minutes, seconds and microseconds. Return value is a `.datetime`. The input date *x* is a float in ordinal days at UTC, and the output will be the specified `.datetime` object corresponding to that time in timezone *tz*, or if *tz* is ``None``, in the timezone specified in :rc:`timezone`. ",56,156,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _from_ordinalf(x, tz=None): tz = _get_tzinfo(tz) dt = (np.datetime64(get_epoch()) + np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us')) if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'): raise ValueError(f'Date ordinal {x} converts to {dt} (using ' f'epoch {get_epoch()}), but Matplotlib dates must be ' 'between year 0001 and 9999.') # convert from datetime64 to datetime: dt = dt.tolist() # datetime64 is always UTC: dt = dt.replace(tzinfo=dateutil.tz.gettz('UTC')) # but maybe we are working in a different timezone so move. dt = dt.astimezone(tz) # fix round off errors if np.abs(x) > 70 * 365: # if x is big, round off to nearest twenty microseconds. # This avoids floating point roundoff error ms = round(dt.microsecond / 20) * 20 if ms == 1000000: dt = dt.replace(microsecond=0) + datetime.timedelta(seconds=1) else: dt = dt.replace(microsecond=ms) return dt # a version of _from_ordinalf that can operate on numpy arrays _from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf, otypes=""O"") # a version of dateutil.parser.parse that can operate on numpy arrays _dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse) ``` ###Assistant : Convert Gregorian float of the date, preserving hours, minutes, seconds and microseconds. Return value is a `.datetime`. The input date *x* is a float in ordinal days at UTC, and the output will be the specified `.datetime` object corresponding to that time in timezone *tz*, or if *tz* is ``None``, in the timezone specified in :rc:`timezone`. " 242,"def getdoc(object): try: doc = object.__doc__ except AttributeError: return None if doc is None: try: doc = _finddoc(object) except (AttributeError, TypeError): return None if not isinstance(doc, str): return None return cleandoc(doc) ","Get the documentation string for an object. All tabs are expanded to spaces. To clean up docstrings that are indented to line up with blocks of code, any whitespace than can be uniformly removed from the second line onwards is removed.",41,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getdoc(object): try: doc = object.__doc__ except AttributeError: return None if doc is None: try: doc = _finddoc(object) except (AttributeError, TypeError): return None if not isinstance(doc, str): return None return cleandoc(doc) ``` ###Assistant : Get the documentation string for an object. All tabs are expanded to spaces. To clean up docstrings that are indented to line up with blocks of code, any whitespace than can be uniformly removed from the second line onwards is removed. " 243,"def load_config_with_kwargs(cls, kwargs): assert_is_a_marshmallow_class(cls) schema = cls.Schema() fields = schema.fields.keys() return load_config(cls, **{k: v for k, v in kwargs.items() if k in fields}), { k: v for k, v in kwargs.items() if k not in fields } ",Takes a marshmallow class and dict of parameter values and appropriately instantiantes the schema.,14,37,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def load_config_with_kwargs(cls, kwargs): assert_is_a_marshmallow_class(cls) schema = cls.Schema() fields = schema.fields.keys() return load_config(cls, **{k: v for k, v in kwargs.items() if k in fields}), { k: v for k, v in kwargs.items() if k not in fields } ``` ###Assistant : Takes a marshmallow class and dict of parameter values and appropriately instantiantes the schema. " 244,"def test_payment_refund_or_void_refund_called_txn_exist(refund_mock, payment): # given payment.charge_status = ChargeStatus.FULLY_CHARGED payment.save(update_fields=[""charge_status""]) assert payment.can_refund() is True payment.captured_amount = payment.total payment.save(update_fields=[""captured_amount""]) txn = payment.transactions.create( is_success=True, action_required=False, kind=TransactionKind.REFUND_ONGOING, amount=payment.captured_amount / 2, currency=payment.currency, token=""test"", gateway_response={}, ) # when gateway.payment_refund_or_void( payment, get_plugins_manager(), None, transaction_id=txn.token ) # then assert refund_mock.called_once() @patch(""saleor.payment.gateway.refund"")","Ensure that the refund method is called when the refund process is already ongoing but not covered full payment captured amount.",21,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_payment_refund_or_void_refund_called_txn_exist(refund_mock, payment): # given payment.charge_status = ChargeStatus.FULLY_CHARGED payment.save(update_fields=[""charge_status""]) assert payment.can_refund() is True payment.captured_amount = payment.total payment.save(update_fields=[""captured_amount""]) txn = payment.transactions.create( is_success=True, action_required=False, kind=TransactionKind.REFUND_ONGOING, amount=payment.captured_amount / 2, currency=payment.currency, token=""test"", gateway_response={}, ) # when gateway.payment_refund_or_void( payment, get_plugins_manager(), None, transaction_id=txn.token ) # then assert refund_mock.called_once() @patch(""saleor.payment.gateway.refund"") ``` ###Assistant : Ensure that the refund method is called when the refund process is already ongoing but not covered full payment captured amount. " 245,"def _all(self): groups = super(Deprecated, self).values() return EntryPoints(itertools.chain.from_iterable(groups)) "," Reconstruct a list of all entrypoints from the groups. ",9,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _all(self): groups = super(Deprecated, self).values() return EntryPoints(itertools.chain.from_iterable(groups)) ``` ###Assistant : Reconstruct a list of all entrypoints from the groups. " 246,"def readlines(self, sizehint=None, keepends=True): data = self.read() return data.splitlines(keepends) "," Read all lines available on the input stream and return them as a list. Line breaks are implemented using the codec's decoder method and are included in the list entries. sizehint, if given, is ignored since there is no efficient way to finding the true end-of-line. ",46,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def readlines(self, sizehint=None, keepends=True): data = self.read() return data.splitlines(keepends) ``` ###Assistant : Read all lines available on the input stream and return them as a list. 
Line breaks are implemented using the codec's decoder method and are included in the list entries. sizehint, if given, is ignored since there is no efficient way to finding the true end-of-line. " 247,"async def notify_clients(cls) -> None: while not cls.STOP: await asyncio.sleep(cls.UPDATE_INTERVALS) if cls.EVENT_QUEUE: await cls.broadcast_estimations() "," Notify clients about events statuses in the queue periodically. ",9,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def notify_clients(cls) -> None: while not cls.STOP: await asyncio.sleep(cls.UPDATE_INTERVALS) if cls.EVENT_QUEUE: await cls.broadcast_estimations() ``` ###Assistant : Notify clients about events statuses in the queue periodically. " 248,"def _readPyPIFile(self): # Complex stuff, pylint: disable=too-many-branches,too-many-statements if self.used_modules is None: pyi_filename = self.getPyIFilename() if os.path.exists(pyi_filename): pyi_deps = OrderedSet() # Flag signalling multiline import handling in_import = False in_import_part = """" for line in getFileContentByLine(pyi_filename): line = line.strip() if not in_import: if line.startswith(""import ""): imported = line[7:] pyi_deps.add(imported) elif line.startswith(""from ""): parts = line.split(None, 3) assert parts[0] == ""from"" assert parts[2] == ""import"" origin_name = parts[1] if origin_name == ""typing"": continue if origin_name == ""."": origin_name = self.getFullName() else: dot_count = 0 while origin_name.startswith("".""): origin_name = origin_name[1:] dot_count += 1 if dot_count > 0: if origin_name: origin_name = ( self.getFullName() .getRelativePackageName(level=dot_count + 1) .getChildNamed(origin_name) ) else: origin_name = ( self.getFullName().getRelativePackageName( level=dot_count + 1 ) ) if origin_name != self.getFullName(): pyi_deps.add(origin_name) imported = parts[3] if imported.startswith(""(""): # Handle multiline imports if not imported.endswith("")""): in_import = True imported = imported[1:] in_import_part = origin_name assert in_import_part, ( ""Multiline part in file %s cannot be empty"" % pyi_filename ) else: in_import = False imported = imported[1:-1] assert imported if imported == ""*"": continue for name in imported.split("",""): if name: name = name.strip() pyi_deps.add(origin_name + ""."" + name) else: # In import imported = line if imported.endswith("")""): imported = imported[0:-1] in_import = False for name in imported.split("",""): name = name.strip() if name: pyi_deps.add(in_import_part + ""."" + name) if ""typing"" in pyi_deps: pyi_deps.discard(""typing"") if ""__future__"" in pyi_deps: pyi_deps.discard(""__future__"") if self.getFullName() in pyi_deps: pyi_deps.discard(self.getFullName()) if self.getFullName().getPackageName() in pyi_deps: pyi_deps.discard(self.getFullName().getPackageName()) self.used_modules = tuple((pyi_dep, None) for pyi_dep in pyi_deps) else: self.used_modules = () ",Read the .pyi file if present and scan for dependencies.,10,244,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _readPyPIFile(self): # Complex stuff, pylint: disable=too-many-branches,too-many-statements if self.used_modules is None: pyi_filename = self.getPyIFilename() if os.path.exists(pyi_filename): pyi_deps = OrderedSet() # Flag signalling multiline import handling in_import = False in_import_part = """" for line in getFileContentByLine(pyi_filename): line = line.strip() if not in_import: if line.startswith(""import ""): imported = line[7:] pyi_deps.add(imported) elif line.startswith(""from ""): parts = line.split(None, 3) assert parts[0] == ""from"" assert parts[2] == ""import"" origin_name = parts[1] if origin_name == ""typing"": continue if origin_name == ""."": origin_name = self.getFullName() else: dot_count = 0 while origin_name.startswith("".""): origin_name = origin_name[1:] dot_count += 1 if dot_count > 0: if origin_name: origin_name = ( self.getFullName() .getRelativePackageName(level=dot_count + 1) .getChildNamed(origin_name) ) else: origin_name = ( self.getFullName().getRelativePackageName( level=dot_count + 1 ) ) if origin_name != self.getFullName(): pyi_deps.add(origin_name) imported = parts[3] if imported.startswith(""(""): # Handle multiline imports if not imported.endswith("")""): in_import = True imported = imported[1:] in_import_part = origin_name assert in_import_part, ( ""Multiline part in file %s cannot be empty"" % pyi_filename ) else: in_import = False imported = imported[1:-1] assert imported if imported == ""*"": continue for name in imported.split("",""): if name: name = name.strip() pyi_deps.add(origin_name + ""."" + name) else: # In import imported = line if imported.endswith("")""): imported = imported[0:-1] in_import = False for name in imported.split("",""): name = name.strip() if name: pyi_deps.add(in_import_part + ""."" + name) if ""typing"" in pyi_deps: pyi_deps.discard(""typing"") if ""__future__"" in pyi_deps: pyi_deps.discard(""__future__"") if self.getFullName() in pyi_deps: pyi_deps.discard(self.getFullName()) if self.getFullName().getPackageName() in pyi_deps: pyi_deps.discard(self.getFullName().getPackageName()) self.used_modules = tuple((pyi_dep, None) for pyi_dep in pyi_deps) else: self.used_modules = () ``` ###Assistant : Read the .pyi file if present and scan for dependencies. " 249,"def topological_sort(self): result = [] # Make a shallow copy of the adjacency list alist = {} for k, v in self.adjacency_list.items(): alist[k] = v[:] while True: # See what we can remove in this run to_remove = [] for k, v in list(alist.items())[:]: if not v: to_remove.append(k) del alist[k] if not to_remove: # What's left in alist (if anything) is a cycle. break # Remove from the adjacency list of others for k, v in alist.items(): alist[k] = [(d, r) for d, r in v if d not in to_remove] logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove]) result.extend(to_remove) return result, list(alist.keys()) "," Perform a topological sort of the graph. :return: A tuple, the first element of which is a topologically sorted list of distributions, and the second element of which is a list of distributions that cannot be sorted because they have circular dependencies and so form a cycle. ",47,108,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def topological_sort(self): result = [] # Make a shallow copy of the adjacency list alist = {} for k, v in self.adjacency_list.items(): alist[k] = v[:] while True: # See what we can remove in this run to_remove = [] for k, v in list(alist.items())[:]: if not v: to_remove.append(k) del alist[k] if not to_remove: # What's left in alist (if anything) is a cycle. break # Remove from the adjacency list of others for k, v in alist.items(): alist[k] = [(d, r) for d, r in v if d not in to_remove] logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove]) result.extend(to_remove) return result, list(alist.keys()) ``` ###Assistant : Perform a topological sort of the graph. :return: A tuple, the first element of which is a topologically sorted list of distributions, and the second element of which is a list of distributions that cannot be sorted because they have circular dependencies and so form a cycle. " 250,"def soft_reset(self) -> None: self.length = 0 self.episode_id = random.randrange(int(2e9)) self.total_reward = 0.0 self.agent_rewards = defaultdict(float) self._agent_reward_history = defaultdict(list) ","Clears rewards and metrics, but retains RNN and other state. This is used to carry state across multiple logical episodes in the same env (i.e., if `soft_horizon` is set). ",29,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def soft_reset(self) -> None: self.length = 0 self.episode_id = random.randrange(int(2e9)) self.total_reward = 0.0 self.agent_rewards = defaultdict(float) self._agent_reward_history = defaultdict(list) ``` ###Assistant : Clears rewards and metrics, but retains RNN and other state. This is used to carry state across multiple logical episodes in the same env (i.e., if `soft_horizon` is set). " 251,"def get_ordering_field(self, field_name): try: field = self.opts.get_field(field_name) return field.name except FieldDoesNotExist: # See whether field_name is a name of a non-field # that allows sorting. if callable(field_name): attr = field_name elif hasattr(self.model_admin, field_name): attr = getattr(self.model_admin, field_name) else: attr = getattr(self.model, field_name) return getattr(attr, ""admin_order_field"", None) "," Returns the proper model field name corresponding to the given field_name to use for ordering. field_name may either be the name of a proper model field or the name of a method (on the admin or model) or a callable with the 'admin_order_field' attribute. Returns None if no proper model field name can be matched. ",55,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_ordering_field(self, field_name): try: field = self.opts.get_field(field_name) return field.name except FieldDoesNotExist: # See whether field_name is a name of a non-field # that allows sorting. if callable(field_name): attr = field_name elif hasattr(self.model_admin, field_name): attr = getattr(self.model_admin, field_name) else: attr = getattr(self.model, field_name) return getattr(attr, ""admin_order_field"", None) ``` ###Assistant : Returns the proper model field name corresponding to the given field_name to use for ordering. field_name may either be the name of a proper model field or the name of a method (on the admin or model) or a callable with the 'admin_order_field' attribute. Returns None if no proper model field name can be matched. 
" 252,"def _check_ordering(self, obj): # ordering = None if obj.ordering is None: # The default value is None return [] elif not isinstance(obj.ordering, (list, tuple)): return must_be( ""a list or tuple"", option=""ordering"", obj=obj, id=""admin.E031"" ) else: return list( chain.from_iterable( self._check_ordering_item(obj, field_name, ""ordering[%d]"" % index) for index, field_name in enumerate(obj.ordering) ) ) ",Check that ordering refers to existing fields or is random.,10,50,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_ordering(self, obj): # ordering = None if obj.ordering is None: # The default value is None return [] elif not isinstance(obj.ordering, (list, tuple)): return must_be( ""a list or tuple"", option=""ordering"", obj=obj, id=""admin.E031"" ) else: return list( chain.from_iterable( self._check_ordering_item(obj, field_name, ""ordering[%d]"" % index) for index, field_name in enumerate(obj.ordering) ) ) ``` ###Assistant : Check that ordering refers to existing fields or is random. " 253,"def parsing_hooks(cls) -> Tuple[Type[""Block""], Type[""Sentence""], Type[""Statements""]]: return Block, Sentence, Statements ","Returns object types that this class should be able to `parse` recusrively. The order of the objects indicates the order in which the parser should try to parse each subitem. :returns: A list of Parsable classes. :rtype list: ",38,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parsing_hooks(cls) -> Tuple[Type[""Block""], Type[""Sentence""], Type[""Statements""]]: return Block, Sentence, Statements ``` ###Assistant : Returns object types that this class should be able to `parse` recusrively. The order of the objects indicates the order in which the parser should try to parse each subitem. :returns: A list of Parsable classes. :rtype list: " 254,"def _cast_inplace(terms, acceptable_dtypes, dtype) -> None: dt = np.dtype(dtype) for term in terms: if term.type in acceptable_dtypes: continue try: new_value = term.value.astype(dt) except AttributeError: new_value = dt.type(term.value) term.update(new_value) "," Cast an expression inplace. Parameters ---------- terms : Op The expression that should cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype in this list. dtype : str or numpy.dtype The dtype to cast to. ",39,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cast_inplace(terms, acceptable_dtypes, dtype) -> None: dt = np.dtype(dtype) for term in terms: if term.type in acceptable_dtypes: continue try: new_value = term.value.astype(dt) except AttributeError: new_value = dt.type(term.value) term.update(new_value) ``` ###Assistant : Cast an expression inplace. Parameters ---------- terms : Op The expression that should cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype in this list. dtype : str or numpy.dtype The dtype to cast to. 
" 255,"def _deployment_created(self, external_id, request): payload = request.data[""payload""] vercel_project_id = ( payload[""projectId""] if payload.get(""projectId"") else payload[""project""][""id""] ) # Only create releases for production deploys for now if payload[""target""] != ""production"": logger.info( f""Ignoring deployment for environment: {payload['target']}"", extra={""external_id"": external_id, ""vercel_project_id"": vercel_project_id}, ) return self.respond(status=204) logging_params = {""external_id"": external_id, ""vercel_project_id"": vercel_project_id} org_integrations = OrganizationIntegration.objects.select_related(""organization"").filter( integration__external_id=external_id, integration__provider=self.provider ) if not org_integrations: logger.info(""Integration not found"", extra=logging_params) return self.respond({""detail"": ""Integration not found""}, status=404) # for each org integration, search the configs to find one that matches the vercel project of the webhook for org_integration in org_integrations: project_mappings = org_integration.config.get(""project_mappings"") or [] matched_mappings = list(filter(lambda x: x[1] == vercel_project_id, project_mappings)) if matched_mappings: organization = org_integration.organization sentry_project_id = matched_mappings[0][0] logging_params[""organization_id""] = organization.id logging_params[""project_id""] = sentry_project_id try: release_payload, token = get_payload_and_token( payload, organization.id, sentry_project_id ) except Project.DoesNotExist: logger.info(""Project not found"", extra=logging_params) return self.respond({""detail"": ""Project not found""}, status=404) except SentryAppInstallationForProvider.DoesNotExist: logger.info(""Installation not found"", extra=logging_params) return self.respond({""detail"": ""Installation not found""}, status=404) except SentryAppInstallationToken.DoesNotExist: logger.info(""Token not found"", extra=logging_params) return self.respond({""detail"": ""Token not found""}, status=404) except NoCommitFoundError: logger.info(""No commit found"", extra=logging_params) return self.respond({""detail"": ""No commit found""}, status=404) except MissingRepositoryError: logger.info(""Could not determine repository"", extra=logging_params) return self.respond({""detail"": ""Could not determine repository""}, status=400) url = absolute_uri(f""/api/0/organizations/{organization.slug}/releases/"") headers = { ""Accept"": ""application/json"", ""Authorization"": f""Bearer {token}"", ""User-Agent"": f""sentry_vercel/{VERSION}"", } json_error = None # create the basic release payload without refs no_ref_payload = release_payload.copy() del no_ref_payload[""refs""] with http.build_session() as session: try: resp = session.post(url, json=no_ref_payload, headers=headers) json_error = safe_json_parse(resp) resp.raise_for_status() except RequestException as e: # errors here should be uncommon but we should be aware of them logger.error( f""Error creating release: {e} - {json_error}"", extra=logging_params, exc_info=True, ) # 400 probably isn't the right status code but oh well return self.respond({""detail"": f""Error creating release: {e}""}, status=400) # set the refs try: resp = session.post( url, json=release_payload, headers=headers, ) json_error = safe_json_parse(resp) resp.raise_for_status() except RequestException as e: # errors will probably be common if the user doesn't have repos set up logger.info( f""Error setting refs: {e} - {json_error}"", extra=logging_params, 
exc_info=True, ) # 400 probably isn't the right status code but oh well return self.respond({""detail"": f""Error setting refs: {e}""}, status=400) # we are going to quit after the first project match as there shouldn't be multiple matches return self.respond(status=201) return self.respond(status=204) "," Steps: 1. Find all org integrations that match the external id 2. Search the configs to find one that matches the vercel project of the webhook 3. Look up the Sentry project that matches 4. Look up the connected internal integration 5. Find the token associated with that installation 6. Determine the commit sha and repo based on what provider is used 7. Create the release using the token WITHOUT refs 8. Update the release with refs ",77,360,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _deployment_created(self, external_id, request): payload = request.data[""payload""] vercel_project_id = ( payload[""projectId""] if payload.get(""projectId"") else payload[""project""][""id""] ) # Only create releases for production deploys for now if payload[""target""] != ""production"": logger.info( f""Ignoring deployment for environment: {payload['target']}"", extra={""external_id"": external_id, ""vercel_project_id"": vercel_project_id}, ) return self.respond(status=204) logging_params = {""external_id"": external_id, ""vercel_project_id"": vercel_project_id} org_integrations = OrganizationIntegration.objects.select_related(""organization"").filter( integration__external_id=external_id, integration__provider=self.provider ) if not org_integrations: logger.info(""Integration not found"", extra=logging_params) return self.respond({""detail"": ""Integration not found""}, status=404) # for each org integration, search the configs to find one that matches the vercel project of the webhook for org_integration in org_integrations: project_mappings = org_integration.config.get(""project_mappings"") or [] matched_mappings = list(filter(lambda x: x[1] == vercel_project_id, project_mappings)) if matched_mappings: organization = org_integration.organization sentry_project_id = matched_mappings[0][0] logging_params[""organization_id""] = organization.id logging_params[""project_id""] = sentry_project_id try: release_payload, token = get_payload_and_token( payload, organization.id, sentry_project_id ) except Project.DoesNotExist: logger.info(""Project not found"", extra=logging_params) return self.respond({""detail"": ""Project not found""}, status=404) except SentryAppInstallationForProvider.DoesNotExist: logger.info(""Installation not found"", extra=logging_params) return self.respond({""detail"": ""Installation not found""}, status=404) except SentryAppInstallationToken.DoesNotExist: logger.info(""Token not found"", extra=logging_params) return self.respond({""detail"": ""Token not found""}, status=404) except NoCommitFoundError: logger.info(""No commit found"", extra=logging_params) return self.respond({""detail"": ""No commit found""}, status=404) except MissingRepositoryError: logger.info(""Could not determine repository"", extra=logging_params) return self.respond({""detail"": ""Could not determine repository""}, status=400) url = absolute_uri(f""/api/0/organizations/{organization.slug}/releases/"") headers = { ""Accept"": ""application/json"", ""Authorization"": f""Bearer {token}"", ""User-Agent"": f""sentry_vercel/{VERSION}"", } json_error = None # create the basic release payload without refs no_ref_payload = 
release_payload.copy() del no_ref_payload[""refs""] with http.build_session() as session: try: resp = session.post(url, json=no_ref_payload, headers=headers) json_error = safe_json_parse(resp) resp.raise_for_status() except RequestException as e: # errors here should be uncommon but we should be aware of them logger.error( f""Error creating release: {e} - {json_error}"", extra=logging_params, exc_info=True, ) # 400 probably isn't the right status code but oh well return self.respond({""detail"": f""Error creating release: {e}""}, status=400) # set the refs try: resp = session.post( url, json=release_payload, headers=headers, ) json_error = safe_json_parse(resp) resp.raise_for_status() except RequestException as e: # errors will probably be common if the user doesn't have repos set up logger.info( f""Error setting refs: {e} - {json_error}"", extra=logging_params, exc_info=True, ) # 400 probably isn't the right status code but oh well return self.respond({""detail"": f""Error setting refs: {e}""}, status=400) # we are going to quit after the first project match as there shouldn't be multiple matches return self.respond(status=201) return self.respond(status=204) ``` ###Assistant : Steps: 1. Find all org integrations that match the external id 2. Search the configs to find one that matches the vercel project of the webhook 3. Look up the Sentry project that matches 4. Look up the connected internal integration 5. Find the token associated with that installation 6. Determine the commit sha and repo based on what provider is used 7. Create the release using the token WITHOUT refs 8. Update the release with refs " 256,"def _app_user(self) -> User | None: return self.user if isinstance(self.user, User) else None ","The user, if they are represented persistently in our app.",10,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _app_user(self) -> User | None: return self.user if isinstance(self.user, User) else None ``` ###Assistant : The user, if they are represented persistently in our app. " 257,"def new_locator(self, nx, nx1=None): return AxesLocator(self, nx, 0, nx1 if nx1 is not None else nx + 1, 1) "," Create a new `.AxesLocator` for the specified cell. Parameters ---------- nx, nx1 : int Integers specifying the column-position of the cell. When *nx1* is None, a single *nx*-th column is specified. Otherwise, location of columns spanning between *nx* to *nx1* (but excluding *nx1*-th column) is specified. ",46,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def new_locator(self, nx, nx1=None): return AxesLocator(self, nx, 0, nx1 if nx1 is not None else nx + 1, 1) ``` ###Assistant : Create a new `.AxesLocator` for the specified cell. Parameters ---------- nx, nx1 : int Integers specifying the column-position of the cell. When *nx1* is None, a single *nx*-th column is specified. Otherwise, location of columns spanning between *nx* to *nx1* (but excluding *nx1*-th column) is specified. 
" 258,"def test_edit_cases(self) -> None: self.login(""hamlet"") hamlet = self.example_user(""hamlet"") msg_id = self.send_stream_message( self.example_user(""hamlet""), ""Denmark"", topic_name=""topic 1"", content=""content 1"" ) result = self.client_patch( f""/json/messages/{msg_id}"", { ""message_id"": msg_id, ""content"": ""content 2"", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][""prev_content""], ""content 1"") self.assertEqual(history[0][""user_id""], hamlet.id) self.assertEqual( set(history[0].keys()), { ""timestamp"", ""prev_content"", ""user_id"", ""prev_rendered_content"", ""prev_rendered_content_version"", }, ) result = self.client_patch( f""/json/messages/{msg_id}"", { ""message_id"": msg_id, ""topic"": ""topic 2"", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], ""topic 1"") self.assertEqual(history[0][""user_id""], hamlet.id) self.assertEqual(set(history[0].keys()), {""timestamp"", LEGACY_PREV_TOPIC, ""user_id""}) result = self.client_patch( f""/json/messages/{msg_id}"", { ""message_id"": msg_id, ""content"": ""content 3"", ""topic"": ""topic 3"", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][""prev_content""], ""content 2"") self.assertEqual(history[0][LEGACY_PREV_TOPIC], ""topic 2"") self.assertEqual(history[0][""user_id""], hamlet.id) self.assertEqual( set(history[0].keys()), { ""timestamp"", LEGACY_PREV_TOPIC, ""prev_content"", ""user_id"", ""prev_rendered_content"", ""prev_rendered_content_version"", }, ) result = self.client_patch( f""/json/messages/{msg_id}"", { ""message_id"": msg_id, ""content"": ""content 4"", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][""prev_content""], ""content 3"") self.assertEqual(history[0][""user_id""], hamlet.id) self.login(""iago"") result = self.client_patch( f""/json/messages/{msg_id}"", { ""message_id"": msg_id, ""topic"": ""topic 4"", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], ""topic 3"") self.assertEqual(history[0][""user_id""], self.example_user(""iago"").id) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], ""topic 3"") self.assertEqual(history[2][LEGACY_PREV_TOPIC], ""topic 2"") self.assertEqual(history[3][LEGACY_PREV_TOPIC], ""topic 1"") self.assertEqual(history[1][""prev_content""], ""content 3"") self.assertEqual(history[2][""prev_content""], ""content 2"") self.assertEqual(history[4][""prev_content""], ""content 1"") # Now, we verify that the edit history data sent back has the # correct filled-out fields message_edit_history = self.client_get(f""/json/messages/{msg_id}/history"") json_response = orjson.loads(message_edit_history.content) # We reverse the message history view output so that the IDs line up with the above. 
message_history = list(reversed(json_response[""message_history""])) i = 0 for entry in message_history: expected_entries = {""content"", ""rendered_content"", ""topic"", ""timestamp"", ""user_id""} if i in {0, 2, 3}: expected_entries.add(""prev_topic"") if i in {1, 2, 4}: expected_entries.add(""prev_content"") expected_entries.add(""prev_rendered_content"") expected_entries.add(""content_html_diff"") i += 1 self.assertEqual(expected_entries, set(entry.keys())) self.assert_length(message_history, 6) self.assertEqual(message_history[0][""prev_topic""], ""topic 3"") self.assertEqual(message_history[0][""topic""], ""topic 4"") self.assertEqual(message_history[1][""topic""], ""topic 3"") self.assertEqual(message_history[2][""topic""], ""topic 3"") self.assertEqual(message_history[2][""prev_topic""], ""topic 2"") self.assertEqual(message_history[3][""topic""], ""topic 2"") self.assertEqual(message_history[3][""prev_topic""], ""topic 1"") self.assertEqual(message_history[4][""topic""], ""topic 1"") self.assertEqual(message_history[0][""content""], ""content 4"") self.assertEqual(message_history[1][""content""], ""content 4"") self.assertEqual(message_history[1][""prev_content""], ""content 3"") self.assertEqual(message_history[2][""content""], ""content 3"") self.assertEqual(message_history[2][""prev_content""], ""content 2"") self.assertEqual(message_history[3][""content""], ""content 2"") self.assertEqual(message_history[4][""content""], ""content 2"") self.assertEqual(message_history[4][""prev_content""], ""content 1"") self.assertEqual(message_history[5][""content""], ""content 1"") self.assertEqual(message_history[5][""topic""], ""topic 1"") ","This test verifies the accuracy of construction of Zulip's edit history data structures.",13,310,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_edit_cases(self) -> None: self.login(""hamlet"") hamlet = self.example_user(""hamlet"") msg_id = self.send_stream_message( self.example_user(""hamlet""), ""Denmark"", topic_name=""topic 1"", content=""content 1"" ) result = self.client_patch( f""/json/messages/{msg_id}"", { ""message_id"": msg_id, ""content"": ""content 2"", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][""prev_content""], ""content 1"") self.assertEqual(history[0][""user_id""], hamlet.id) self.assertEqual( set(history[0].keys()), { ""timestamp"", ""prev_content"", ""user_id"", ""prev_rendered_content"", ""prev_rendered_content_version"", }, ) result = self.client_patch( f""/json/messages/{msg_id}"", { ""message_id"": msg_id, ""topic"": ""topic 2"", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], ""topic 1"") self.assertEqual(history[0][""user_id""], hamlet.id) self.assertEqual(set(history[0].keys()), {""timestamp"", LEGACY_PREV_TOPIC, ""user_id""}) result = self.client_patch( f""/json/messages/{msg_id}"", { ""message_id"": msg_id, ""content"": ""content 3"", ""topic"": ""topic 3"", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][""prev_content""], ""content 2"") self.assertEqual(history[0][LEGACY_PREV_TOPIC], ""topic 2"") self.assertEqual(history[0][""user_id""], hamlet.id) self.assertEqual( set(history[0].keys()), { ""timestamp"", LEGACY_PREV_TOPIC, ""prev_content"", ""user_id"", ""prev_rendered_content"", ""prev_rendered_content_version"", }, ) result = self.client_patch( f""/json/messages/{msg_id}"", { ""message_id"": msg_id, ""content"": ""content 4"", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][""prev_content""], ""content 3"") self.assertEqual(history[0][""user_id""], hamlet.id) self.login(""iago"") result = self.client_patch( f""/json/messages/{msg_id}"", { ""message_id"": msg_id, ""topic"": ""topic 4"", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], ""topic 3"") self.assertEqual(history[0][""user_id""], self.example_user(""iago"").id) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], ""topic 3"") self.assertEqual(history[2][LEGACY_PREV_TOPIC], ""topic 2"") self.assertEqual(history[3][LEGACY_PREV_TOPIC], ""topic 1"") self.assertEqual(history[1][""prev_content""], ""content 3"") self.assertEqual(history[2][""prev_content""], ""content 2"") self.assertEqual(history[4][""prev_content""], ""content 1"") # Now, we verify that the edit history data sent back has the # correct filled-out fields message_edit_history = self.client_get(f""/json/messages/{msg_id}/history"") json_response = orjson.loads(message_edit_history.content) # We reverse the message history view output so that the IDs line up with the above. 
message_history = list(reversed(json_response[""message_history""])) i = 0 for entry in message_history: expected_entries = {""content"", ""rendered_content"", ""topic"", ""timestamp"", ""user_id""} if i in {0, 2, 3}: expected_entries.add(""prev_topic"") if i in {1, 2, 4}: expected_entries.add(""prev_content"") expected_entries.add(""prev_rendered_content"") expected_entries.add(""content_html_diff"") i += 1 self.assertEqual(expected_entries, set(entry.keys())) self.assert_length(message_history, 6) self.assertEqual(message_history[0][""prev_topic""], ""topic 3"") self.assertEqual(message_history[0][""topic""], ""topic 4"") self.assertEqual(message_history[1][""topic""], ""topic 3"") self.assertEqual(message_history[2][""topic""], ""topic 3"") self.assertEqual(message_history[2][""prev_topic""], ""topic 2"") self.assertEqual(message_history[3][""topic""], ""topic 2"") self.assertEqual(message_history[3][""prev_topic""], ""topic 1"") self.assertEqual(message_history[4][""topic""], ""topic 1"") self.assertEqual(message_history[0][""content""], ""content 4"") self.assertEqual(message_history[1][""content""], ""content 4"") self.assertEqual(message_history[1][""prev_content""], ""content 3"") self.assertEqual(message_history[2][""content""], ""content 3"") self.assertEqual(message_history[2][""prev_content""], ""content 2"") self.assertEqual(message_history[3][""content""], ""content 2"") self.assertEqual(message_history[4][""content""], ""content 2"") self.assertEqual(message_history[4][""prev_content""], ""content 1"") self.assertEqual(message_history[5][""content""], ""content 1"") self.assertEqual(message_history[5][""topic""], ""topic 1"") ``` ###Assistant : This test verifies the accuracy of construction of Zulip's edit history data structures. " 259,"def get_project_name(doctype, txt, searchfield, start, page_len, filters): doctype = ""Project"" cond = """" if filters and filters.get(""customer""): cond = % ( frappe.db.escape(filters.get(""customer"")) ) fields = get_fields(doctype, [""name"", ""project_name""]) searchfields = frappe.get_meta(doctype).get_search_fields() searchfields = "" or "".join([""`tabProject`."" + field + "" like %(txt)s"" for field in searchfields]) return frappe.db.sql( .format( fields="", "".join([""`tabProject`.{0}"".format(f) for f in fields]), cond=cond, scond=searchfields, match_cond=get_match_cond(doctype), start=start, page_len=page_len, ), {""txt"": ""%{0}%"".format(txt), ""_txt"": txt.replace(""%"", """")}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs","(`tabProject`.customer = %s or ifnull(`tabProject`.customer,"""")="""") andselect {fields} from `tabProject` where `tabProject`.status not in ('Completed', 'Cancelled') and {cond} {scond} {match_cond} order by (case when locate(%(_txt)s, `tabProject`.name) > 0 then locate(%(_txt)s, `tabProject`.name) else 99999 end), `tabProject`.idx desc, `tabProject`.name asc limit {page_len} offset {start}",41,69,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_project_name(doctype, txt, searchfield, start, page_len, filters): doctype = ""Project"" cond = """" if filters and filters.get(""customer""): cond = % ( frappe.db.escape(filters.get(""customer"")) ) fields = get_fields(doctype, [""name"", ""project_name""]) searchfields = frappe.get_meta(doctype).get_search_fields() searchfields = "" or "".join([""`tabProject`."" + field + "" like %(txt)s"" for field in searchfields]) return frappe.db.sql( .format( fields="", "".join([""`tabProject`.{0}"".format(f) for f in fields]), cond=cond, scond=searchfields, match_cond=get_match_cond(doctype), start=start, page_len=page_len, ), {""txt"": ""%{0}%"".format(txt), ""_txt"": txt.replace(""%"", """")}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs ``` ###Assistant : (`tabProject`.customer = %s or ifnull(`tabProject`.customer,"""")="""") andselect {fields} from `tabProject` where `tabProject`.status not in ('Completed', 'Cancelled') and {cond} {scond} {match_cond} order by (case when locate(%(_txt)s, `tabProject`.name) > 0 then locate(%(_txt)s, `tabProject`.name) else 99999 end), `tabProject`.idx desc, `tabProject`.name asc limit {page_len} offset {start} " 260,"def test_autosuggest_at_EOL(text, cursor, suggestion, called): event = make_event(text, cursor, suggestion) event.current_buffer.insert_text = Mock() _apply_autosuggest(event) if called: event.current_buffer.insert_text.assert_called() else: event.current_buffer.insert_text.assert_not_called() # event.current_buffer.document.get_end_of_line_position.assert_called() "," test that autosuggest is only applied at end of line. ",10,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_autosuggest_at_EOL(text, cursor, suggestion, called): event = make_event(text, cursor, suggestion) event.current_buffer.insert_text = Mock() _apply_autosuggest(event) if called: event.current_buffer.insert_text.assert_called() else: event.current_buffer.insert_text.assert_not_called() # event.current_buffer.document.get_end_of_line_position.assert_called() ``` ###Assistant : test that autosuggest is only applied at end of line. " 261,"def stack3(x, filters, blocks, stride1=2, groups=32, name=None): x = block3(x, filters, stride=stride1, groups=groups, name=name + ""_block1"") for i in range(2, blocks + 1): x = block3( x, filters, groups=groups, conv_shortcut=False, name=name + ""_block"" + str(i), ) return x @keras_export( ""keras.applications.resnet50.ResNet50"", ""keras.applications.resnet.ResNet50"", ""keras.applications.ResNet50"", )","A set of stacked residual blocks. Args: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. stride1: default 2, stride of the first layer in the first block. groups: default 32, group size for grouped convolution. name: string, stack label. Returns: Output tensor for the stacked blocks. ",58,43,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def stack3(x, filters, blocks, stride1=2, groups=32, name=None): x = block3(x, filters, stride=stride1, groups=groups, name=name + ""_block1"") for i in range(2, blocks + 1): x = block3( x, filters, groups=groups, conv_shortcut=False, name=name + ""_block"" + str(i), ) return x @keras_export( ""keras.applications.resnet50.ResNet50"", ""keras.applications.resnet.ResNet50"", ""keras.applications.ResNet50"", ) ``` ###Assistant : A set of stacked residual blocks. Args: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. stride1: default 2, stride of the first layer in the first block. groups: default 32, group size for grouped convolution. name: string, stack label. Returns: Output tensor for the stacked blocks. " 262,"def is_mixed(self) -> bool: warnings.warn( ""Index.is_mixed is deprecated and will be removed in a future version. "" ""Check index.inferred_type directly instead."", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) return self.inferred_type in [""mixed""] "," Check if the Index holds data with mixed data types. Returns ------- bool Whether or not the Index holds data with mixed data types. See Also -------- is_boolean : Check if the Index only consists of booleans. is_integer : Check if the Index only consists of integers. is_floating : Check if the Index is a floating type. is_numeric : Check if the Index only consists of numeric data. is_object : Check if the Index is of the object dtype. is_categorical : Check if the Index holds categorical data. is_interval : Check if the Index holds Interval objects. Examples -------- >>> idx = pd.Index(['a', np.nan, 'b']) >>> idx.is_mixed() True >>> idx = pd.Index([1.0, 2.0, 3.0, 5.0]) >>> idx.is_mixed() False ",118,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_mixed(self) -> bool: warnings.warn( ""Index.is_mixed is deprecated and will be removed in a future version. "" ""Check index.inferred_type directly instead."", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) return self.inferred_type in [""mixed""] ``` ###Assistant : Check if the Index holds data with mixed data types. Returns ------- bool Whether or not the Index holds data with mixed data types. See Also -------- is_boolean : Check if the Index only consists of booleans. is_integer : Check if the Index only consists of integers. is_floating : Check if the Index is a floating type. is_numeric : Check if the Index only consists of numeric data. is_object : Check if the Index is of the object dtype. is_categorical : Check if the Index holds categorical data. is_interval : Check if the Index holds Interval objects. 
Examples -------- >>> idx = pd.Index(['a', np.nan, 'b']) >>> idx.is_mixed() True >>> idx = pd.Index([1.0, 2.0, 3.0, 5.0]) >>> idx.is_mixed() False " 263,"def responder(request): # Find an available port with socket.socket() as sock: sock.bind((""localhost"", 0)) port = sock.getsockname()[1] server_process = multiprocessing.Process( target=process_server, args=(request.param, port) ) server_process.start() yield port server_process.join(10) server_process.terminate() kill_time = 5 wait_time = 0 while server_process.is_alive(): if wait_time > kill_time: server_process.kill() break else: wait_time += 0.1 time.sleep(0.1) server_process.close() @pytest.mark.parametrize( ""responder, read_method, parquet_engine"", [ (CSVUserAgentResponder, pd.read_csv, None), (JSONUserAgentResponder, pd.read_json, None), (ParquetPyArrowUserAgentResponder, pd.read_parquet, ""pyarrow""), pytest.param( ParquetFastParquetUserAgentResponder, pd.read_parquet, ""fastparquet"", # TODO(ArrayManager) fastparquet marks=[ td.skip_array_manager_not_yet_implemented, pytest.mark.xfail(PY310, reason=""fastparquet failing on 3.10""), ], ), (PickleUserAgentResponder, pd.read_pickle, None), (StataUserAgentResponder, pd.read_stata, None), (GzippedCSVUserAgentResponder, pd.read_csv, None), (GzippedJSONUserAgentResponder, pd.read_json, None), ], indirect=[""responder""], )"," Fixture that starts a local http server in a separate process on localhost and returns the port. Running in a separate process instead of a thread to allow termination/killing of http server upon cleanup. ",34,93,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def responder(request): # Find an available port with socket.socket() as sock: sock.bind((""localhost"", 0)) port = sock.getsockname()[1] server_process = multiprocessing.Process( target=process_server, args=(request.param, port) ) server_process.start() yield port server_process.join(10) server_process.terminate() kill_time = 5 wait_time = 0 while server_process.is_alive(): if wait_time > kill_time: server_process.kill() break else: wait_time += 0.1 time.sleep(0.1) server_process.close() @pytest.mark.parametrize( ""responder, read_method, parquet_engine"", [ (CSVUserAgentResponder, pd.read_csv, None), (JSONUserAgentResponder, pd.read_json, None), (ParquetPyArrowUserAgentResponder, pd.read_parquet, ""pyarrow""), pytest.param( ParquetFastParquetUserAgentResponder, pd.read_parquet, ""fastparquet"", # TODO(ArrayManager) fastparquet marks=[ td.skip_array_manager_not_yet_implemented, pytest.mark.xfail(PY310, reason=""fastparquet failing on 3.10""), ], ), (PickleUserAgentResponder, pd.read_pickle, None), (StataUserAgentResponder, pd.read_stata, None), (GzippedCSVUserAgentResponder, pd.read_csv, None), (GzippedJSONUserAgentResponder, pd.read_json, None), ], indirect=[""responder""], ) ``` ###Assistant : Fixture that starts a local http server in a separate process on localhost and returns the port. Running in a separate process instead of a thread to allow termination/killing of http server upon cleanup. 
" 264,"def _pre_setup(self): super()._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send( sender=settings._wrapped.__class__, setting=""INSTALLED_APPS"", value=self.available_apps, enter=True, ) for db_name in self._databases_names(include_mirrors=False): emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) try: self._fixture_setup() except Exception: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send( sender=settings._wrapped.__class__, setting=""INSTALLED_APPS"", value=settings.INSTALLED_APPS, enter=False, ) raise # Clear the queries_log so that it's less likely to overflow (a single # test probably won't execute 9K queries). If queries_log overflows, # then assertNumQueries() doesn't work. for db_name in self._databases_names(include_mirrors=False): connections[db_name].queries_log.clear() "," Perform pre-test setup: * If the class has an 'available_apps' attribute, restrict the app registry to these applications, then fire the post_migrate signal -- it must run with the correct set of applications for the test case. * If the class has a 'fixtures' attribute, install those fixtures. ",48,72,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _pre_setup(self): super()._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send( sender=settings._wrapped.__class__, setting=""INSTALLED_APPS"", value=self.available_apps, enter=True, ) for db_name in self._databases_names(include_mirrors=False): emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) try: self._fixture_setup() except Exception: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send( sender=settings._wrapped.__class__, setting=""INSTALLED_APPS"", value=settings.INSTALLED_APPS, enter=False, ) raise # Clear the queries_log so that it's less likely to overflow (a single # test probably won't execute 9K queries). If queries_log overflows, # then assertNumQueries() doesn't work. for db_name in self._databases_names(include_mirrors=False): connections[db_name].queries_log.clear() ``` ###Assistant : Perform pre-test setup: * If the class has an 'available_apps' attribute, restrict the app registry to these applications, then fire the post_migrate signal -- it must run with the correct set of applications for the test case. * If the class has a 'fixtures' attribute, install those fixtures. " 265,"def test_multiple_actions_form(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], # Two different actions selected on the two forms... ""action"": [""external_mail"", ""delete_selected""], # ...but ""go"" was clicked on the top form. ""index"": 0, } self.client.post( reverse(""admin:admin_views_externalsubscriber_changelist""), action_data ) # The action sends mail rather than deletes. self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, ""Greetings from a function action"") "," Actions come from the form whose submit button was pressed (#10618). ",11,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_multiple_actions_form(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], # Two different actions selected on the two forms... ""action"": [""external_mail"", ""delete_selected""], # ...but ""go"" was clicked on the top form. 
""index"": 0, } self.client.post( reverse(""admin:admin_views_externalsubscriber_changelist""), action_data ) # The action sends mail rather than deletes. self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, ""Greetings from a function action"") ``` ###Assistant : Actions come from the form whose submit button was pressed (#10618). " 266,"def test_empty_message(self) -> None: self.login(""hamlet"") othello = self.example_user(""othello"") result = self.client_post( ""/json/messages"", {""type"": ""private"", ""content"": "" "", ""to"": othello.email}, ) self.assert_json_error(result, ""Message must not be empty"") "," Sending a message that is empty or only whitespace should fail ",11,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_empty_message(self) -> None: self.login(""hamlet"") othello = self.example_user(""othello"") result = self.client_post( ""/json/messages"", {""type"": ""private"", ""content"": "" "", ""to"": othello.email}, ) self.assert_json_error(result, ""Message must not be empty"") ``` ###Assistant : Sending a message that is empty or only whitespace should fail " 267,"def freqai_feature_engineering_generic(self, dataframe, **kwargs): dataframe[""%-pct-change""] = dataframe[""close""].pct_change() dataframe[""%-raw_volume""] = dataframe[""volume""] dataframe[""%-raw_price""] = dataframe[""close""] return dataframe "," This optional function will be called for all include_timeframes (including corr_pairs). After that, the features will be shifted by the number of candles in the include_shifted_candles. :param df: strategy dataframe which will receive the features dataframe[""%-pct-change""] = dataframe[""close""].pct_change() ",38,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def freqai_feature_engineering_generic(self, dataframe, **kwargs): dataframe[""%-pct-change""] = dataframe[""close""].pct_change() dataframe[""%-raw_volume""] = dataframe[""volume""] dataframe[""%-raw_price""] = dataframe[""close""] return dataframe ``` ###Assistant : This optional function will be called for all include_timeframes (including corr_pairs). After that, the features will be shifted by the number of candles in the include_shifted_candles. :param df: strategy dataframe which will receive the features dataframe[""%-pct-change""] = dataframe[""close""].pct_change() " 268,"def next_sample(uid): return next(_SHARED_SEQUENCES[uid]) @keras_export(""keras.utils.GeneratorEnqueuer"")","Gets the next value from the generator `uid`. To allow multiple generators to be used at the same time, we use `uid` to get a specific one. A single generator would cause the validation to overwrite the training generator. Args: uid: int, generator identifier Returns: The next value of generator `uid`. ",51,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def next_sample(uid): return next(_SHARED_SEQUENCES[uid]) @keras_export(""keras.utils.GeneratorEnqueuer"") ``` ###Assistant : Gets the next value from the generator `uid`. To allow multiple generators to be used at the same time, we use `uid` to get a specific one. A single generator would cause the validation to overwrite the training generator. Args: uid: int, generator identifier Returns: The next value of generator `uid`. 
" 269,"def check_settings(base_url=None): if base_url is None: base_url = settings.STATIC_URL if not base_url: raise ImproperlyConfigured( ""You're using the staticfiles app "" ""without having set the required STATIC_URL setting."") if settings.MEDIA_URL == base_url: raise ImproperlyConfigured( ""The MEDIA_URL and STATIC_URL settings must have different values"" ) if (settings.DEBUG and settings.MEDIA_URL and settings.STATIC_URL and settings.MEDIA_URL.startswith(settings.STATIC_URL)): raise ImproperlyConfigured( ""runserver can't serve media if MEDIA_URL is within STATIC_URL."" ) if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and (settings.MEDIA_ROOT == settings.STATIC_ROOT)): raise ImproperlyConfigured( ""The MEDIA_ROOT and STATIC_ROOT settings must have different values"" ) "," Check if the staticfiles settings have sane values. ",8,83,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_settings(base_url=None): if base_url is None: base_url = settings.STATIC_URL if not base_url: raise ImproperlyConfigured( ""You're using the staticfiles app "" ""without having set the required STATIC_URL setting."") if settings.MEDIA_URL == base_url: raise ImproperlyConfigured( ""The MEDIA_URL and STATIC_URL settings must have different values"" ) if (settings.DEBUG and settings.MEDIA_URL and settings.STATIC_URL and settings.MEDIA_URL.startswith(settings.STATIC_URL)): raise ImproperlyConfigured( ""runserver can't serve media if MEDIA_URL is within STATIC_URL."" ) if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and (settings.MEDIA_ROOT == settings.STATIC_ROOT)): raise ImproperlyConfigured( ""The MEDIA_ROOT and STATIC_ROOT settings must have different values"" ) ``` ###Assistant : Check if the staticfiles settings have sane values. " 270,"def kubernetes_dict(self, **kwargs) -> Dict: config = self.dict(**kwargs) for idx, deployment in enumerate(config[""deployments""]): if isinstance(deployment.get(""ray_actor_options""), dict): # JSON-serialize ray_actor_options' resources dictionary if isinstance(deployment[""ray_actor_options""].get(""resources""), dict): deployment[""ray_actor_options""][""resources""] = json.dumps( deployment[""ray_actor_options""][""resources""] ) # JSON-serialize ray_actor_options' runtime_env dictionary if isinstance(deployment[""ray_actor_options""].get(""runtime_env""), dict): deployment[""ray_actor_options""][""runtime_env""] = json.dumps( deployment[""ray_actor_options""][""runtime_env""] ) # Convert ray_actor_options' keys deployment[""ray_actor_options""] = dict_keys_snake_to_camel_case( deployment[""ray_actor_options""] ) # JSON-serialize user_config dictionary if isinstance(deployment.get(""user_config""), dict): deployment[""user_config""] = json.dumps(deployment[""user_config""]) # Convert deployment's keys config[""deployments""][idx] = dict_keys_snake_to_camel_case(deployment) # Convert top-level runtime_env if isinstance(config.get(""runtime_env""), dict): config[""runtime_env""] = json.dumps(config[""runtime_env""]) # Convert top-level option's keys config = dict_keys_snake_to_camel_case(config) return config @PublicAPI(stability=""beta"")","Returns dictionary in Kubernetes format. Dictionary can be yaml-dumped to a Serve config file directly and then copy-pasted into a RayService Kubernetes config. Args: all kwargs are passed directly into schema's dict() function. ",33,89,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def kubernetes_dict(self, **kwargs) -> Dict: config = self.dict(**kwargs) for idx, deployment in enumerate(config[""deployments""]): if isinstance(deployment.get(""ray_actor_options""), dict): # JSON-serialize ray_actor_options' resources dictionary if isinstance(deployment[""ray_actor_options""].get(""resources""), dict): deployment[""ray_actor_options""][""resources""] = json.dumps( deployment[""ray_actor_options""][""resources""] ) # JSON-serialize ray_actor_options' runtime_env dictionary if isinstance(deployment[""ray_actor_options""].get(""runtime_env""), dict): deployment[""ray_actor_options""][""runtime_env""] = json.dumps( deployment[""ray_actor_options""][""runtime_env""] ) # Convert ray_actor_options' keys deployment[""ray_actor_options""] = dict_keys_snake_to_camel_case( deployment[""ray_actor_options""] ) # JSON-serialize user_config dictionary if isinstance(deployment.get(""user_config""), dict): deployment[""user_config""] = json.dumps(deployment[""user_config""]) # Convert deployment's keys config[""deployments""][idx] = dict_keys_snake_to_camel_case(deployment) # Convert top-level runtime_env if isinstance(config.get(""runtime_env""), dict): config[""runtime_env""] = json.dumps(config[""runtime_env""]) # Convert top-level option's keys config = dict_keys_snake_to_camel_case(config) return config @PublicAPI(stability=""beta"") ``` ###Assistant : Returns dictionary in Kubernetes format. Dictionary can be yaml-dumped to a Serve config file directly and then copy-pasted into a RayService Kubernetes config. Args: all kwargs are passed directly into schema's dict() function. " 271,"def reload_images(self, group_method, img_list): logger.info(""Preparing to group..."") if group_method == 'group_blur': filename_list, image_list = self._get_images() blurs = [self.estimate_blur(img) for img in image_list] temp_list = list(zip(filename_list, blurs)) elif group_method == 'group_blur_fft': filename_list, image_list = self._get_images() fft_blurs = [self.estimate_blur_fft(img) for img in image_list] temp_list = list(zip(filename_list, fft_blurs)) elif group_method == 'group_face_cnn': filename_list, image_list, landmarks = self._get_landmarks() temp_list = list(zip(filename_list, landmarks)) elif group_method == 'group_face_yaw': filename_list, image_list, landmarks = self._get_landmarks() yaws = [self.calc_landmarks_face_yaw(mark) for mark in landmarks] temp_list = list(zip(filename_list, yaws)) elif group_method == 'group_hist': filename_list, image_list = self._get_images() histograms = [cv2.calcHist([img], [0], None, [256], [0, 256]) for img in image_list] temp_list = list(zip(filename_list, histograms)) elif group_method == 'group_black_pixels': filename_list, image_list = self._get_images() black_pixels = [np.ndarray.all(img == [0, 0, 0], axis=2).sum()/img.size*100*3 for img in image_list] temp_list = list(zip(filename_list, black_pixels)) else: raise ValueError(f""{group_method} group_method not found."") return self.splice_lists(img_list, temp_list) "," Reloads the image list by replacing the comparative values with those that the chosen grouping method expects. :param group_method: str name of the grouping method that will be used. :param img_list: image list that has been sorted by one of the sort methods. :return: img_list but with the comparative values that the chosen grouping method expects. ",56,135,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def reload_images(self, group_method, img_list): logger.info(""Preparing to group..."") if group_method == 'group_blur': filename_list, image_list = self._get_images() blurs = [self.estimate_blur(img) for img in image_list] temp_list = list(zip(filename_list, blurs)) elif group_method == 'group_blur_fft': filename_list, image_list = self._get_images() fft_blurs = [self.estimate_blur_fft(img) for img in image_list] temp_list = list(zip(filename_list, fft_blurs)) elif group_method == 'group_face_cnn': filename_list, image_list, landmarks = self._get_landmarks() temp_list = list(zip(filename_list, landmarks)) elif group_method == 'group_face_yaw': filename_list, image_list, landmarks = self._get_landmarks() yaws = [self.calc_landmarks_face_yaw(mark) for mark in landmarks] temp_list = list(zip(filename_list, yaws)) elif group_method == 'group_hist': filename_list, image_list = self._get_images() histograms = [cv2.calcHist([img], [0], None, [256], [0, 256]) for img in image_list] temp_list = list(zip(filename_list, histograms)) elif group_method == 'group_black_pixels': filename_list, image_list = self._get_images() black_pixels = [np.ndarray.all(img == [0, 0, 0], axis=2).sum()/img.size*100*3 for img in image_list] temp_list = list(zip(filename_list, black_pixels)) else: raise ValueError(f""{group_method} group_method not found."") return self.splice_lists(img_list, temp_list) ``` ###Assistant : Reloads the image list by replacing the comparative values with those that the chosen grouping method expects. :param group_method: str name of the grouping method that will be used. :param img_list: image list that has been sorted by one of the sort methods. :return: img_list but with the comparative values that the chosen grouping method expects. " 272,"def get_sales_orders(quotations): if not quotations: return [] quotation_names = [q.name for q in quotations] return frappe.db.sql( .format( "", "".join([""%s""] * len(quotation_names)) ), tuple(quotation_names), as_dict=1, ) # nosec "," SELECT so.`name`, so.`base_grand_total`, soi.prevdoc_docname as quotation FROM `tabSales Order` so, `tabSales Order Item` soi WHERE so.docstatus=1 AND so.name = soi.parent AND soi.prevdoc_docname in ({0}) ",24,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_sales_orders(quotations): if not quotations: return [] quotation_names = [q.name for q in quotations] return frappe.db.sql( .format( "", "".join([""%s""] * len(quotation_names)) ), tuple(quotation_names), as_dict=1, ) # nosec ``` ###Assistant : SELECT so.`name`, so.`base_grand_total`, soi.prevdoc_docname as quotation FROM `tabSales Order` so, `tabSales Order Item` soi WHERE so.docstatus=1 AND so.name = soi.parent AND soi.prevdoc_docname in ({0}) " 273,"def download_extract(name, folder=None): fname = download(name) base_dir = os.path.dirname(fname) data_dir, ext = os.path.splitext(fname) if ext == '.zip': fp = zipfile.ZipFile(fname, 'r') elif ext in ('.tar', '.gz'): fp = tarfile.open(fname, 'r') else: assert False, 'Only zip/tar files can be extracted.' fp.extractall(base_dir) return os.path.join(base_dir, folder) if folder else data_dir ","Download and extract a zip/tar file. Defined in :numref:`sec_utils`",9,47,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def download_extract(name, folder=None): fname = download(name) base_dir = os.path.dirname(fname) data_dir, ext = os.path.splitext(fname) if ext == '.zip': fp = zipfile.ZipFile(fname, 'r') elif ext in ('.tar', '.gz'): fp = tarfile.open(fname, 'r') else: assert False, 'Only zip/tar files can be extracted.' fp.extractall(base_dir) return os.path.join(base_dir, folder) if folder else data_dir ``` ###Assistant : Download and extract a zip/tar file. Defined in :numref:`sec_utils` " 274,"def sample_weights_mismatch(self): # If there is a mismatch between sample weight mode and the placeholders # created, then recompile the sub-graphs that depend on sample weights. return ( self.sample_weight_mode is not None and self.sample_weight is None ) or ( self.sample_weight_mode is None and self.sample_weight is not None ) ",Check if the sample weight and the mode match or not.,11,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sample_weights_mismatch(self): # If there is a mismatch between sample weight mode and the placeholders # created, then recompile the sub-graphs that depend on sample weights. return ( self.sample_weight_mode is not None and self.sample_weight is None ) or ( self.sample_weight_mode is None and self.sample_weight is not None ) ``` ###Assistant : Check if the sample weight and the mode match or not. " 275,"def shebang(self, line, cell): # Create the event loop in which to run script magics # this operates on a background thread if self.event_loop is None: if sys.platform == ""win32"": # don't override the current policy, # just create an event loop event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop() else: event_loop = asyncio.new_event_loop() self.event_loop = event_loop # start the loop in a background thread asyncio_thread = Thread(target=event_loop.run_forever, daemon=True) asyncio_thread.start() else: event_loop = self.event_loop ","Run a cell via a shell command The `%%script` line is like the #! line of script, specifying a program (bash, perl, ruby, etc.) with which to run. The rest of the cell is run by that program. Examples -------- :: In [1]: %%script bash ...: for i in 1 2 3; do ...: echo $i ...: done 1 2 3 ",61,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def shebang(self, line, cell): # Create the event loop in which to run script magics # this operates on a background thread if self.event_loop is None: if sys.platform == ""win32"": # don't override the current policy, # just create an event loop event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop() else: event_loop = asyncio.new_event_loop() self.event_loop = event_loop # start the loop in a background thread asyncio_thread = Thread(target=event_loop.run_forever, daemon=True) asyncio_thread.start() else: event_loop = self.event_loop ``` ###Assistant : Run a cell via a shell command The `%%script` line is like the #! line of script, specifying a program (bash, perl, ruby, etc.) with which to run. The rest of the cell is run by that program. Examples -------- :: In [1]: %%script bash ...: for i in 1 2 3; do ...: echo $i ...: done 1 2 3 " 276,"def statistics(self): try: return self._local.statistics except AttributeError: self._local.statistics = {} return self._local.statistics ","Return a dictionary of runtime statistics. 
This dictionary will be empty when the controller has never been run. When it is running or has run previously it should (but may not) have useful and/or informational keys and values when running is underway and/or completed. .. warning:: The keys in this dictionary **should** be somewhat stable (not changing), but their existence **may** change between major releases as new statistics are gathered or removed so before accessing keys ensure that they actually exist and handle when they do not. .. note:: The values in this dictionary are local to the thread running call (so if multiple threads share the same retrying object - either directly or indirectly) they will each have their own view of statistics they have collected (in the future we may provide a way to aggregate the various statistics from each thread). ",145,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def statistics(self): try: return self._local.statistics except AttributeError: self._local.statistics = {} return self._local.statistics ``` ###Assistant : Return a dictionary of runtime statistics. This dictionary will be empty when the controller has never been run. When it is running or has run previously it should (but may not) have useful and/or informational keys and values when running is underway and/or completed. .. warning:: The keys in this dictionary **should** be somewhat stable (not changing), but their existence **may** change between major releases as new statistics are gathered or removed so before accessing keys ensure that they actually exist and handle when they do not. .. note:: The values in this dictionary are local to the thread running call (so if multiple threads share the same retrying object - either directly or indirectly) they will each have their own view of statistics they have collected (in the future we may provide a way to aggregate the various statistics from each thread). " 277,"def send(self, sender, **named): if ( not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS ): return [] return [ (receiver, receiver(signal=self, sender=sender, **named)) for receiver in self._live_receivers(sender) ] "," Send signal from sender to all connected receivers. If any receiver raises an error, the error propagates back through send, terminating the dispatch loop. So it's possible that all receivers won't be called if an error is raised. Arguments: sender The sender of the signal. Either a specific object or None. named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ... ]. ",70,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def send(self, sender, **named): if ( not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS ): return [] return [ (receiver, receiver(signal=self, sender=sender, **named)) for receiver in self._live_receivers(sender) ] ``` ###Assistant : Send signal from sender to all connected receivers. If any receiver raises an error, the error propagates back through send, terminating the dispatch loop. So it's possible that all receivers won't be called if an error is raised. Arguments: sender The sender of the signal. Either a specific object or None. named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ... ]. 
" 278,"def deploy_ray_func(func, *args): # pragma: no cover result = func(*args) ip = get_node_ip_address() if isinstance(result, pandas.DataFrame): return result, len(result), len(result.columns), ip elif all(isinstance(r, pandas.DataFrame) for r in result): return [i for r in result for i in [r, len(r), len(r.columns), ip]] else: return [i for r in result for i in [r, None, None, ip]] "," Execute a function on an axis partition in a worker process. Parameters ---------- func : callable Function to be executed on an axis partition. *args : iterable Additional arguments that need to passed in ``func``. Returns ------- list The result of the function ``func`` and metadata for it. Notes ----- Ray functions are not detected by codecov (thus pragma: no cover). ",61,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deploy_ray_func(func, *args): # pragma: no cover result = func(*args) ip = get_node_ip_address() if isinstance(result, pandas.DataFrame): return result, len(result), len(result.columns), ip elif all(isinstance(r, pandas.DataFrame) for r in result): return [i for r in result for i in [r, len(r), len(r.columns), ip]] else: return [i for r in result for i in [r, None, None, ip]] ``` ###Assistant : Execute a function on an axis partition in a worker process. Parameters ---------- func : callable Function to be executed on an axis partition. *args : iterable Additional arguments that need to passed in ``func``. Returns ------- list The result of the function ``func`` and metadata for it. Notes ----- Ray functions are not detected by codecov (thus pragma: no cover). " 279,"def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ViltEncoder): module.gradient_checkpointing = value VILT_START_DOCSTRING = r VILT_INPUTS_DOCSTRING = r VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r @add_start_docstrings( ""The bare ViLT Model transformer outputting raw hidden-states without any specific head on top."", VILT_START_DOCSTRING, )"," This model is a PyTorch `torch.nn.Module `_ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ViltConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. 
Pixel values can be obtained using [`ViltFeatureExtractor`]. See [`ViltFeatureExtractor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*): Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `pixel_values` into patch embeddings. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See [`ViltFeatureExtractor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*): Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `pixel_values` into patch embeddings. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. ",802,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ViltEncoder): module.gradient_checkpointing = value VILT_START_DOCSTRING = r VILT_INPUTS_DOCSTRING = r VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r @add_start_docstrings( ""The bare ViLT Model transformer outputting raw hidden-states without any specific head on top."", VILT_START_DOCSTRING, ) ``` ###Assistant : This model is a PyTorch `torch.nn.Module `_ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ViltConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See [`ViltFeatureExtractor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. 
**not masked**), - 0 for pixels that are padding (i.e. **masked**). `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*): Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `pixel_values` into patch embeddings. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See [`ViltFeatureExtractor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*): Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `pixel_values` into patch embeddings. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. " 280,"def needs_document_end_workaround(self): if objects.backend == usertypes.Backend.QtWebKit: return False assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend broken_scripts = [ ('http://userstyles.org', None), ('https://github.com/ParticleCore', 'Iridium'), ] return any(self._matches_id(namespace=namespace, name=name) for namespace, name in broken_scripts) ","Check whether to force @run-at document-end. This needs to be done on QtWebEngine for known-broken scripts. On Qt 5.12, accessing the DOM isn't possible with ""@run-at document-start"". It was documented to be impossible before, but seems to work fine. However, some scripts do DOM access with ""@run-at document-start"". Fix those by forcing them to use document-end instead. ",57,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def needs_document_end_workaround(self): if objects.backend == usertypes.Backend.QtWebKit: return False assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend broken_scripts = [ ('http://userstyles.org', None), ('https://github.com/ParticleCore', 'Iridium'), ] return any(self._matches_id(namespace=namespace, name=name) for namespace, name in broken_scripts) ``` ###Assistant : Check whether to force @run-at document-end. This needs to be done on QtWebEngine for known-broken scripts. On Qt 5.12, accessing the DOM isn't possible with ""@run-at document-start"". It was documented to be impossible before, but seems to work fine. However, some scripts do DOM access with ""@run-at document-start"". Fix those by forcing them to use document-end instead. " 281,"def _temperature_unit(self) -> str: if ( weather_option_temperature_unit := self._weather_option_temperature_unit ) is not None: return weather_option_temperature_unit return self._default_temperature_unit ","Return the converted unit of measurement for temperature. Should not be set by integrations. ",14,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _temperature_unit(self) -> str: if ( weather_option_temperature_unit := self._weather_option_temperature_unit ) is not None: return weather_option_temperature_unit return self._default_temperature_unit ``` ###Assistant : Return the converted unit of measurement for temperature. Should not be set by integrations. 
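A small, self-contained sketch of the option-or-default lookup that `_temperature_unit` uses above; the class and attribute values here are simplified stand-ins rather than the Home Assistant originals.

```Python
class UnitDemo:
    # Hypothetical holder for the two attributes the property consults.
    _weather_option_temperature_unit = None   # user override, unset in this demo
    _default_temperature_unit = "°C"          # fallback supplied by the integration

    @property
    def temperature_unit(self) -> str:
        # Prefer the configured unit when present, otherwise fall back to the default.
        if (override := self._weather_option_temperature_unit) is not None:
            return override
        return self._default_temperature_unit


print(UnitDemo().temperature_unit)  # -> °C
```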
" 282,"def fontName(self, fontprop): if isinstance(fontprop, str): filenames = [fontprop] elif mpl.rcParams['pdf.use14corefonts']: filenames = _fontManager._find_fonts_by_props( fontprop, fontext='afm', directory=RendererPdf._afm_font_dir ) else: filenames = _fontManager._find_fonts_by_props(fontprop) first_Fx = None for fname in filenames: Fx = self.fontNames.get(fname) if not first_Fx: first_Fx = Fx if Fx is None: Fx = next(self._internal_font_seq) self.fontNames[fname] = Fx _log.debug('Assigning font %s = %r', Fx, fname) if not first_Fx: first_Fx = Fx # find_fontsprop's first value always adheres to # findfont's value, so technically no behaviour change return first_Fx "," Select a font based on fontprop and return a name suitable for Op.selectfont. If fontprop is a string, it will be interpreted as the filename of the font. ",28,78,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fontName(self, fontprop): if isinstance(fontprop, str): filenames = [fontprop] elif mpl.rcParams['pdf.use14corefonts']: filenames = _fontManager._find_fonts_by_props( fontprop, fontext='afm', directory=RendererPdf._afm_font_dir ) else: filenames = _fontManager._find_fonts_by_props(fontprop) first_Fx = None for fname in filenames: Fx = self.fontNames.get(fname) if not first_Fx: first_Fx = Fx if Fx is None: Fx = next(self._internal_font_seq) self.fontNames[fname] = Fx _log.debug('Assigning font %s = %r', Fx, fname) if not first_Fx: first_Fx = Fx # find_fontsprop's first value always adheres to # findfont's value, so technically no behaviour change return first_Fx ``` ###Assistant : Select a font based on fontprop and return a name suitable for Op.selectfont. If fontprop is a string, it will be interpreted as the filename of the font. " 283,"def center_to_corners_format(x): x_center, y_center, width, height = x.unbind(-1) boxes = [(x_center - 0.5 * width), (y_center - 0.5 * height), (x_center + 0.5 * width), (y_center + 0.5 * height)] return torch.stack(boxes, dim=-1) "," Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format (left, top, right, bottom). ",21,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def center_to_corners_format(x): x_center, y_center, width, height = x.unbind(-1) boxes = [(x_center - 0.5 * width), (y_center - 0.5 * height), (x_center + 0.5 * width), (y_center + 0.5 * height)] return torch.stack(boxes, dim=-1) ``` ###Assistant : Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format (left, top, right, bottom). " 284,"def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): raise NotImplementedError( ""subclasses of BaseCache must provide an add() method"" ) "," Set a value in the cache if the key does not already exist. If timeout is given, use that timeout for the key; otherwise use the default cache timeout. Return True if the value was stored, False otherwise. ",38,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None): raise NotImplementedError( ""subclasses of BaseCache must provide an add() method"" ) ``` ###Assistant : Set a value in the cache if the key does not already exist. If timeout is given, use that timeout for the key; otherwise use the default cache timeout. 
Return True if the value was stored, False otherwise. " 285,"def normalized_laplacian_matrix(G, nodelist=None, weight=""weight""): r import numpy as np import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=""csr"") n, m = A.shape diags = A.sum(axis=1) # TODO: rm csr_array wrapper when spdiags can produce arrays D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format=""csr"")) L = D - A with sp.errstate(divide=""ignore""): diags_sqrt = 1.0 / np.sqrt(diags) diags_sqrt[np.isinf(diags_sqrt)] = 0 # TODO: rm csr_array wrapper when spdiags can produce arrays DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format=""csr"")) return DH @ (L @ DH) ","Returns the normalized Laplacian matrix of G. The normalized graph Laplacian is the matrix .. math:: N = D^{-1/2} L D^{-1/2} where `L` is the graph Laplacian and `D` is the diagonal matrix of node degrees [1]_. Parameters ---------- G : graph A NetworkX graph nodelist : list, optional The rows and columns are ordered according to the nodes in nodelist. If nodelist is None, then the ordering is produced by G.nodes(). weight : string or None, optional (default='weight') The edge data key used to compute each value in the matrix. If None, then each edge has weight 1. Returns ------- N : SciPy sparse array The normalized Laplacian matrix of G. Notes ----- For MultiGraph, the edges weights are summed. See :func:`to_numpy_array` for other options. If the Graph contains selfloops, D is defined as ``diag(sum(A, 1))``, where A is the adjacency matrix [2]_. See Also -------- laplacian_matrix normalized_laplacian_spectrum References ---------- .. [1] Fan Chung-Graham, Spectral Graph Theory, CBMS Regional Conference Series in Mathematics, Number 92, 1997. .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98, March 2007. ",190,94,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def normalized_laplacian_matrix(G, nodelist=None, weight=""weight""): r import numpy as np import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=""csr"") n, m = A.shape diags = A.sum(axis=1) # TODO: rm csr_array wrapper when spdiags can produce arrays D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format=""csr"")) L = D - A with sp.errstate(divide=""ignore""): diags_sqrt = 1.0 / np.sqrt(diags) diags_sqrt[np.isinf(diags_sqrt)] = 0 # TODO: rm csr_array wrapper when spdiags can produce arrays DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format=""csr"")) return DH @ (L @ DH) ``` ###Assistant : Returns the normalized Laplacian matrix of G. The normalized graph Laplacian is the matrix .. math:: N = D^{-1/2} L D^{-1/2} where `L` is the graph Laplacian and `D` is the diagonal matrix of node degrees [1]_. Parameters ---------- G : graph A NetworkX graph nodelist : list, optional The rows and columns are ordered according to the nodes in nodelist. If nodelist is None, then the ordering is produced by G.nodes(). weight : string or None, optional (default='weight') The edge data key used to compute each value in the matrix. If None, then each edge has weight 1. Returns ------- N : SciPy sparse array The normalized Laplacian matrix of G. 
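Looping back to the `center_to_corners_format` helper documented a couple of entries above, here is a quick numeric check of the conversion; the tensor values are arbitrary and PyTorch is assumed to be installed.

```Python
import torch


def center_to_corners_format(x):
    # (center_x, center_y, width, height) -> (left, top, right, bottom)
    x_c, y_c, w, h = x.unbind(-1)
    return torch.stack(
        [x_c - 0.5 * w, y_c - 0.5 * h, x_c + 0.5 * w, y_c + 0.5 * h], dim=-1
    )


boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])
print(center_to_corners_format(boxes))
# tensor([[0.4000, 0.3000, 0.6000, 0.7000]])
```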
Notes ----- For MultiGraph, the edges weights are summed. See :func:`to_numpy_array` for other options. If the Graph contains selfloops, D is defined as ``diag(sum(A, 1))``, where A is the adjacency matrix [2]_. See Also -------- laplacian_matrix normalized_laplacian_spectrum References ---------- .. [1] Fan Chung-Graham, Spectral Graph Theory, CBMS Regional Conference Series in Mathematics, Number 92, 1997. .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98, March 2007. " 286,"def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None): i_h, i_w = image.shape[:2] p_h, p_w = patch_size if p_h > i_h: raise ValueError( ""Height of the patch should be less than the height of the image."" ) if p_w > i_w: raise ValueError( ""Width of the patch should be less than the width of the image."" ) image = check_array(image, allow_nd=True) image = image.reshape((i_h, i_w, -1)) n_colors = image.shape[-1] extracted_patches = _extract_patches( image, patch_shape=(p_h, p_w, n_colors), extraction_step=1 ) n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches) if max_patches: rng = check_random_state(random_state) i_s = rng.randint(i_h - p_h + 1, size=n_patches) j_s = rng.randint(i_w - p_w + 1, size=n_patches) patches = extracted_patches[i_s, j_s, 0] else: patches = extracted_patches patches = patches.reshape(-1, p_h, p_w, n_colors) # remove the color dimension if useless if patches.shape[-1] == 1: return patches.reshape((n_patches, p_h, p_w)) else: return patches ","Reshape a 2D image into a collection of patches. The resulting patches are allocated in a dedicated array. Read more in the :ref:`User Guide `. Parameters ---------- image : ndarray of shape (image_height, image_width) or \ (image_height, image_width, n_channels) The original image data. For color images, the last dimension specifies the channel: a RGB image would have `n_channels=3`. patch_size : tuple of int (patch_height, patch_width) The dimensions of one patch. max_patches : int or float, default=None The maximum number of patches to extract. If `max_patches` is a float between 0 and 1, it is taken to be a proportion of the total number of patches. random_state : int, RandomState instance, default=None Determines the random number generator used for random sampling when `max_patches` is not None. Use an int to make the randomness deterministic. See :term:`Glossary `. Returns ------- patches : array of shape (n_patches, patch_height, patch_width) or \ (n_patches, patch_height, patch_width, n_channels) The collection of patches extracted from the image, where `n_patches` is either `max_patches` or the total number of patches that can be extracted. Examples -------- >>> from sklearn.datasets import load_sample_image >>> from sklearn.feature_extraction import image >>> # Use the array data from the first image in this dataset: >>> one_image = load_sample_image(""china.jpg"") >>> print('Image shape: {}'.format(one_image.shape)) Image shape: (427, 640, 3) >>> patches = image.extract_patches_2d(one_image, (2, 2)) >>> print('Patches shape: {}'.format(patches.shape)) Patches shape: (272214, 2, 2, 3) >>> # Here are just two of these patches: >>> print(patches[1]) [[[174 201 231] [174 201 231]] [[173 200 230] [173 200 230]]] >>> print(patches[800]) [[[187 214 243] [188 215 244]] [[187 214 243] [188 215 244]]] ",266,136,Python," ###User : Below is a Python method which does a task. 
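To tie the normalized-Laplacian formula documented above to a concrete result, the public networkx wrapper can be exercised on a small path graph; this assumes scipy is installed, which networkx needs for the sparse return value.

```Python
import networkx as nx

G = nx.path_graph(3)  # nodes 0, 1, 2 with degrees 1, 2, 1
N = nx.normalized_laplacian_matrix(G)
print(N.toarray().round(3))
# [[ 1.    -0.707  0.   ]
#  [-0.707  1.    -0.707]
#  [ 0.    -0.707  1.   ]]
```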
Create a documentation for the below code : ```Python def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None): i_h, i_w = image.shape[:2] p_h, p_w = patch_size if p_h > i_h: raise ValueError( ""Height of the patch should be less than the height of the image."" ) if p_w > i_w: raise ValueError( ""Width of the patch should be less than the width of the image."" ) image = check_array(image, allow_nd=True) image = image.reshape((i_h, i_w, -1)) n_colors = image.shape[-1] extracted_patches = _extract_patches( image, patch_shape=(p_h, p_w, n_colors), extraction_step=1 ) n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches) if max_patches: rng = check_random_state(random_state) i_s = rng.randint(i_h - p_h + 1, size=n_patches) j_s = rng.randint(i_w - p_w + 1, size=n_patches) patches = extracted_patches[i_s, j_s, 0] else: patches = extracted_patches patches = patches.reshape(-1, p_h, p_w, n_colors) # remove the color dimension if useless if patches.shape[-1] == 1: return patches.reshape((n_patches, p_h, p_w)) else: return patches ``` ###Assistant : Reshape a 2D image into a collection of patches. The resulting patches are allocated in a dedicated array. Read more in the :ref:`User Guide `. Parameters ---------- image : ndarray of shape (image_height, image_width) or \ (image_height, image_width, n_channels) The original image data. For color images, the last dimension specifies the channel: a RGB image would have `n_channels=3`. patch_size : tuple of int (patch_height, patch_width) The dimensions of one patch. max_patches : int or float, default=None The maximum number of patches to extract. If `max_patches` is a float between 0 and 1, it is taken to be a proportion of the total number of patches. random_state : int, RandomState instance, default=None Determines the random number generator used for random sampling when `max_patches` is not None. Use an int to make the randomness deterministic. See :term:`Glossary `. Returns ------- patches : array of shape (n_patches, patch_height, patch_width) or \ (n_patches, patch_height, patch_width, n_channels) The collection of patches extracted from the image, where `n_patches` is either `max_patches` or the total number of patches that can be extracted. Examples -------- >>> from sklearn.datasets import load_sample_image >>> from sklearn.feature_extraction import image >>> # Use the array data from the first image in this dataset: >>> one_image = load_sample_image(""china.jpg"") >>> print('Image shape: {}'.format(one_image.shape)) Image shape: (427, 640, 3) >>> patches = image.extract_patches_2d(one_image, (2, 2)) >>> print('Patches shape: {}'.format(patches.shape)) Patches shape: (272214, 2, 2, 3) >>> # Here are just two of these patches: >>> print(patches[1]) [[[174 201 231] [174 201 231]] [[173 200 230] [173 200 230]]] >>> print(patches[800]) [[[187 214 243] [188 215 244]] [[187 214 243] [188 215 244]]] " 287,"def refresh_from_db(self, using=None, fields=None): if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, ""_prefetched_objects_cache"", ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found ""%s"" in fields argument. 
Relations and transforms ' ""are not allowed in fields."" % LOOKUP_SEP ) hints = {""instance"": self} db_instance_qs = self.__class__._base_manager.db_manager( using, hints=hints ).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [ f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields ] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db "," Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. ",85,165,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def refresh_from_db(self, using=None, fields=None): if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, ""_prefetched_objects_cache"", ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found ""%s"" in fields argument. Relations and transforms ' ""are not allowed in fields."" % LOOKUP_SEP ) hints = {""instance"": self} db_instance_qs = self.__class__._base_manager.db_manager( using, hints=hints ).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [ f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields ] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db ``` ###Assistant : Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. 
The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. " 288,"def set_tunnel(self, host, port=None, headers=None): if self.sock: raise RuntimeError(""Can't set up tunnel for established connection"") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear() ","Set up host and port for HTTP CONNECT tunnelling. In a connection that uses HTTP CONNECT tunneling, the host passed to the constructor is used as a proxy server that relays all communication to the endpoint passed to `set_tunnel`. This is done by sending an HTTP CONNECT request to the proxy server when the connection is established. This method must be called before the HTTP connection has been established. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request. ",85,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_tunnel(self, host, port=None, headers=None): if self.sock: raise RuntimeError(""Can't set up tunnel for established connection"") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear() ``` ###Assistant : Set up host and port for HTTP CONNECT tunnelling. In a connection that uses HTTP CONNECT tunneling, the host passed to the constructor is used as a proxy server that relays all communication to the endpoint passed to `set_tunnel`. This is done by sending an HTTP CONNECT request to the proxy server when the connection is established. This method must be called before the HTTP connection has been established. The headers argument should be a mapping of extra HTTP headers to send with the CONNECT request. " 289,"def explicit_device_get_scope() -> Iterator[None]: state = transfer_guard_lib.thread_local_state() prev = state.explicit_device_get state.explicit_device_get = True try: yield finally: state.explicit_device_get = prev ",Indicates that the current context is an explicit device_get() call.,10,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def explicit_device_get_scope() -> Iterator[None]: state = transfer_guard_lib.thread_local_state() prev = state.explicit_device_get state.explicit_device_get = True try: yield finally: state.explicit_device_get = prev ``` ###Assistant : Indicates that the current context is an explicit device_get() call. " 290,"def accessory_info(self) -> Service: return self.accessory.services.first( service_type=ServicesTypes.ACCESSORY_INFORMATION ) ",Information about the make and model of an accessory.,9,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def accessory_info(self) -> Service: return self.accessory.services.first( service_type=ServicesTypes.ACCESSORY_INFORMATION ) ``` ###Assistant : Information about the make and model of an accessory. " 291,"def _always_object(classes): if object not in classes: return classes + (object,) return classes "," Ensure object appears in the mro even for old-style classes. ",10,13,Python," ###User : Below is a Python method which does a task. 
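A brief illustration of how `set_tunnel` is meant to be used from the standard library; the proxy and target host names below are placeholders, and no network traffic is generated because the connection is never opened.

```Python
import http.client

# Tunnel through a proxy: the constructor's host acts as the proxy,
# while set_tunnel() names the real endpoint for the CONNECT request.
conn = http.client.HTTPSConnection("proxy.example.com", 8080)
conn.set_tunnel("www.example.com", 443)
# conn.request("GET", "/")  # the CONNECT handshake happens once the connection is established
```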
Create a documentation for the below code : ```Python def _always_object(classes): if object not in classes: return classes + (object,) return classes ``` ###Assistant : Ensure object appears in the mro even for old-style classes. " 292,"def addIncludedDataFilesFromFileOptions(): for included_datafile in _addIncludedDataFilesFromFileOptions(): addIncludedDataFile(included_datafile) ","Early data files, from user options that work with file system.",11,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def addIncludedDataFilesFromFileOptions(): for included_datafile in _addIncludedDataFilesFromFileOptions(): addIncludedDataFile(included_datafile) ``` ###Assistant : Early data files, from user options that work with file system. " 293,"def _attributes(**kwargs) -> dict[str, str]: return {key: str(value) for key, value in kwargs.items() if value is not None} ",Return the given kwargs as a dictionary with values converted to strings. Items with a value of None will be omitted.,21,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _attributes(**kwargs) -> dict[str, str]: return {key: str(value) for key, value in kwargs.items() if value is not None} ``` ###Assistant : Return the given kwargs as a dictionary with values converted to strings. Items with a value of None will be omitted. " 294,"def test_charpp(self): dll = CDLL(_ctypes_test.__file__) func = dll._testfunc_c_p_p func.restype = c_char_p argv = (c_char_p * 2)() argc = c_int( 2 ) argv[0] = b'hello' argv[1] = b'world' result = func( byref(argc), argv ) self.assertEqual(result, b'world') ",Test that a character pointer-to-pointer is correctly passed,8,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_charpp(self): dll = CDLL(_ctypes_test.__file__) func = dll._testfunc_c_p_p func.restype = c_char_p argv = (c_char_p * 2)() argc = c_int( 2 ) argv[0] = b'hello' argv[1] = b'world' result = func( byref(argc), argv ) self.assertEqual(result, b'world') ``` ###Assistant : Test that a character pointer-to-pointer is correctly passed " 295,"def test_error_message_unsigned(self): # Ensure to test for potential overflow in the case of: # x - y # and # y - x x = np.asarray([0, 1, 8], dtype='uint8') y = np.asarray([4, 4, 4], dtype='uint8') with pytest.raises(AssertionError) as exc_info: assert_allclose(x, y, atol=3) msgs = str(exc_info.value).split('\n') assert_equal(msgs[4], 'Max absolute difference: 4') ","Check the the message is formatted correctly when overflow can occur (gh21768)",12,50,Python," ###User : Below is a Python method which does a task. 
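The `_attributes` helper shown a little earlier is easy to exercise on its own; a minimal re-statement with a made-up set of keyword arguments.

```Python
def _attributes(**kwargs):
    # Drop None values and stringify the rest, mirroring the helper above.
    return {key: str(value) for key, value in kwargs.items() if value is not None}


print(_attributes(name="demo", count=3, extra=None))
# {'name': 'demo', 'count': '3'}
```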
Create a documentation for the below code : ```Python def test_error_message_unsigned(self): # Ensure to test for potential overflow in the case of: # x - y # and # y - x x = np.asarray([0, 1, 8], dtype='uint8') y = np.asarray([4, 4, 4], dtype='uint8') with pytest.raises(AssertionError) as exc_info: assert_allclose(x, y, atol=3) msgs = str(exc_info.value).split('\n') assert_equal(msgs[4], 'Max absolute difference: 4') ``` ###Assistant : Check the the message is formatted correctly when overflow can occur (gh21768) " 296,"def _join_or_get_room(self, room_id_or_alias): rooms = self._client.get_rooms() if room_id_or_alias in rooms: _LOGGER.debug(""Already in room %s"", room_id_or_alias) return rooms[room_id_or_alias] for room in rooms.values(): if room.room_id not in self._aliases_fetched_for: room.update_aliases() self._aliases_fetched_for.add(room.room_id) if ( room_id_or_alias in room.aliases or room_id_or_alias == room.canonical_alias ): _LOGGER.debug( ""Already in room %s (known as %s)"", room.room_id, room_id_or_alias ) return room room = self._client.join_room(room_id_or_alias) _LOGGER.info(""Joined room %s (known as %s)"", room.room_id, room_id_or_alias) return room ","Join a room or get it, if we are already in the room. We can't just always call join_room(), since that seems to crash the client if we're already in the room. ",32,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _join_or_get_room(self, room_id_or_alias): rooms = self._client.get_rooms() if room_id_or_alias in rooms: _LOGGER.debug(""Already in room %s"", room_id_or_alias) return rooms[room_id_or_alias] for room in rooms.values(): if room.room_id not in self._aliases_fetched_for: room.update_aliases() self._aliases_fetched_for.add(room.room_id) if ( room_id_or_alias in room.aliases or room_id_or_alias == room.canonical_alias ): _LOGGER.debug( ""Already in room %s (known as %s)"", room.room_id, room_id_or_alias ) return room room = self._client.join_room(room_id_or_alias) _LOGGER.info(""Joined room %s (known as %s)"", room.room_id, room_id_or_alias) return room ``` ###Assistant : Join a room or get it, if we are already in the room. We can't just always call join_room(), since that seems to crash the client if we're already in the room. " 297,"def _is_function_class_equation(func_class, f, symbol): if f.is_Mul or f.is_Add: return all(_is_function_class_equation(func_class, arg, symbol) for arg in f.args) if f.is_Pow: if not f.exp.has(symbol): return _is_function_class_equation(func_class, f.base, symbol) else: return False if not f.has(symbol): return True if isinstance(f, func_class): try: g = Poly(f.args[0], symbol) return g.degree() <= 1 except PolynomialError: return False else: return False "," Tests whether the equation is an equation of the given function class. The given equation belongs to the given function class if it is comprised of functions of the function class which are multiplied by or added to expressions independent of the symbol. In addition, the arguments of all such functions must be linear in the symbol as well. 
Examples ======== >>> from sympy.solvers.solveset import _is_function_class_equation >>> from sympy import tan, sin, tanh, sinh, exp >>> from sympy.abc import x >>> from sympy.functions.elementary.trigonometric import TrigonometricFunction >>> from sympy.functions.elementary.hyperbolic import HyperbolicFunction >>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x) False >>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x) True >>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x) False >>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x) True >>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x) True ",123,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _is_function_class_equation(func_class, f, symbol): if f.is_Mul or f.is_Add: return all(_is_function_class_equation(func_class, arg, symbol) for arg in f.args) if f.is_Pow: if not f.exp.has(symbol): return _is_function_class_equation(func_class, f.base, symbol) else: return False if not f.has(symbol): return True if isinstance(f, func_class): try: g = Poly(f.args[0], symbol) return g.degree() <= 1 except PolynomialError: return False else: return False ``` ###Assistant : Tests whether the equation is an equation of the given function class. The given equation belongs to the given function class if it is comprised of functions of the function class which are multiplied by or added to expressions independent of the symbol. In addition, the arguments of all such functions must be linear in the symbol as well. Examples ======== >>> from sympy.solvers.solveset import _is_function_class_equation >>> from sympy import tan, sin, tanh, sinh, exp >>> from sympy.abc import x >>> from sympy.functions.elementary.trigonometric import TrigonometricFunction >>> from sympy.functions.elementary.hyperbolic import HyperbolicFunction >>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x) False >>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x) True >>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x) False >>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x) True >>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x) True " 298,"def get_feature_names_out(self, input_features=None): class_name = self.__class__.__name__.lower() return np.asarray([f""{class_name}0""], dtype=object) ","Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Ignored. Returns ------- feature_names_out : ndarray of str objects An ndarray with one string i.e. [""isotonicregression0""]. ",32,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_feature_names_out(self, input_features=None): class_name = self.__class__.__name__.lower() return np.asarray([f""{class_name}0""], dtype=object) ``` ###Assistant : Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Ignored. Returns ------- feature_names_out : ndarray of str objects An ndarray with one string i.e. [""isotonicregression0""]. " 299,"def get_distance(self, f, dist_val, lookup_type): # Getting the distance parameter value = dist_val[0] # Shorthand boolean flags. 
geodetic = f.geodetic(self.connection) geography = f.geography if isinstance(value, Distance): if geography: dist_param = value.m elif geodetic: if lookup_type == ""dwithin"": raise ValueError( ""Only numeric values of degree units are "" ""allowed on geographic DWithin queries."" ) dist_param = value.m else: dist_param = getattr( value, Distance.unit_attname(f.units_name(self.connection)) ) else: # Assuming the distance is in the units of the field. dist_param = value return [dist_param] "," Retrieve the distance parameters for the given geometry field, distance lookup value, and the distance lookup type. This is the most complex implementation of the spatial backends due to what is supported on geodetic geometry columns vs. what's available on projected geometry columns. In addition, it has to take into account the geography column type. ",55,80,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_distance(self, f, dist_val, lookup_type): # Getting the distance parameter value = dist_val[0] # Shorthand boolean flags. geodetic = f.geodetic(self.connection) geography = f.geography if isinstance(value, Distance): if geography: dist_param = value.m elif geodetic: if lookup_type == ""dwithin"": raise ValueError( ""Only numeric values of degree units are "" ""allowed on geographic DWithin queries."" ) dist_param = value.m else: dist_param = getattr( value, Distance.unit_attname(f.units_name(self.connection)) ) else: # Assuming the distance is in the units of the field. dist_param = value return [dist_param] ``` ###Assistant : Retrieve the distance parameters for the given geometry field, distance lookup value, and the distance lookup type. This is the most complex implementation of the spatial backends due to what is supported on geodetic geometry columns vs. what's available on projected geometry columns. In addition, it has to take into account the geography column type. " 300,"def test_type_eventpage_two_indexes(self): self.make_event_section(""Other events"") self.assertEqual( self.get_best_root({""page_type"": ""tests.EventPage""}), self.home_page ) "," The chooser should start at the home page, as there are two EventIndexes with EventPages. ",15,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_type_eventpage_two_indexes(self): self.make_event_section(""Other events"") self.assertEqual( self.get_best_root({""page_type"": ""tests.EventPage""}), self.home_page ) ``` ###Assistant : The chooser should start at the home page, as there are two EventIndexes with EventPages. " 301,"def parse(self, filename, constraint): # type: (str, bool) -> Iterator[ParsedLine] yield from self._parse_and_recurse(filename, constraint) ","Parse a given file, yielding parsed lines. ",7,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse(self, filename, constraint): # type: (str, bool) -> Iterator[ParsedLine] yield from self._parse_and_recurse(filename, constraint) ``` ###Assistant : Parse a given file, yielding parsed lines. 
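The `get_distance` backend method above leans on Django's `Distance` measure class for its unit handling; a standalone look at that conversion, assuming Django is installed (the measure module needs no database or GDAL).

```Python
from django.contrib.gis.measure import Distance

d = Distance(km=5)
print(d.m)                                 # 5000.0
print(Distance.unit_attname("Kilometer"))  # 'km', the attribute name units are resolved to
```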
" 302,"def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None): result = tf.sparse.bincount( inputs, weights=count_weights, minlength=depth, maxlength=depth, axis=-1, binary_output=binary_output, ) result = tf.cast(result, dtype) if inputs.shape.rank == 1: output_shape = (depth,) else: batch_size = tf.shape(result)[0] output_shape = (batch_size, depth) result = tf.SparseTensor( indices=result.indices, values=result.values, dense_shape=output_shape ) return result ",Apply binary or count encoding to an input and return a sparse tensor.,13,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None): result = tf.sparse.bincount( inputs, weights=count_weights, minlength=depth, maxlength=depth, axis=-1, binary_output=binary_output, ) result = tf.cast(result, dtype) if inputs.shape.rank == 1: output_shape = (depth,) else: batch_size = tf.shape(result)[0] output_shape = (batch_size, depth) result = tf.SparseTensor( indices=result.indices, values=result.values, dense_shape=output_shape ) return result ``` ###Assistant : Apply binary or count encoding to an input and return a sparse tensor. " 303,"def unmarshal(self, serialized_data): logger.debug(""data type: %s"", type(serialized_data)) try: retval = self._unmarshal(serialized_data) except Exception as err: msg = f""Error unserializing data for type {type(serialized_data)}: {str(err)}"" raise FaceswapError(msg) from err logger.debug(""returned data type: %s"", type(retval)) return retval "," Unserialize data to its original object type Parameters ---------- serialized_data: varies Data in serializer format that is to be unmarshalled to its original object Returns ------- data: varies The data in a python object format Example ------ >>> serializer = get_serializer('json') >>> json_data = >>> data = serializer.unmarshal(json_data) ",50,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unmarshal(self, serialized_data): logger.debug(""data type: %s"", type(serialized_data)) try: retval = self._unmarshal(serialized_data) except Exception as err: msg = f""Error unserializing data for type {type(serialized_data)}: {str(err)}"" raise FaceswapError(msg) from err logger.debug(""returned data type: %s"", type(retval)) return retval ``` ###Assistant : Unserialize data to its original object type Parameters ---------- serialized_data: varies Data in serializer format that is to be unmarshalled to its original object Returns ------- data: varies The data in a python object format Example ------ >>> serializer = get_serializer('json') >>> json_data = >>> data = serializer.unmarshal(json_data) " 304,"def copyDataFiles(): for included_datafile in getIncludedDataFiles(): # TODO: directories should be resolved to files. if ( not isinstance(included_datafile, (IncludedDataFile)) or included_datafile.needsCopy() ): _handleDataFile( included_datafile, ) ","Copy the data files needed for standalone distribution. Notes: This is for data files only, not DLLs or even extension modules, those must be registered as entry points, and would not go through necessary handling if provided like this. ",39,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def copyDataFiles(): for included_datafile in getIncludedDataFiles(): # TODO: directories should be resolved to files. 
if ( not isinstance(included_datafile, (IncludedDataFile)) or included_datafile.needsCopy() ): _handleDataFile( included_datafile, ) ``` ###Assistant : Copy the data files needed for standalone distribution. Notes: This is for data files only, not DLLs or even extension modules, those must be registered as entry points, and would not go through necessary handling if provided like this. " 305,"def toggler(self, attr): if attr not in self._options: raise KeyError(""No such option: %s"" % attr) o = self._options[attr] if o.typespec != bool: raise ValueError(""Toggler can only be used with boolean options"") "," Generate a toggler for a boolean attribute. This returns a callable that takes no arguments. ",15,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def toggler(self, attr): if attr not in self._options: raise KeyError(""No such option: %s"" % attr) o = self._options[attr] if o.typespec != bool: raise ValueError(""Toggler can only be used with boolean options"") ``` ###Assistant : Generate a toggler for a boolean attribute. This returns a callable that takes no arguments. " 306,"def disable_terminal_wrapping(monkeypatch): monkeypatch.setattr( ""prefect.cli.profile.console"", rich.console.Console(soft_wrap=True) ) "," Sometimes, line wrapping makes it hard to make deterministic assertions about the output of a CLI command. Wrapping can be disabled by using this fixture. ",25,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def disable_terminal_wrapping(monkeypatch): monkeypatch.setattr( ""prefect.cli.profile.console"", rich.console.Console(soft_wrap=True) ) ``` ###Assistant : Sometimes, line wrapping makes it hard to make deterministic assertions about the output of a CLI command. Wrapping can be disabled by using this fixture. " 307,"def __add__(self, other): rank = (self.rank() + other) % self.cardinality rv = self.unrank_lex(self.size, rank) rv._rank = rank return rv ","Return permutation that is other higher in rank than self. The rank is the lexicographical rank, with the identity permutation having rank of 0. Examples ======== >>> from sympy.combinatorics import Permutation >>> I = Permutation([0, 1, 2, 3]) >>> a = Permutation([2, 1, 3, 0]) >>> I + a.rank() == a True See Also ======== __sub__, inversion_vector ",57,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __add__(self, other): rank = (self.rank() + other) % self.cardinality rv = self.unrank_lex(self.size, rank) rv._rank = rank return rv ``` ###Assistant : Return permutation that is other higher in rank than self. The rank is the lexicographical rank, with the identity permutation having rank of 0. 
Examples ======== >>> from sympy.combinatorics import Permutation >>> I = Permutation([0, 1, 2, 3]) >>> a = Permutation([2, 1, 3, 0]) >>> I + a.rank() == a True See Also ======== __sub__, inversion_vector " 308,"def test_set_presence_from_syncing_not_set(self): user_id = ""@test:server"" status_msg = ""I'm here!"" self._set_presencestate_with_status_msg( user_id, PresenceState.UNAVAILABLE, status_msg ) self.get_success( self.presence_handler.user_syncing(user_id, False, PresenceState.ONLINE) ) state = self.get_success( self.presence_handler.get_state(UserID.from_string(user_id)) ) # we should still be unavailable self.assertEqual(state.state, PresenceState.UNAVAILABLE) # and status message should still be the same self.assertEqual(state.status_msg, status_msg) ",Test that presence is not set by syncing if affect_presence is false,12,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_set_presence_from_syncing_not_set(self): user_id = ""@test:server"" status_msg = ""I'm here!"" self._set_presencestate_with_status_msg( user_id, PresenceState.UNAVAILABLE, status_msg ) self.get_success( self.presence_handler.user_syncing(user_id, False, PresenceState.ONLINE) ) state = self.get_success( self.presence_handler.get_state(UserID.from_string(user_id)) ) # we should still be unavailable self.assertEqual(state.state, PresenceState.UNAVAILABLE) # and status message should still be the same self.assertEqual(state.status_msg, status_msg) ``` ###Assistant : Test that presence is not set by syncing if affect_presence is false " 309,"def test_stringy_integers(self): input = { ""a"": ""100"", ""b"": { ""foo"": 99, ""bar"": ""-98"", }, ""d"": ""0999"", } output = copy_and_fixup_power_levels_contents(input) expected_output = { ""a"": 100, ""b"": { ""foo"": 99, ""bar"": -98, }, ""d"": 999, } self.assertEqual(output, expected_output) ",String representations of decimal integers are converted to integers.,9,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stringy_integers(self): input = { ""a"": ""100"", ""b"": { ""foo"": 99, ""bar"": ""-98"", }, ""d"": ""0999"", } output = copy_and_fixup_power_levels_contents(input) expected_output = { ""a"": 100, ""b"": { ""foo"": 99, ""bar"": -98, }, ""d"": 999, } self.assertEqual(output, expected_output) ``` ###Assistant : String representations of decimal integers are converted to integers. " 310,"def get_ec_matching_query(bank_account, company, amount_condition): # get matching Expense Claim query mode_of_payments = [ x[""parent""] for x in frappe.db.get_all( ""Mode of Payment Account"", filters={""default_account"": bank_account}, fields=[""parent""] ) ] mode_of_payments = ""('"" + ""', '"".join(mode_of_payments) + ""' )"" company_currency = get_company_currency(company) return f "," SELECT ( CASE WHEN employee = %(party)s THEN 1 ELSE 0 END + 1 ) AS rank, 'Expense Claim' as doctype, name, total_sanctioned_amount as paid_amount, '' as reference_no, '' as reference_date, employee as party, 'Employee' as party_type, posting_date, '{company_currency}' as currency FROM `tabExpense Claim` WHERE total_sanctioned_amount {amount_condition} %(amount)s AND docstatus = 1 AND is_paid = 1 AND ifnull(clearance_date, '') = """" AND mode_of_payment in {mode_of_payments} ",65,41,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_ec_matching_query(bank_account, company, amount_condition): # get matching Expense Claim query mode_of_payments = [ x[""parent""] for x in frappe.db.get_all( ""Mode of Payment Account"", filters={""default_account"": bank_account}, fields=[""parent""] ) ] mode_of_payments = ""('"" + ""', '"".join(mode_of_payments) + ""' )"" company_currency = get_company_currency(company) return f ``` ###Assistant : SELECT ( CASE WHEN employee = %(party)s THEN 1 ELSE 0 END + 1 ) AS rank, 'Expense Claim' as doctype, name, total_sanctioned_amount as paid_amount, '' as reference_no, '' as reference_date, employee as party, 'Employee' as party_type, posting_date, '{company_currency}' as currency FROM `tabExpense Claim` WHERE total_sanctioned_amount {amount_condition} %(amount)s AND docstatus = 1 AND is_paid = 1 AND ifnull(clearance_date, '') = """" AND mode_of_payment in {mode_of_payments} " 311,"def run_exec_plan(cls, plan, index_cols, dtypes, columns): omniSession = DbWorker() # First step is to make sure all partitions are in HDK. frames = plan.collect_frames() for frame in frames: if frame._partitions.size != 1: raise NotImplementedError( ""HdkOnNative engine doesn't suport partitioned frames"" ) for p in frame._partitions.flatten(): if p.frame_id is None: obj = p.get() if isinstance(obj, (pandas.DataFrame, pandas.Series)): p.frame_id = omniSession.import_pandas_dataframe(obj) else: assert isinstance(obj, pyarrow.Table) p.frame_id = omniSession.import_arrow_table(obj) calcite_plan = CalciteBuilder().build(plan) calcite_json = CalciteSerializer().serialize(calcite_plan) cmd_prefix = ""execute relalg "" if DoUseCalcite.get(): cmd_prefix = ""execute calcite "" at = omniSession.executeRA(cmd_prefix + calcite_json) res = np.empty((1, 1), dtype=np.dtype(object)) # workaround for https://github.com/modin-project/modin/issues/1851 if DoUseCalcite.get(): at = at.rename_columns([""F_"" + str(c) for c in columns]) res[0][0] = cls._partition_class.put_arrow(at) return res "," Run execution plan in HDK storage format to materialize frame. Parameters ---------- plan : DFAlgNode A root of an execution plan tree. index_cols : list of str A list of index columns. dtypes : pandas.Index Column data types. columns : list of str A frame column names. Returns ------- np.array Created frame's partitions. ",53,114,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def run_exec_plan(cls, plan, index_cols, dtypes, columns): omniSession = DbWorker() # First step is to make sure all partitions are in HDK. 
frames = plan.collect_frames() for frame in frames: if frame._partitions.size != 1: raise NotImplementedError( ""HdkOnNative engine doesn't suport partitioned frames"" ) for p in frame._partitions.flatten(): if p.frame_id is None: obj = p.get() if isinstance(obj, (pandas.DataFrame, pandas.Series)): p.frame_id = omniSession.import_pandas_dataframe(obj) else: assert isinstance(obj, pyarrow.Table) p.frame_id = omniSession.import_arrow_table(obj) calcite_plan = CalciteBuilder().build(plan) calcite_json = CalciteSerializer().serialize(calcite_plan) cmd_prefix = ""execute relalg "" if DoUseCalcite.get(): cmd_prefix = ""execute calcite "" at = omniSession.executeRA(cmd_prefix + calcite_json) res = np.empty((1, 1), dtype=np.dtype(object)) # workaround for https://github.com/modin-project/modin/issues/1851 if DoUseCalcite.get(): at = at.rename_columns([""F_"" + str(c) for c in columns]) res[0][0] = cls._partition_class.put_arrow(at) return res ``` ###Assistant : Run execution plan in HDK storage format to materialize frame. Parameters ---------- plan : DFAlgNode A root of an execution plan tree. index_cols : list of str A list of index columns. dtypes : pandas.Index Column data types. columns : list of str A frame column names. Returns ------- np.array Created frame's partitions. " 312,"def to_numeric_dtype(dtype): dtype = np.dtype(dtype) return np.dtype('int32') if dtype == np.dtype('bool') else dtype ","Promotes a dtype into an numeric dtype, if it is not already one.",13,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_numeric_dtype(dtype): dtype = np.dtype(dtype) return np.dtype('int32') if dtype == np.dtype('bool') else dtype ``` ###Assistant : Promotes a dtype into an numeric dtype, if it is not already one. " 313,"def test_do_schedule_max_active_runs_task_removed(self, session, dag_maker): with dag_maker( dag_id='test_do_schedule_max_active_runs_task_removed', start_date=DEFAULT_DATE, schedule_interval='@once', max_active_runs=1, session=session, ): # Can't use EmptyOperator as that goes straight to success BashOperator(task_id='dummy1', bash_command='true') run1 = dag_maker.create_dagrun( run_type=DagRunType.SCHEDULED, execution_date=DEFAULT_DATE + timedelta(hours=1), state=State.RUNNING, ) self.scheduler_job = SchedulerJob(subdir=os.devnull) self.scheduler_job.executor = MockExecutor(do_update=False) self.scheduler_job.processor_agent = mock.MagicMock(spec=DagFileProcessorAgent) num_queued = self.scheduler_job._do_scheduling(session) assert num_queued == 1 session.flush() ti = run1.task_instances[0] ti.refresh_from_db(session=session) assert ti.state == State.QUEUED ",Test that tasks in removed state don't count as actively running.,11,58,Python," ###User : Below is a Python method which does a task. 
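The `to_numeric_dtype` helper above is small enough to sanity-check in isolation; the snippet simply restates it with NumPy and prints the promotion it performs.

```Python
import numpy as np


def to_numeric_dtype(dtype):
    # bool is promoted to int32; every other dtype passes through unchanged.
    dtype = np.dtype(dtype)
    return np.dtype('int32') if dtype == np.dtype('bool') else dtype


print(to_numeric_dtype('bool'))      # int32
print(to_numeric_dtype(np.float32))  # float32
```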
Create a documentation for the below code : ```Python def test_do_schedule_max_active_runs_task_removed(self, session, dag_maker): with dag_maker( dag_id='test_do_schedule_max_active_runs_task_removed', start_date=DEFAULT_DATE, schedule_interval='@once', max_active_runs=1, session=session, ): # Can't use EmptyOperator as that goes straight to success BashOperator(task_id='dummy1', bash_command='true') run1 = dag_maker.create_dagrun( run_type=DagRunType.SCHEDULED, execution_date=DEFAULT_DATE + timedelta(hours=1), state=State.RUNNING, ) self.scheduler_job = SchedulerJob(subdir=os.devnull) self.scheduler_job.executor = MockExecutor(do_update=False) self.scheduler_job.processor_agent = mock.MagicMock(spec=DagFileProcessorAgent) num_queued = self.scheduler_job._do_scheduling(session) assert num_queued == 1 session.flush() ti = run1.task_instances[0] ti.refresh_from_db(session=session) assert ti.state == State.QUEUED ``` ###Assistant : Test that tasks in removed state don't count as actively running. " 314,"def forward(self, xs, masks): if isinstance(self.embed, (Conv2dSubsampling, VGG2L)): xs, masks = self.embed(xs, masks) else: xs = self.embed(xs) xs, masks = self.encoders(xs, masks) if isinstance(xs, tuple): xs = xs[0] if self.normalize_before: xs = self.after_norm(xs) return xs, masks ","Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]: ",22,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, xs, masks): if isinstance(self.embed, (Conv2dSubsampling, VGG2L)): xs, masks = self.embed(xs, masks) else: xs = self.embed(xs) xs, masks = self.encoders(xs, masks) if isinstance(xs, tuple): xs = xs[0] if self.normalize_before: xs = self.after_norm(xs) return xs, masks ``` ###Assistant : Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]: " 315,"def save_class(self): if gtff.REMEMBER_CONTEXTS: controllers[self.PATH] = self ",Saves the current instance of the class to be loaded later,11,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_class(self): if gtff.REMEMBER_CONTEXTS: controllers[self.PATH] = self ``` ###Assistant : Saves the current instance of the class to be loaded later " 316,"def _decode_messages(self, messages): messages_len = len(messages) # if it was one message, then its old style if messages_len == 1: payload = salt.payload.loads(messages[0]) # 2 includes a header which says who should do it elif messages_len == 2: message_target = salt.utils.stringutils.to_str(messages[0]) if ( self.opts.get(""__role"") != ""syndic"" and message_target not in (""broadcast"", self.hexid) ) or ( self.opts.get(""__role"") == ""syndic"" and message_target not in (""broadcast"", ""syndic"") ): log.debug(""Publish received for not this minion: %s"", message_target) raise salt.ext.tornado.gen.Return(None) payload = salt.payload.loads(messages[1]) else: raise Exception( ""Invalid number of messages ({}) in zeromq pubmessage from master"".format( len(messages_len) ) ) # Yield control back to the caller. 
When the payload has been decoded, assign # the decoded payload to 'ret' and resume operation raise salt.ext.tornado.gen.Return(payload) "," Take the zmq messages, decrypt/decode them into a payload :param list messages: A list of messages to be decoded ",19,119,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _decode_messages(self, messages): messages_len = len(messages) # if it was one message, then its old style if messages_len == 1: payload = salt.payload.loads(messages[0]) # 2 includes a header which says who should do it elif messages_len == 2: message_target = salt.utils.stringutils.to_str(messages[0]) if ( self.opts.get(""__role"") != ""syndic"" and message_target not in (""broadcast"", self.hexid) ) or ( self.opts.get(""__role"") == ""syndic"" and message_target not in (""broadcast"", ""syndic"") ): log.debug(""Publish received for not this minion: %s"", message_target) raise salt.ext.tornado.gen.Return(None) payload = salt.payload.loads(messages[1]) else: raise Exception( ""Invalid number of messages ({}) in zeromq pubmessage from master"".format( len(messages_len) ) ) # Yield control back to the caller. When the payload has been decoded, assign # the decoded payload to 'ret' and resume operation raise salt.ext.tornado.gen.Return(payload) ``` ###Assistant : Take the zmq messages, decrypt/decode them into a payload :param list messages: A list of messages to be decoded " 317,"def test_get_release_wheel_url(): # This should be a commit for which wheels have already been built for # all platforms and python versions at # `s3://ray-wheels/releases/2.2.0//`. test_commits = {""2.2.0"": ""b6af0887ee5f2e460202133791ad941a41f15beb""} for sys_platform in [""darwin"", ""linux"", ""win32""]: for py_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS: for version, commit in test_commits.items(): if sys_platform == ""win32"" and py_version == (3, 6): # Windows wheels are not built for py3.6 anymore continue url = get_release_wheel_url(commit, sys_platform, version, py_version) assert requests.head(url).status_code == 200, url ",Test the code that generates the filenames of the `release` branch wheels.,12,74,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_release_wheel_url(): # This should be a commit for which wheels have already been built for # all platforms and python versions at # `s3://ray-wheels/releases/2.2.0//`. test_commits = {""2.2.0"": ""b6af0887ee5f2e460202133791ad941a41f15beb""} for sys_platform in [""darwin"", ""linux"", ""win32""]: for py_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS: for version, commit in test_commits.items(): if sys_platform == ""win32"" and py_version == (3, 6): # Windows wheels are not built for py3.6 anymore continue url = get_release_wheel_url(commit, sys_platform, version, py_version) assert requests.head(url).status_code == 200, url ``` ###Assistant : Test the code that generates the filenames of the `release` branch wheels. 
" 318,"def test_keep_media_by_date(self) -> None: # timestamp before upload now_ms = self.clock.time_msec() server_and_media_id = self._create_media() self._access_media(server_and_media_id) channel = self.make_request( ""POST"", self.url + ""?before_ts="" + str(now_ms), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body[""total""]) self._access_media(server_and_media_id) # timestamp after upload now_ms = self.clock.time_msec() channel = self.make_request( ""POST"", self.url + ""?before_ts="" + str(now_ms), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body[""total""]) self.assertEqual( server_and_media_id.split(""/"")[1], channel.json_body[""deleted_media""][0], ) self._access_media(server_and_media_id, False) "," Tests that media is not deleted if it is newer than `before_ts` ",12,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_keep_media_by_date(self) -> None: # timestamp before upload now_ms = self.clock.time_msec() server_and_media_id = self._create_media() self._access_media(server_and_media_id) channel = self.make_request( ""POST"", self.url + ""?before_ts="" + str(now_ms), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(0, channel.json_body[""total""]) self._access_media(server_and_media_id) # timestamp after upload now_ms = self.clock.time_msec() channel = self.make_request( ""POST"", self.url + ""?before_ts="" + str(now_ms), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body[""total""]) self.assertEqual( server_and_media_id.split(""/"")[1], channel.json_body[""deleted_media""][0], ) self._access_media(server_and_media_id, False) ``` ###Assistant : Tests that media is not deleted if it is newer than `before_ts` " 319,"def test_fetch_openml_requires_pandas_in_future(monkeypatch): params = {""as_frame"": False, ""parser"": ""auto""} data_id = 1119 try: check_pandas_support(""test_fetch_openml_requires_pandas"") except ImportError: _monkey_patch_webbased_functions(monkeypatch, data_id, True) warn_msg = ( ""From version 1.4, `parser='auto'` with `as_frame=False` will use pandas"" ) with pytest.warns(FutureWarning, match=warn_msg): fetch_openml(data_id=data_id, **params) else: raise SkipTest(""This test requires pandas to not be installed."") @pytest.mark.filterwarnings(""ignore:Version 1 of dataset Australian is inactive"") # TODO(1.4): remove this filterwarning decorator for `parser` @pytest.mark.filterwarnings(""ignore:The default value of `parser` will change"") @pytest.mark.parametrize( ""params, err_msg"", [ ( {""parser"": ""pandas""}, ""Sparse ARFF datasets cannot be loaded with parser='pandas'"", ), ( {""as_frame"": True}, ""Sparse ARFF datasets cannot be loaded with as_frame=True."", ), ( {""parser"": ""pandas"", ""as_frame"": True}, ""Sparse ARFF datasets cannot be loaded with as_frame=True."", ), ], )",Check that we raise a warning that pandas will be required in the future.,14,112,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_fetch_openml_requires_pandas_in_future(monkeypatch): params = {""as_frame"": False, ""parser"": ""auto""} data_id = 1119 try: check_pandas_support(""test_fetch_openml_requires_pandas"") except ImportError: _monkey_patch_webbased_functions(monkeypatch, data_id, True) warn_msg = ( ""From version 1.4, `parser='auto'` with `as_frame=False` will use pandas"" ) with pytest.warns(FutureWarning, match=warn_msg): fetch_openml(data_id=data_id, **params) else: raise SkipTest(""This test requires pandas to not be installed."") @pytest.mark.filterwarnings(""ignore:Version 1 of dataset Australian is inactive"") # TODO(1.4): remove this filterwarning decorator for `parser` @pytest.mark.filterwarnings(""ignore:The default value of `parser` will change"") @pytest.mark.parametrize( ""params, err_msg"", [ ( {""parser"": ""pandas""}, ""Sparse ARFF datasets cannot be loaded with parser='pandas'"", ), ( {""as_frame"": True}, ""Sparse ARFF datasets cannot be loaded with as_frame=True."", ), ( {""parser"": ""pandas"", ""as_frame"": True}, ""Sparse ARFF datasets cannot be loaded with as_frame=True."", ), ], ) ``` ###Assistant : Check that we raise a warning that pandas will be required in the future. " 320,"def get_connection(self, url, proxies=None): proxy = select_proxy(url, proxies) if proxy: proxy = prepend_scheme_if_needed(proxy, ""http"") proxy_url = parse_url(proxy) if not proxy_url.host: raise InvalidProxyURL( ""Please check proxy URL. It is malformed "" ""and could be missing the host."" ) proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: # Only scheme should be lower case parsed = urlparse(url) url = parsed.geturl() conn = self.poolmanager.connection_from_url(url) return conn ","Returns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. :rtype: urllib3.ConnectionPool ",48,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_connection(self, url, proxies=None): proxy = select_proxy(url, proxies) if proxy: proxy = prepend_scheme_if_needed(proxy, ""http"") proxy_url = parse_url(proxy) if not proxy_url.host: raise InvalidProxyURL( ""Please check proxy URL. It is malformed "" ""and could be missing the host."" ) proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: # Only scheme should be lower case parsed = urlparse(url) url = parsed.geturl() conn = self.poolmanager.connection_from_url(url) return conn ``` ###Assistant : Returns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter `. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. 
:rtype: urllib3.ConnectionPool " 321,"def feature_engineering_expand_all(self, dataframe, period, **kwargs): dataframe[""%-rsi-period""] = ta.RSI(dataframe, timeperiod=period) dataframe[""%-mfi-period""] = ta.MFI(dataframe, timeperiod=period) dataframe[""%-adx-period""] = ta.ADX(dataframe, timeperiod=period) dataframe[""%-sma-period""] = ta.SMA(dataframe, timeperiod=period) dataframe[""%-ema-period""] = ta.EMA(dataframe, timeperiod=period) bollinger = qtpylib.bollinger_bands( qtpylib.typical_price(dataframe), window=period, stds=2.2 ) dataframe[""bb_lowerband-period""] = bollinger[""lower""] dataframe[""bb_middleband-period""] = bollinger[""mid""] dataframe[""bb_upperband-period""] = bollinger[""upper""] dataframe[""%-bb_width-period""] = ( dataframe[""bb_upperband-period""] - dataframe[""bb_lowerband-period""] ) / dataframe[""bb_middleband-period""] dataframe[""%-close-bb_lower-period""] = ( dataframe[""close""] / dataframe[""bb_lowerband-period""] ) dataframe[""%-roc-period""] = ta.ROC(dataframe, timeperiod=period) dataframe[""%-relative_volume-period""] = ( dataframe[""volume""] / dataframe[""volume""].rolling(period).mean() ) return dataframe "," *Only functional with FreqAI enabled strategies* This function will automatically expand the defined features on the config defined `indicator_periods_candles`, `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`. In other words, a single feature defined in this function will automatically expand to a total of `indicator_periods_candles` * `include_timeframes` * `include_shifted_candles` * `include_corr_pairs` numbers of features added to the model. All features must be prepended with `%` to be recognized by FreqAI internals. More details on how these config defined parameters accelerate feature engineering in the documentation at: https://www.freqtrade.io/en/latest/freqai-parameter-table/#feature-parameters https://www.freqtrade.io/en/latest/freqai-feature-engineering/#defining-the-features :param df: strategy dataframe which will receive the features :param period: period of the indicator - usage example: dataframe[""%-ema-period""] = ta.EMA(dataframe, timeperiod=period) ",106,70,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def feature_engineering_expand_all(self, dataframe, period, **kwargs): dataframe[""%-rsi-period""] = ta.RSI(dataframe, timeperiod=period) dataframe[""%-mfi-period""] = ta.MFI(dataframe, timeperiod=period) dataframe[""%-adx-period""] = ta.ADX(dataframe, timeperiod=period) dataframe[""%-sma-period""] = ta.SMA(dataframe, timeperiod=period) dataframe[""%-ema-period""] = ta.EMA(dataframe, timeperiod=period) bollinger = qtpylib.bollinger_bands( qtpylib.typical_price(dataframe), window=period, stds=2.2 ) dataframe[""bb_lowerband-period""] = bollinger[""lower""] dataframe[""bb_middleband-period""] = bollinger[""mid""] dataframe[""bb_upperband-period""] = bollinger[""upper""] dataframe[""%-bb_width-period""] = ( dataframe[""bb_upperband-period""] - dataframe[""bb_lowerband-period""] ) / dataframe[""bb_middleband-period""] dataframe[""%-close-bb_lower-period""] = ( dataframe[""close""] / dataframe[""bb_lowerband-period""] ) dataframe[""%-roc-period""] = ta.ROC(dataframe, timeperiod=period) dataframe[""%-relative_volume-period""] = ( dataframe[""volume""] / dataframe[""volume""].rolling(period).mean() ) return dataframe ``` ###Assistant : *Only functional with FreqAI enabled strategies* This function will automatically expand the defined features on the config defined `indicator_periods_candles`, `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`. In other words, a single feature defined in this function will automatically expand to a total of `indicator_periods_candles` * `include_timeframes` * `include_shifted_candles` * `include_corr_pairs` numbers of features added to the model. All features must be prepended with `%` to be recognized by FreqAI internals. More details on how these config defined parameters accelerate feature engineering in the documentation at: https://www.freqtrade.io/en/latest/freqai-parameter-table/#feature-parameters https://www.freqtrade.io/en/latest/freqai-feature-engineering/#defining-the-features :param df: strategy dataframe which will receive the features :param period: period of the indicator - usage example: dataframe[""%-ema-period""] = ta.EMA(dataframe, timeperiod=period) " 322,"def get_script_prefix(self, scope): if settings.FORCE_SCRIPT_NAME: return settings.FORCE_SCRIPT_NAME return scope.get(""root_path"", """") or """" "," Return the script prefix to use from either the scope or a setting. ",13,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_script_prefix(self, scope): if settings.FORCE_SCRIPT_NAME: return settings.FORCE_SCRIPT_NAME return scope.get(""root_path"", """") or """" ``` ###Assistant : Return the script prefix to use from either the scope or a setting. " 323,"def test_get_first_menu_and_fail(): part_one = f part_two = f command = [""storage"", ""create""] invoke_and_assert_in( command=command, desired_contents=(part_one, part_two), expected_code=1, user_input=f""{INVALID_OPTION}\n"", ) "," Make sure that our utility function is returning as expected Found the following storage types: 0) Azure Blob Storage Store data in an Azure blob storage container. 1) File Storage Store data as a file on local or remote file systems. 2) Google Cloud Storage Store data in a GCS bucket. 3) Local Storage Store data in a run's local file system. Select a storage type to create: 99999999 Invalid selection {INVALID_OPTION} ",72,19,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_get_first_menu_and_fail(): part_one = f part_two = f command = [""storage"", ""create""] invoke_and_assert_in( command=command, desired_contents=(part_one, part_two), expected_code=1, user_input=f""{INVALID_OPTION}\n"", ) ``` ###Assistant : Make sure that our utility function is returning as expected Found the following storage types: 0) Azure Blob Storage Store data in an Azure blob storage container. 1) File Storage Store data as a file on local or remote file systems. 2) Google Cloud Storage Store data in a GCS bucket. 3) Local Storage Store data in a run's local file system. Select a storage type to create: 99999999 Invalid selection {INVALID_OPTION} " 324,"def check_planarity(G, counterexample=False): planarity_state = LRPlanarity(G) embedding = planarity_state.lr_planarity() if embedding is None: # graph is not planar if counterexample: return False, get_counterexample(G) else: return False, None else: # graph is planar return True, embedding ","Check if a graph is planar and return a counterexample or an embedding. A graph is planar iff it can be drawn in a plane without any edge intersections. Parameters ---------- G : NetworkX graph counterexample : bool A Kuratowski subgraph (to proof non planarity) is only returned if set to true. Returns ------- (is_planar, certificate) : (bool, NetworkX graph) tuple is_planar is true if the graph is planar. If the graph is planar `certificate` is a PlanarEmbedding otherwise it is a Kuratowski subgraph. Examples -------- >>> G = nx.Graph([(0, 1), (0, 2)]) >>> is_planar, P = nx.check_planarity(G) >>> print(is_planar) True When `G` is planar, a `PlanarEmbedding` instance is returned: >>> P.get_data() {0: [1, 2], 1: [0], 2: [0]} Notes ----- A (combinatorial) embedding consists of cyclic orderings of the incident edges at each vertex. Given such an embedding there are multiple approaches discussed in literature to drawing the graph (subject to various constraints, e.g. integer coordinates), see e.g. [2]. The planarity check algorithm and extraction of the combinatorial embedding is based on the Left-Right Planarity Test [1]. A counterexample is only generated if the corresponding parameter is set, because the complexity of the counterexample generation is higher. References ---------- .. [1] Ulrik Brandes: The Left-Right Planarity Test 2009 http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208 .. [2] Takao Nishizeki, Md Saidur Rahman: Planar graph drawing Lecture Notes Series on Computing: Volume 12 2004 ",228,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_planarity(G, counterexample=False): planarity_state = LRPlanarity(G) embedding = planarity_state.lr_planarity() if embedding is None: # graph is not planar if counterexample: return False, get_counterexample(G) else: return False, None else: # graph is planar return True, embedding ``` ###Assistant : Check if a graph is planar and return a counterexample or an embedding. A graph is planar iff it can be drawn in a plane without any edge intersections. Parameters ---------- G : NetworkX graph counterexample : bool A Kuratowski subgraph (to proof non planarity) is only returned if set to true. Returns ------- (is_planar, certificate) : (bool, NetworkX graph) tuple is_planar is true if the graph is planar. If the graph is planar `certificate` is a PlanarEmbedding otherwise it is a Kuratowski subgraph. 
Examples -------- >>> G = nx.Graph([(0, 1), (0, 2)]) >>> is_planar, P = nx.check_planarity(G) >>> print(is_planar) True When `G` is planar, a `PlanarEmbedding` instance is returned: >>> P.get_data() {0: [1, 2], 1: [0], 2: [0]} Notes ----- A (combinatorial) embedding consists of cyclic orderings of the incident edges at each vertex. Given such an embedding there are multiple approaches discussed in literature to drawing the graph (subject to various constraints, e.g. integer coordinates), see e.g. [2]. The planarity check algorithm and extraction of the combinatorial embedding is based on the Left-Right Planarity Test [1]. A counterexample is only generated if the corresponding parameter is set, because the complexity of the counterexample generation is higher. References ---------- .. [1] Ulrik Brandes: The Left-Right Planarity Test 2009 http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208 .. [2] Takao Nishizeki, Md Saidur Rahman: Planar graph drawing Lecture Notes Series on Computing: Volume 12 2004 " 325,"def _get_bool(val) -> Optional[bool]: if isinstance(val, bool): return val elif isinstance(val, str): if val.strip().lower() == 'true': return True elif val.strip().lower() == 'false': return False return None "," Converts val to bool if can be done with certainty. If we cannot infer intention we return None. ",18,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_bool(val) -> Optional[bool]: if isinstance(val, bool): return val elif isinstance(val, str): if val.strip().lower() == 'true': return True elif val.strip().lower() == 'false': return False return None ``` ###Assistant : Converts val to bool if can be done with certainty. If we cannot infer intention we return None. " 326,"def load_data_ptb(batch_size, max_window_size, num_noise_words): sentences = read_ptb() vocab = d2l.Vocab(sentences, min_freq=10) subsampled, counter = subsample(sentences, vocab) corpus = [vocab[line] for line in subsampled] all_centers, all_contexts = get_centers_and_contexts( corpus, max_window_size) all_negatives = get_negatives( all_contexts, vocab, counter, num_noise_words) dataset = gluon.data.ArrayDataset( all_centers, all_contexts, all_negatives) data_iter = gluon.data.DataLoader( dataset, batch_size, shuffle=True,batchify_fn=batchify, num_workers=d2l.get_dataloader_workers()) return data_iter, vocab d2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip', '0b8703943ccdb6eb788e6f091b8946e82231bc4d') d2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip', 'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a') d2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip', 'b5116e234e9eb9076672cfeabf5469f3eec904fa') d2l.DATA_HUB['wiki.en'] = (d2l.DATA_URL + 'wiki.en.zip', 'c1816da3821ae9f43899be655002f6c723e91b88') ","Download the PTB dataset and then load it into memory. Defined in :numref:`subsec_word2vec-minibatch-loading`",13,76,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def load_data_ptb(batch_size, max_window_size, num_noise_words): sentences = read_ptb() vocab = d2l.Vocab(sentences, min_freq=10) subsampled, counter = subsample(sentences, vocab) corpus = [vocab[line] for line in subsampled] all_centers, all_contexts = get_centers_and_contexts( corpus, max_window_size) all_negatives = get_negatives( all_contexts, vocab, counter, num_noise_words) dataset = gluon.data.ArrayDataset( all_centers, all_contexts, all_negatives) data_iter = gluon.data.DataLoader( dataset, batch_size, shuffle=True,batchify_fn=batchify, num_workers=d2l.get_dataloader_workers()) return data_iter, vocab d2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip', '0b8703943ccdb6eb788e6f091b8946e82231bc4d') d2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip', 'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a') d2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip', 'b5116e234e9eb9076672cfeabf5469f3eec904fa') d2l.DATA_HUB['wiki.en'] = (d2l.DATA_URL + 'wiki.en.zip', 'c1816da3821ae9f43899be655002f6c723e91b88') ``` ###Assistant : Download the PTB dataset and then load it into memory. Defined in :numref:`subsec_word2vec-minibatch-loading` " 327,"async def async_config_changed(self) -> None: assert self.driver is not None await self.hass.async_add_executor_job(self.driver.config_changed) ",Call config changed which writes out the new config to disk.,11,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_config_changed(self) -> None: assert self.driver is not None await self.hass.async_add_executor_job(self.driver.config_changed) ``` ###Assistant : Call config changed which writes out the new config to disk. " 328,"def setDebugActions(self, startAction, successAction, exceptionAction): self.debugActions = (startAction or _defaultStartDebugAction, successAction or _defaultSuccessDebugAction, exceptionAction or _defaultExceptionDebugAction) self.debug = True return self "," Enable display of debugging messages while doing pattern matching. ",9,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def setDebugActions(self, startAction, successAction, exceptionAction): self.debugActions = (startAction or _defaultStartDebugAction, successAction or _defaultSuccessDebugAction, exceptionAction or _defaultExceptionDebugAction) self.debug = True return self ``` ###Assistant : Enable display of debugging messages while doing pattern matching. 
" 329,"def get_qty_amount_data_for_cumulative(pr_doc, doc, items=None): if items is None: items = [] sum_qty, sum_amt = [0, 0] doctype = doc.get(""parenttype"") or doc.doctype date_field = ( ""transaction_date"" if frappe.get_meta(doctype).has_field(""transaction_date"") else ""posting_date"" ) child_doctype = ""{0} Item"".format(doctype) apply_on = frappe.scrub(pr_doc.get(""apply_on"")) values = [pr_doc.valid_from, pr_doc.valid_upto] condition = """" if pr_doc.warehouse: warehouses = get_child_warehouses(pr_doc.warehouse) condition += .format( child_doc=child_doctype, warehouses="","".join([""%s""] * len(warehouses)) ) values.extend(warehouses) if items: condition = "" and `tab{child_doc}`.{apply_on} in ({items})"".format( child_doc=child_doctype, apply_on=apply_on, items="","".join([""%s""] * len(items)) ) values.extend(items) data_set = frappe.db.sql( .format( parent_doc=doctype, child_doc=child_doctype, condition=condition, date_field=date_field ), tuple(values), as_dict=1, ) for data in data_set: sum_qty += data.get(""stock_qty"") sum_amt += data.get(""amount"") return [sum_qty, sum_amt] "," and `tab{child_doc}`.warehouse in ({warehouses}) SELECT `tab{child_doc}`.stock_qty, `tab{child_doc}`.amount FROM `tab{child_doc}`, `tab{parent_doc}` WHERE `tab{child_doc}`.parent = `tab{parent_doc}`.name and `tab{parent_doc}`.{date_field} between %s and %s and `tab{parent_doc}`.docstatus = 1 {condition} group by `tab{child_doc}`.name ",28,99,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_qty_amount_data_for_cumulative(pr_doc, doc, items=None): if items is None: items = [] sum_qty, sum_amt = [0, 0] doctype = doc.get(""parenttype"") or doc.doctype date_field = ( ""transaction_date"" if frappe.get_meta(doctype).has_field(""transaction_date"") else ""posting_date"" ) child_doctype = ""{0} Item"".format(doctype) apply_on = frappe.scrub(pr_doc.get(""apply_on"")) values = [pr_doc.valid_from, pr_doc.valid_upto] condition = """" if pr_doc.warehouse: warehouses = get_child_warehouses(pr_doc.warehouse) condition += .format( child_doc=child_doctype, warehouses="","".join([""%s""] * len(warehouses)) ) values.extend(warehouses) if items: condition = "" and `tab{child_doc}`.{apply_on} in ({items})"".format( child_doc=child_doctype, apply_on=apply_on, items="","".join([""%s""] * len(items)) ) values.extend(items) data_set = frappe.db.sql( .format( parent_doc=doctype, child_doc=child_doctype, condition=condition, date_field=date_field ), tuple(values), as_dict=1, ) for data in data_set: sum_qty += data.get(""stock_qty"") sum_amt += data.get(""amount"") return [sum_qty, sum_amt] ``` ###Assistant : and `tab{child_doc}`.warehouse in ({warehouses}) SELECT `tab{child_doc}`.stock_qty, `tab{child_doc}`.amount FROM `tab{child_doc}`, `tab{parent_doc}` WHERE `tab{child_doc}`.parent = `tab{parent_doc}`.name and `tab{parent_doc}`.{date_field} between %s and %s and `tab{parent_doc}`.docstatus = 1 {condition} group by `tab{child_doc}`.name " 330,"def make_system(A, M, x0, b): A_ = A A = aslinearoperator(A) if A.shape[0] != A.shape[1]: raise ValueError(f'expected square matrix, but got shape={(A.shape,)}') N = A.shape[0] b = asanyarray(b) if not (b.shape == (N,1) or b.shape == (N,)): raise ValueError(f'shapes of A {A.shape} and b {b.shape} are ' 'incompatible') if b.dtype.char not in 'fdFD': b = b.astype('d') # upcast non-FP types to double ","Make a linear system Ax=b Parameters ---------- A : LinearOperator sparse or dense matrix (or any valid input 
to aslinearoperator) M : {LinearOperator, None} preconditioner sparse or dense matrix (or any valid input to aslinearoperator) x0 : {array_like, str, None} initial guess to iterative method. ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``. Default is `None`, which means using the zero initial guess. b : array_like right hand side Returns ------- (A, M, x, b, postprocess) A : LinearOperator matrix of the linear system M : LinearOperator preconditioner x : rank 1 ndarray initial guess b : rank 1 ndarray right hand side postprocess : function converts the solution vector to the appropriate type and dimensions (e.g. (N,1) matrix) ",123,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_system(A, M, x0, b): A_ = A A = aslinearoperator(A) if A.shape[0] != A.shape[1]: raise ValueError(f'expected square matrix, but got shape={(A.shape,)}') N = A.shape[0] b = asanyarray(b) if not (b.shape == (N,1) or b.shape == (N,)): raise ValueError(f'shapes of A {A.shape} and b {b.shape} are ' 'incompatible') if b.dtype.char not in 'fdFD': b = b.astype('d') # upcast non-FP types to double ``` ###Assistant : Make a linear system Ax=b Parameters ---------- A : LinearOperator sparse or dense matrix (or any valid input to aslinearoperator) M : {LinearOperator, None} preconditioner sparse or dense matrix (or any valid input to aslinearoperator) x0 : {array_like, str, None} initial guess to iterative method. ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``. Default is `None`, which means using the zero initial guess. b : array_like right hand side Returns ------- (A, M, x, b, postprocess) A : LinearOperator matrix of the linear system M : LinearOperator preconditioner x : rank 1 ndarray initial guess b : rank 1 ndarray right hand side postprocess : function converts the solution vector to the appropriate type and dimensions (e.g. (N,1) matrix) " 331,"def dodecahedral_graph(create_using=None): G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using) G.name = ""Dodecahedral Graph"" return G "," Returns the Platonic Dodecahedral graph. The dodecahedral graph has 20 nodes and 30 edges. The skeleton of the dodecahedron forms a graph. It is one of 5 Platonic graphs [1]_. It can be described in LCF notation as: ``[10, 7, 4, -4, -7, 10, -4, 7, -7, 4]^2`` [2]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph Dodecahedral Graph with 20 nodes and 30 edges References ---------- .. [1] https://en.wikipedia.org/wiki/Regular_dodecahedron#Dodecahedral_graph .. [2] https://mathworld.wolfram.com/DodecahedralGraph.html ",91,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dodecahedral_graph(create_using=None): G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using) G.name = ""Dodecahedral Graph"" return G ``` ###Assistant : Returns the Platonic Dodecahedral graph. The dodecahedral graph has 20 nodes and 30 edges. The skeleton of the dodecahedron forms a graph. It is one of 5 Platonic graphs [1]_. It can be described in LCF notation as: ``[10, 7, 4, -4, -7, 10, -4, 7, -7, 4]^2`` [2]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. 
Returns ------- G : networkx Graph Dodecahedral Graph with 20 nodes and 30 edges References ---------- .. [1] https://en.wikipedia.org/wiki/Regular_dodecahedron#Dodecahedral_graph .. [2] https://mathworld.wolfram.com/DodecahedralGraph.html " 332,"def arc_tangent(value, default=_SENTINEL): try: return math.atan(float(value)) except (ValueError, TypeError): if default is _SENTINEL: raise_no_default(""atan"", value) return default ",Filter and function to get arc tangent of the value.,10,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def arc_tangent(value, default=_SENTINEL): try: return math.atan(float(value)) except (ValueError, TypeError): if default is _SENTINEL: raise_no_default(""atan"", value) return default ``` ###Assistant : Filter and function to get arc tangent of the value. " 333,"def sparse_top_k_categorical_matches(y_true, y_pred, k=5): reshape_matches = False y_true = tf.convert_to_tensor(y_true) y_pred = tf.convert_to_tensor(y_pred) y_true_rank = y_true.shape.ndims y_pred_rank = y_pred.shape.ndims y_true_org_shape = tf.shape(y_true) # Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,) if (y_true_rank is not None) and (y_pred_rank is not None): if y_pred_rank > 2: y_pred = tf.reshape(y_pred, [-1, y_pred.shape[-1]]) if y_true_rank > 1: reshape_matches = True y_true = tf.reshape(y_true, [-1]) matches = tf.cast( tf.math.in_top_k( predictions=y_pred, targets=tf.cast(y_true, ""int32""), k=k ), dtype=backend.floatx(), ) # returned matches is expected to have same shape as y_true input if reshape_matches: return tf.reshape(matches, shape=y_true_org_shape) return matches ","Creates float Tensor, 1.0 for label-TopK_prediction match, 0.0 for mismatch. Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. Returns: Match tensor: 1.0 for label-prediction match, 0.0 for mismatch. ",46,92,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sparse_top_k_categorical_matches(y_true, y_pred, k=5): reshape_matches = False y_true = tf.convert_to_tensor(y_true) y_pred = tf.convert_to_tensor(y_pred) y_true_rank = y_true.shape.ndims y_pred_rank = y_pred.shape.ndims y_true_org_shape = tf.shape(y_true) # Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,) if (y_true_rank is not None) and (y_pred_rank is not None): if y_pred_rank > 2: y_pred = tf.reshape(y_pred, [-1, y_pred.shape[-1]]) if y_true_rank > 1: reshape_matches = True y_true = tf.reshape(y_true, [-1]) matches = tf.cast( tf.math.in_top_k( predictions=y_pred, targets=tf.cast(y_true, ""int32""), k=k ), dtype=backend.floatx(), ) # returned matches is expected to have same shape as y_true input if reshape_matches: return tf.reshape(matches, shape=y_true_org_shape) return matches ``` ###Assistant : Creates float Tensor, 1.0 for label-TopK_prediction match, 0.0 for mismatch. Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. Returns: Match tensor: 1.0 for label-prediction match, 0.0 for mismatch. 
" 334,"def tax_account_query(doctype, txt, searchfield, start, page_len, filters): company_currency = erpnext.get_company_currency(filters.get(""company"")) def get_accounts(with_account_type_filter): account_type_condition = """" if with_account_type_filter: account_type_condition = ""AND account_type in %(account_types)s"" accounts = frappe.db.sql( .format( account_type_condition=account_type_condition, searchfield=searchfield, mcond=get_match_cond(doctype), ), dict( account_types=filters.get(""account_type""), company=filters.get(""company""), disabled=filters.get(""disabled"", 0), currency=company_currency, txt=""%{}%"".format(txt), offset=start, limit=page_len, ), ) return accounts tax_accounts = get_accounts(True) if not tax_accounts: tax_accounts = get_accounts(False) return tax_accounts @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs"," SELECT name, parent_account FROM `tabAccount` WHERE `tabAccount`.docstatus!=2 {account_type_condition} AND is_group = 0 AND company = %(company)s AND disabled = %(disabled)s AND (account_currency = %(currency)s or ifnull(account_currency, '') = '') AND `{searchfield}` LIKE %(txt)s {mcond} ORDER BY idx DESC, name LIMIT %(offset)s, %(limit)s ",42,57,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tax_account_query(doctype, txt, searchfield, start, page_len, filters): company_currency = erpnext.get_company_currency(filters.get(""company"")) def get_accounts(with_account_type_filter): account_type_condition = """" if with_account_type_filter: account_type_condition = ""AND account_type in %(account_types)s"" accounts = frappe.db.sql( .format( account_type_condition=account_type_condition, searchfield=searchfield, mcond=get_match_cond(doctype), ), dict( account_types=filters.get(""account_type""), company=filters.get(""company""), disabled=filters.get(""disabled"", 0), currency=company_currency, txt=""%{}%"".format(txt), offset=start, limit=page_len, ), ) return accounts tax_accounts = get_accounts(True) if not tax_accounts: tax_accounts = get_accounts(False) return tax_accounts @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs ``` ###Assistant : SELECT name, parent_account FROM `tabAccount` WHERE `tabAccount`.docstatus!=2 {account_type_condition} AND is_group = 0 AND company = %(company)s AND disabled = %(disabled)s AND (account_currency = %(currency)s or ifnull(account_currency, '') = '') AND `{searchfield}` LIKE %(txt)s {mcond} ORDER BY idx DESC, name LIMIT %(offset)s, %(limit)s " 335,"def test_add_post_duplicate_choose_permission(self): # Create group with access to admin and add permission. bakers_group = Group.objects.create(name=""Bakers"") access_admin_perm = Permission.objects.get( content_type__app_label=""wagtailadmin"", codename=""access_admin"" ) bakers_group.permissions.add(access_admin_perm) # Create the ""Bakery"" Collection and grant ""add"" permission to the Bakers group. root = Collection.objects.get(id=get_root_collection_id()) bakery_collection = root.add_child(instance=Collection(name=""Bakery"")) GroupCollectionPermission.objects.create( group=bakers_group, collection=bakery_collection, permission=Permission.objects.get( content_type__app_label=""wagtailimages"", codename=""add_image"" ), ) "," When a duplicate image is added but the user doesn't have permission to choose the original image, the add views lets the user upload it as if it weren't a duplicate. ",31,49,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_add_post_duplicate_choose_permission(self): # Create group with access to admin and add permission. bakers_group = Group.objects.create(name=""Bakers"") access_admin_perm = Permission.objects.get( content_type__app_label=""wagtailadmin"", codename=""access_admin"" ) bakers_group.permissions.add(access_admin_perm) # Create the ""Bakery"" Collection and grant ""add"" permission to the Bakers group. root = Collection.objects.get(id=get_root_collection_id()) bakery_collection = root.add_child(instance=Collection(name=""Bakery"")) GroupCollectionPermission.objects.create( group=bakers_group, collection=bakery_collection, permission=Permission.objects.get( content_type__app_label=""wagtailimages"", codename=""add_image"" ), ) ``` ###Assistant : When a duplicate image is added but the user doesn't have permission to choose the original image, the add views lets the user upload it as if it weren't a duplicate. " 336,"def netmiko_commands(*commands, **kwargs): conn = _netmiko_conn(**kwargs) ret = [] for cmd in commands: ret.append(conn.send_command(cmd)) return ret @proxy_napalm_wrap"," .. versionadded:: 2019.2.0 Invoke one or more commands to be executed on the remote device, via Netmiko. Returns a list of strings, with the output from each command. commands A list of commands to be executed. expect_string Regular expression pattern to use for determining end of output. If left blank will default to being based on router prompt. delay_factor: ``1`` Multiplying factor used to adjust delays (default: ``1``). max_loops: ``500`` Controls wait time in conjunction with delay_factor. Will default to be based upon self.timeout. auto_find_prompt: ``True`` Whether it should try to auto-detect the prompt (default: ``True``). strip_prompt: ``True`` Remove the trailing router prompt from the output (default: ``True``). strip_command: ``True`` Remove the echo of the command from the output (default: ``True``). normalize: ``True`` Ensure the proper enter is sent at end of command (default: ``True``). use_textfsm: ``False`` Process command output through TextFSM template (default: ``False``). CLI Example: .. code-block:: bash salt '*' napalm.netmiko_commands 'show version' 'show interfaces' ",157,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def netmiko_commands(*commands, **kwargs): conn = _netmiko_conn(**kwargs) ret = [] for cmd in commands: ret.append(conn.send_command(cmd)) return ret @proxy_napalm_wrap ``` ###Assistant : .. versionadded:: 2019.2.0 Invoke one or more commands to be executed on the remote device, via Netmiko. Returns a list of strings, with the output from each command. commands A list of commands to be executed. expect_string Regular expression pattern to use for determining end of output. If left blank will default to being based on router prompt. delay_factor: ``1`` Multiplying factor used to adjust delays (default: ``1``). max_loops: ``500`` Controls wait time in conjunction with delay_factor. Will default to be based upon self.timeout. auto_find_prompt: ``True`` Whether it should try to auto-detect the prompt (default: ``True``). strip_prompt: ``True`` Remove the trailing router prompt from the output (default: ``True``). strip_command: ``True`` Remove the echo of the command from the output (default: ``True``). normalize: ``True`` Ensure the proper enter is sent at end of command (default: ``True``). 
use_textfsm: ``False`` Process command output through TextFSM template (default: ``False``). CLI Example: .. code-block:: bash salt '*' napalm.netmiko_commands 'show version' 'show interfaces' " 337,"def decoder(self, side): input_ = Input(shape=(8, 8, 512)) var_x = input_ var_x = UpscaleBlock(256, activation=""leakyrelu"")(var_x) var_x = UpscaleBlock(128, activation=""leakyrelu"")(var_x) var_x = UpscaleBlock(64, activation=""leakyrelu"")(var_x) var_x = Conv2DOutput(3, 5, name=f""face_out_{side}"")(var_x) outputs = [var_x] if self.learn_mask: var_y = input_ var_y = UpscaleBlock(256, activation=""leakyrelu"")(var_y) var_y = UpscaleBlock(128, activation=""leakyrelu"")(var_y) var_y = UpscaleBlock(64, activation=""leakyrelu"")(var_y) var_y = Conv2DOutput(1, 5, name=f""mask_out_{side}"")(var_y) outputs.append(var_y) return KerasModel(input_, outputs=outputs, name=f""decoder_{side}"") "," The original Faceswap Decoder Network. The decoders for the original model have separate weights for each side ""A"" and ""B"", so two instances are created in :func:`build_model`, one for each side. Parameters ---------- side: str Either `""a` or `""b""`. This is used for naming the decoder model. Returns ------- :class:`keras.models.Model` The Keras decoder model. This will be called twice, once for each side. ",63,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def decoder(self, side): input_ = Input(shape=(8, 8, 512)) var_x = input_ var_x = UpscaleBlock(256, activation=""leakyrelu"")(var_x) var_x = UpscaleBlock(128, activation=""leakyrelu"")(var_x) var_x = UpscaleBlock(64, activation=""leakyrelu"")(var_x) var_x = Conv2DOutput(3, 5, name=f""face_out_{side}"")(var_x) outputs = [var_x] if self.learn_mask: var_y = input_ var_y = UpscaleBlock(256, activation=""leakyrelu"")(var_y) var_y = UpscaleBlock(128, activation=""leakyrelu"")(var_y) var_y = UpscaleBlock(64, activation=""leakyrelu"")(var_y) var_y = Conv2DOutput(1, 5, name=f""mask_out_{side}"")(var_y) outputs.append(var_y) return KerasModel(input_, outputs=outputs, name=f""decoder_{side}"") ``` ###Assistant : The original Faceswap Decoder Network. The decoders for the original model have separate weights for each side ""A"" and ""B"", so two instances are created in :func:`build_model`, one for each side. Parameters ---------- side: str Either `""a` or `""b""`. This is used for naming the decoder model. Returns ------- :class:`keras.models.Model` The Keras decoder model. This will be called twice, once for each side. 
" 338,"def test_users_getting_add_peer_event(self) -> None: streams_to_sub = [""multi_user_stream""] othello = self.example_user(""othello"") cordelia = self.example_user(""cordelia"") iago = self.example_user(""iago"") orig_user_ids_to_subscribe = [self.test_user.id, othello.id] self.common_subscribe_to_streams( self.test_user, streams_to_sub, dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()), ) new_user_ids_to_subscribe = [iago.id, cordelia.id] events: List[Mapping[str, Any]] = [] with self.tornado_redirected_to_list(events, expected_num_events=5): self.common_subscribe_to_streams( self.test_user, streams_to_sub, dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()), ) add_peer_events = [event for event in events if event[""event""].get(""op"") == ""peer_add""] (add_peer_event,) = add_peer_events self.assertEqual(add_peer_event[""event""][""type""], ""subscription"") self.assertEqual(add_peer_event[""event""][""op""], ""peer_add"") event_sent_to_ids = add_peer_event[""users""] for user_id in new_user_ids_to_subscribe: # Make sure new users subscribed to stream is not in # peer_add event recipient list self.assertNotIn(user_id, event_sent_to_ids) for old_user in orig_user_ids_to_subscribe: # Check non-new users are in peer_add event recipient list. self.assertIn(old_user, event_sent_to_ids) "," Check users getting add_peer_event is correct ",6,101,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_users_getting_add_peer_event(self) -> None: streams_to_sub = [""multi_user_stream""] othello = self.example_user(""othello"") cordelia = self.example_user(""cordelia"") iago = self.example_user(""iago"") orig_user_ids_to_subscribe = [self.test_user.id, othello.id] self.common_subscribe_to_streams( self.test_user, streams_to_sub, dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()), ) new_user_ids_to_subscribe = [iago.id, cordelia.id] events: List[Mapping[str, Any]] = [] with self.tornado_redirected_to_list(events, expected_num_events=5): self.common_subscribe_to_streams( self.test_user, streams_to_sub, dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()), ) add_peer_events = [event for event in events if event[""event""].get(""op"") == ""peer_add""] (add_peer_event,) = add_peer_events self.assertEqual(add_peer_event[""event""][""type""], ""subscription"") self.assertEqual(add_peer_event[""event""][""op""], ""peer_add"") event_sent_to_ids = add_peer_event[""users""] for user_id in new_user_ids_to_subscribe: # Make sure new users subscribed to stream is not in # peer_add event recipient list self.assertNotIn(user_id, event_sent_to_ids) for old_user in orig_user_ids_to_subscribe: # Check non-new users are in peer_add event recipient list. self.assertIn(old_user, event_sent_to_ids) ``` ###Assistant : Check users getting add_peer_event is correct " 339,"def show_trace_2d(f, results): d2l.set_figsize() d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e') x1, x2 = d2l.meshgrid(d2l.arange(-5.5, 1.0, 0.1), d2l.arange(-3.0, 1.0, 0.1)) d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4') d2l.plt.xlabel('x1') d2l.plt.ylabel('x2') d2l.DATA_HUB['airfoil'] = (d2l.DATA_URL + 'airfoil_self_noise.dat', '76e5be1548fd8222e5074cf0faae75edff8cf93f') ","Show the trace of 2D variables during optimization. Defined in :numref:`subsec_gd-learningrate`",11,29,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def show_trace_2d(f, results): d2l.set_figsize() d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e') x1, x2 = d2l.meshgrid(d2l.arange(-5.5, 1.0, 0.1), d2l.arange(-3.0, 1.0, 0.1)) d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4') d2l.plt.xlabel('x1') d2l.plt.ylabel('x2') d2l.DATA_HUB['airfoil'] = (d2l.DATA_URL + 'airfoil_self_noise.dat', '76e5be1548fd8222e5074cf0faae75edff8cf93f') ``` ###Assistant : Show the trace of 2D variables during optimization. Defined in :numref:`subsec_gd-learningrate` " 340,"def test_dataset(ray_start_4_cpus, use_local): model_creator = mlp_identity.model_creator optimizer_creator = mlp_identity.optimizer_creator dataset_creator = mlp_identity.dataset_creator DatasetOperator = TrainingOperator.from_creators( model_creator=model_creator, optimizer_creator=optimizer_creator, loss_creator=nn.MSELoss, ) trainer = TorchTrainer( training_operator_cls=DatasetOperator, use_local=use_local, num_workers=2, ) dataset = dataset_creator() for i in range(5): trainer.train(dataset=dataset, num_steps=100) x = mlp_identity.to_mat(0.5) prediction = float(trainer.get_model()(x)[0][0]) assert 0.4 <= prediction <= 0.6 trainer.shutdown() @pytest.mark.parametrize(""use_local"", [True, False])"," This test tries training the mlp_identity example. We check the accuracy of the model as an all inclusive way of ensuring that we are properly sharding and iterating over the entire dataset (instead of repeating the first set of points for example). ",42,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dataset(ray_start_4_cpus, use_local): model_creator = mlp_identity.model_creator optimizer_creator = mlp_identity.optimizer_creator dataset_creator = mlp_identity.dataset_creator DatasetOperator = TrainingOperator.from_creators( model_creator=model_creator, optimizer_creator=optimizer_creator, loss_creator=nn.MSELoss, ) trainer = TorchTrainer( training_operator_cls=DatasetOperator, use_local=use_local, num_workers=2, ) dataset = dataset_creator() for i in range(5): trainer.train(dataset=dataset, num_steps=100) x = mlp_identity.to_mat(0.5) prediction = float(trainer.get_model()(x)[0][0]) assert 0.4 <= prediction <= 0.6 trainer.shutdown() @pytest.mark.parametrize(""use_local"", [True, False]) ``` ###Assistant : This test tries training the mlp_identity example. We check the accuracy of the model as an all inclusive way of ensuring that we are properly sharding and iterating over the entire dataset (instead of repeating the first set of points for example). " 341,"def lovasz_softmax_flat(probas, labels, classes='present', weighted=None): if probas.numel() == 0: # only void pixels, the gradients should be 0 return probas * 0. 
C = probas.size(1) losses = [] class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes for c in class_to_sum: fg = (labels == c).float() # foreground for class c if (classes is 'present' and fg.sum() == 0): continue if C == 1: if len(classes) > 1: raise ValueError('Sigmoid output possible only with 1 class') class_pred = probas[:, 0] else: class_pred = probas[:, c] errors = (Variable(fg) - class_pred).abs() errors_sorted, perm = torch.sort(errors, 0, descending=True) perm = perm.data fg_sorted = fg[perm] if weighted is not None: losses.append(weighted[c]*torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) else: losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) return mean(losses) "," Multi-class Lovasz-Softmax loss probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) labels: [P] Tensor, ground truth labels (between 0 and C - 1) classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. ",45,115,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def lovasz_softmax_flat(probas, labels, classes='present', weighted=None): if probas.numel() == 0: # only void pixels, the gradients should be 0 return probas * 0. C = probas.size(1) losses = [] class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes for c in class_to_sum: fg = (labels == c).float() # foreground for class c if (classes is 'present' and fg.sum() == 0): continue if C == 1: if len(classes) > 1: raise ValueError('Sigmoid output possible only with 1 class') class_pred = probas[:, 0] else: class_pred = probas[:, c] errors = (Variable(fg) - class_pred).abs() errors_sorted, perm = torch.sort(errors, 0, descending=True) perm = perm.data fg_sorted = fg[perm] if weighted is not None: losses.append(weighted[c]*torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) else: losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) return mean(losses) ``` ###Assistant : Multi-class Lovasz-Softmax loss probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) labels: [P] Tensor, ground truth labels (between 0 and C - 1) classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. " 342,"def mixin_distributed_feature_parser(parser): gp = add_arg_group(parser, title='Distributed') gp.add_argument( '--quiet-remote-logs', action='store_true', default=False, help='Do not display the streaming of remote logs on local console', ) gp.add_argument( '--upload-files', type=str, nargs='*', metavar='FILE', help=, ) gp.add_argument( '--disable-remote', action='store_true', default=False, help='If set, remote pod invocation is avoided. This is used by pods created by JinaD' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) ","Mixing in arguments required by :class:`BaseDeployment` into the given parser. :param parser: the parser instance to which we add arguments The files on the host to be uploaded to the remote workspace. This can be useful when your Deployment has more file dependencies beyond a single YAML file, e.g. Python files, data files. Note, - currently only flatten structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies. - by default, `--uses` YAML file is always uploaded. - uploaded files are by default isolated across the runs. 
To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace. ",121,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mixin_distributed_feature_parser(parser): gp = add_arg_group(parser, title='Distributed') gp.add_argument( '--quiet-remote-logs', action='store_true', default=False, help='Do not display the streaming of remote logs on local console', ) gp.add_argument( '--upload-files', type=str, nargs='*', metavar='FILE', help=, ) gp.add_argument( '--disable-remote', action='store_true', default=False, help='If set, remote pod invocation is avoided. This is used by pods created by JinaD' if _SHOW_ALL_ARGS else argparse.SUPPRESS, ) ``` ###Assistant : Mixing in arguments required by :class:`BaseDeployment` into the given parser. :param parser: the parser instance to which we add arguments The files on the host to be uploaded to the remote workspace. This can be useful when your Deployment has more file dependencies beyond a single YAML file, e.g. Python files, data files. Note, - currently only flatten structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies. - by default, `--uses` YAML file is always uploaded. - uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace. " 343,"def get_bootstrap_modules(): # Import 'struct' modules to get real paths to module file names. mod_struct = __import__('struct') # Basic modules necessary for the bootstrap process. loader_mods = TOC() loaderpath = os.path.join(HOMEPATH, 'PyInstaller', 'loader') # On some platforms (Windows, Debian/Ubuntu) '_struct' and zlib modules are built-in modules (linked statically) # and thus does not have attribute __file__. 'struct' module is required for reading Python bytecode from # executable. 'zlib' is required to decompress this bytecode. for mod_name in ['_struct', 'zlib']: mod = __import__(mod_name) # C extension. if hasattr(mod, '__file__'): mod_file = os.path.abspath(mod.__file__) if os.path.basename(os.path.dirname(mod_file)) == 'lib-dynload': # Divert extensions originating from python's lib-dynload directory, to match behavior of #5604. mod_name = os.path.join('lib-dynload', mod_name) loader_mods.append((mod_name, mod_file, 'EXTENSION')) # NOTE:These modules should be kept simple without any complicated dependencies. loader_mods += [ ('struct', os.path.abspath(mod_struct.__file__), 'PYMODULE'), ('pyimod01_os_path', os.path.join(loaderpath, 'pyimod01_os_path.py'), 'PYMODULE'), ('pyimod02_archive', os.path.join(loaderpath, 'pyimod02_archive.py'), 'PYMODULE'), ('pyimod03_importers', os.path.join(loaderpath, 'pyimod03_importers.py'), 'PYMODULE'), ('pyimod04_ctypes', os.path.join(loaderpath, 'pyimod04_ctypes.py'), 'PYMODULE'), ('pyiboot01_bootstrap', os.path.join(loaderpath, 'pyiboot01_bootstrap.py'), 'PYSOURCE'), ] return loader_mods "," Get TOC with the bootstrapping modules and their dependencies. :return: TOC with modules ",13,155,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_bootstrap_modules(): # Import 'struct' modules to get real paths to module file names. mod_struct = __import__('struct') # Basic modules necessary for the bootstrap process. 
loader_mods = TOC() loaderpath = os.path.join(HOMEPATH, 'PyInstaller', 'loader') # On some platforms (Windows, Debian/Ubuntu) '_struct' and zlib modules are built-in modules (linked statically) # and thus does not have attribute __file__. 'struct' module is required for reading Python bytecode from # executable. 'zlib' is required to decompress this bytecode. for mod_name in ['_struct', 'zlib']: mod = __import__(mod_name) # C extension. if hasattr(mod, '__file__'): mod_file = os.path.abspath(mod.__file__) if os.path.basename(os.path.dirname(mod_file)) == 'lib-dynload': # Divert extensions originating from python's lib-dynload directory, to match behavior of #5604. mod_name = os.path.join('lib-dynload', mod_name) loader_mods.append((mod_name, mod_file, 'EXTENSION')) # NOTE:These modules should be kept simple without any complicated dependencies. loader_mods += [ ('struct', os.path.abspath(mod_struct.__file__), 'PYMODULE'), ('pyimod01_os_path', os.path.join(loaderpath, 'pyimod01_os_path.py'), 'PYMODULE'), ('pyimod02_archive', os.path.join(loaderpath, 'pyimod02_archive.py'), 'PYMODULE'), ('pyimod03_importers', os.path.join(loaderpath, 'pyimod03_importers.py'), 'PYMODULE'), ('pyimod04_ctypes', os.path.join(loaderpath, 'pyimod04_ctypes.py'), 'PYMODULE'), ('pyiboot01_bootstrap', os.path.join(loaderpath, 'pyiboot01_bootstrap.py'), 'PYSOURCE'), ] return loader_mods ``` ###Assistant : Get TOC with the bootstrapping modules and their dependencies. :return: TOC with modules " 344,"def getquoted(self): if self.is_geometry: # Psycopg will figure out whether to use E'\\000' or '\000'. return b""%s(%s)"" % ( b""ST_GeogFromWKB"" if self.geography else b""ST_GeomFromEWKB"", self._adapter.getquoted(), ) else: # For rasters, add explicit type cast to WKB string. return b""'%s'::raster"" % self.ewkb.encode() "," Return a properly quoted string for use in PostgreSQL/PostGIS. ",9,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getquoted(self): if self.is_geometry: # Psycopg will figure out whether to use E'\\000' or '\000'. return b""%s(%s)"" % ( b""ST_GeogFromWKB"" if self.geography else b""ST_GeomFromEWKB"", self._adapter.getquoted(), ) else: # For rasters, add explicit type cast to WKB string. return b""'%s'::raster"" % self.ewkb.encode() ``` ###Assistant : Return a properly quoted string for use in PostgreSQL/PostGIS. 
" 345,"def _set_skip_list(self) -> None: if self._skip_num == 1 and not self._alignments.data: logger.debug(""No frames to be skipped"") return skip_list = [] for idx, filename in enumerate(self._images.file_list): if idx % self._skip_num != 0: logger.trace(""Adding image '%s' to skip list due to extract_every_n = %s"", filename, self._skip_num) skip_list.append(idx) # Items may be in the alignments file if skip-existing[-faces] is selected elif os.path.basename(filename) in self._alignments.data: self._existing_count += 1 logger.trace(""Removing image: '%s' due to previously existing"", filename) skip_list.append(idx) if self._existing_count != 0: logger.info(""Skipping %s frames due to skip_existing/skip_existing_faces."", self._existing_count) logger.debug(""Adding skip list: %s"", skip_list) self._images.add_skip_list(skip_list) "," Add the skip list to the image loader Checks against `extract_every_n` and the existence of alignments data (can exist if `skip_existing` or `skip_existing_faces` has been provided) and compiles a list of frame indices that should not be processed, providing these to :class:`lib.image.ImagesLoader`. ",42,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_skip_list(self) -> None: if self._skip_num == 1 and not self._alignments.data: logger.debug(""No frames to be skipped"") return skip_list = [] for idx, filename in enumerate(self._images.file_list): if idx % self._skip_num != 0: logger.trace(""Adding image '%s' to skip list due to extract_every_n = %s"", filename, self._skip_num) skip_list.append(idx) # Items may be in the alignments file if skip-existing[-faces] is selected elif os.path.basename(filename) in self._alignments.data: self._existing_count += 1 logger.trace(""Removing image: '%s' due to previously existing"", filename) skip_list.append(idx) if self._existing_count != 0: logger.info(""Skipping %s frames due to skip_existing/skip_existing_faces."", self._existing_count) logger.debug(""Adding skip list: %s"", skip_list) self._images.add_skip_list(skip_list) ``` ###Assistant : Add the skip list to the image loader Checks against `extract_every_n` and the existence of alignments data (can exist if `skip_existing` or `skip_existing_faces` has been provided) and compiles a list of frame indices that should not be processed, providing these to :class:`lib.image.ImagesLoader`. " 346,"def caplog(caplog): config = setup_logging() for name, logger_config in config[""loggers""].items(): if not logger_config.get(""propagate"", True): logger = get_logger(name) logger.handlers.append(caplog.handler) yield caplog "," Overrides caplog to apply to all of our loggers that do not propagate and consequently would not be captured by caplog. ",21,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def caplog(caplog): config = setup_logging() for name, logger_config in config[""loggers""].items(): if not logger_config.get(""propagate"", True): logger = get_logger(name) logger.handlers.append(caplog.handler) yield caplog ``` ###Assistant : Overrides caplog to apply to all of our loggers that do not propagate and consequently would not be captured by caplog. 
" 347,"def _show_mesh(self, mesh_ids, face_index, detected_face, top_left): state = ""normal"" if (self._tk_vars[""selected_editor""].get() != ""Mask"" or self._optional_annotations[""mesh""]) else ""hidden"" kwargs = dict(polygon=dict(fill="""", width=2, outline=self._canvas.control_colors[""Mesh""]), line=dict(fill=self._canvas.control_colors[""Mesh""], width=2)) edited = (self._tk_vars[""edited""].get() and self._tk_vars[""selected_editor""].get() not in (""Mask"", ""View"")) landmarks = self._viewport.get_landmarks(self.frame_index, face_index, detected_face, top_left, edited) for key, kwarg in kwargs.items(): for idx, mesh_id in enumerate(mesh_ids[key]): self._canvas.coords(mesh_id, *landmarks[key][idx].flatten()) self._canvas.itemconfig(mesh_id, state=state, **kwarg) self._canvas.addtag_withtag(f""active_mesh_{key}"", mesh_id) "," Display the mesh annotation for the given face, at the given location. Parameters ---------- mesh_ids: dict Dictionary containing the `polygon` and `line` tkinter canvas identifiers that make up the mesh for the given face face_index: int The face index within the frame for the given face detected_face: :class:`~lib.align.DetectedFace` The detected face object that contains the landmarks for generating the mesh top_left: tuple The (x, y) top left co-ordinates of the mesh's bounding box ",73,57,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _show_mesh(self, mesh_ids, face_index, detected_face, top_left): state = ""normal"" if (self._tk_vars[""selected_editor""].get() != ""Mask"" or self._optional_annotations[""mesh""]) else ""hidden"" kwargs = dict(polygon=dict(fill="""", width=2, outline=self._canvas.control_colors[""Mesh""]), line=dict(fill=self._canvas.control_colors[""Mesh""], width=2)) edited = (self._tk_vars[""edited""].get() and self._tk_vars[""selected_editor""].get() not in (""Mask"", ""View"")) landmarks = self._viewport.get_landmarks(self.frame_index, face_index, detected_face, top_left, edited) for key, kwarg in kwargs.items(): for idx, mesh_id in enumerate(mesh_ids[key]): self._canvas.coords(mesh_id, *landmarks[key][idx].flatten()) self._canvas.itemconfig(mesh_id, state=state, **kwarg) self._canvas.addtag_withtag(f""active_mesh_{key}"", mesh_id) ``` ###Assistant : Display the mesh annotation for the given face, at the given location. Parameters ---------- mesh_ids: dict Dictionary containing the `polygon` and `line` tkinter canvas identifiers that make up the mesh for the given face face_index: int The face index within the frame for the given face detected_face: :class:`~lib.align.DetectedFace` The detected face object that contains the landmarks for generating the mesh top_left: tuple The (x, y) top left co-ordinates of the mesh's bounding box " 348,"def _get_curr_status(self) -> Tuple[DeploymentStatusInfo, bool]: # TODO(edoakes): we could make this more efficient in steady-state by # having a ""healthy"" flag that gets flipped if an update or replica # failure happens. 
target_version = self._target_version target_replica_count = self._target_replicas all_running_replica_cnt = self._replicas.count(states=[ReplicaState.RUNNING]) running_at_target_version_replica_cnt = self._replicas.count( states=[ReplicaState.RUNNING], version=target_version ) failed_to_start_count = self._replica_constructor_retry_counter failed_to_start_threshold = min( MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, target_replica_count * 3 ) # Got to make a call to complete current deploy() goal after # start failure threshold reached, while we might still have # pending replicas in current goal. if ( failed_to_start_count >= failed_to_start_threshold and failed_to_start_threshold != 0 ): if running_at_target_version_replica_cnt > 0: # At least one RUNNING replica at target state, partial # success; We can stop tracking constructor failures and # leave it to the controller to fully scale to target # number of replicas and only return as completed once # reached target replica count self._replica_constructor_retry_counter = -1 else: return ( DeploymentStatusInfo( status=DeploymentStatus.FAILED, message=( ""The Deployment constructor failed "" f""{failed_to_start_count} times in a row. See "" ""logs for details."" ), ), False, ) # If we have pending ops, the current goal is *not* ready. if ( self._replicas.count( states=[ ReplicaState.STARTING, ReplicaState.UPDATING, ReplicaState.RECOVERING, ReplicaState.STOPPING, ] ) == 0 ): # Check for deleting. if target_replica_count == 0 and all_running_replica_cnt == 0: return DeploymentStatusInfo(status=DeploymentStatus.UPDATING), True # Check for a non-zero number of deployments. elif target_replica_count == running_at_target_version_replica_cnt: return DeploymentStatusInfo(status=DeploymentStatus.RUNNING), False return ( DeploymentStatusInfo( status=DeploymentStatus.UPDATING, message=( f""Running replicas of target version: "" f""{running_at_target_version_replica_cnt}, target "" ""replicas: {target_replica_count}"" ), ), False, ) ","Get the current deployment status. Checks the difference between the target vs. running replica count for the target version. TODO(edoakes): we should report the status as FAILED if replicas are repeatedly failing health checks. Need a reasonable heuristic here. Returns: (DeploymentStatusInfo, was_deleted) ",42,248,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_curr_status(self) -> Tuple[DeploymentStatusInfo, bool]: # TODO(edoakes): we could make this more efficient in steady-state by # having a ""healthy"" flag that gets flipped if an update or replica # failure happens. target_version = self._target_version target_replica_count = self._target_replicas all_running_replica_cnt = self._replicas.count(states=[ReplicaState.RUNNING]) running_at_target_version_replica_cnt = self._replicas.count( states=[ReplicaState.RUNNING], version=target_version ) failed_to_start_count = self._replica_constructor_retry_counter failed_to_start_threshold = min( MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, target_replica_count * 3 ) # Got to make a call to complete current deploy() goal after # start failure threshold reached, while we might still have # pending replicas in current goal. 
if ( failed_to_start_count >= failed_to_start_threshold and failed_to_start_threshold != 0 ): if running_at_target_version_replica_cnt > 0: # At least one RUNNING replica at target state, partial # success; We can stop tracking constructor failures and # leave it to the controller to fully scale to target # number of replicas and only return as completed once # reached target replica count self._replica_constructor_retry_counter = -1 else: return ( DeploymentStatusInfo( status=DeploymentStatus.FAILED, message=( ""The Deployment constructor failed "" f""{failed_to_start_count} times in a row. See "" ""logs for details."" ), ), False, ) # If we have pending ops, the current goal is *not* ready. if ( self._replicas.count( states=[ ReplicaState.STARTING, ReplicaState.UPDATING, ReplicaState.RECOVERING, ReplicaState.STOPPING, ] ) == 0 ): # Check for deleting. if target_replica_count == 0 and all_running_replica_cnt == 0: return DeploymentStatusInfo(status=DeploymentStatus.UPDATING), True # Check for a non-zero number of deployments. elif target_replica_count == running_at_target_version_replica_cnt: return DeploymentStatusInfo(status=DeploymentStatus.RUNNING), False return ( DeploymentStatusInfo( status=DeploymentStatus.UPDATING, message=( f""Running replicas of target version: "" f""{running_at_target_version_replica_cnt}, target "" ""replicas: {target_replica_count}"" ), ), False, ) ``` ###Assistant : Get the current deployment status. Checks the difference between the target vs. running replica count for the target version. TODO(edoakes): we should report the status as FAILED if replicas are repeatedly failing health checks. Need a reasonable heuristic here. Returns: (DeploymentStatusInfo, was_deleted) " 349,"def generate_square_subsequent_mask(self, length): return paddle.tensor.triu( (paddle.ones( (length, length), dtype=paddle.get_default_dtype()) * -np.inf), 1) "," Generate a square mask for the sequence. The mask ensures that the predictions for position i can depend only on the known outputs at positions less than i. Parameters: length (int|Tensor): The length of sequence. Returns: Tensor: Generated square mask according to the given length. Examples: .. code-block:: python import paddle from paddle.nn.layer.transformer import Transformer length = 5 d_model, n_head, dim_feedforward = 8, 4, 64 transformer_paddle = Transformer( d_model, n_head, dim_feedforward=dim_feedforward) mask = transformer_paddle.generate_square_subsequent_mask(length) print(mask) # [[ 0. -inf -inf -inf -inf] # [ 0. 0. -inf -inf -inf] # [ 0. 0. 0. -inf -inf] # [ 0. 0. 0. 0. -inf] # [ 0. 0. 0. 0. 0.]] ",110,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_square_subsequent_mask(self, length): return paddle.tensor.triu( (paddle.ones( (length, length), dtype=paddle.get_default_dtype()) * -np.inf), 1) ``` ###Assistant : Generate a square mask for the sequence. The mask ensures that the predictions for position i can depend only on the known outputs at positions less than i. Parameters: length (int|Tensor): The length of sequence. Returns: Tensor: Generated square mask according to the given length. Examples: .. code-block:: python import paddle from paddle.nn.layer.transformer import Transformer length = 5 d_model, n_head, dim_feedforward = 8, 4, 64 transformer_paddle = Transformer( d_model, n_head, dim_feedforward=dim_feedforward) mask = transformer_paddle.generate_square_subsequent_mask(length) print(mask) # [[ 0. 
-inf -inf -inf -inf] # [ 0. 0. -inf -inf -inf] # [ 0. 0. 0. -inf -inf] # [ 0. 0. 0. 0. -inf] # [ 0. 0. 0. 0. 0.]] " 350,"def find_requirement(self, req, upgrade): # type: (InstallRequirement, bool) -> Optional[InstallationCandidate] hashes = req.hashes(trust_internet=False) best_candidate_result = self.find_best_candidate( req.name, specifier=req.specifier, hashes=hashes, ) best_candidate = best_candidate_result.best_candidate installed_version = None # type: Optional[_BaseVersion] if req.satisfied_by is not None: installed_version = parse_version(req.satisfied_by.version) ","Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean Returns a InstallationCandidate if found, Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise ",25,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_requirement(self, req, upgrade): # type: (InstallRequirement, bool) -> Optional[InstallationCandidate] hashes = req.hashes(trust_internet=False) best_candidate_result = self.find_best_candidate( req.name, specifier=req.specifier, hashes=hashes, ) best_candidate = best_candidate_result.best_candidate installed_version = None # type: Optional[_BaseVersion] if req.satisfied_by is not None: installed_version = parse_version(req.satisfied_by.version) ``` ###Assistant : Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean Returns a InstallationCandidate if found, Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise " 351,"def wrap(self, source, outfile): if self.wrapcode: return self._wrap_div(self._wrap_pre(self._wrap_code(source))) else: return self._wrap_div(self._wrap_pre(source)) "," Wrap the ``source``, which is a generator yielding individual lines, in custom generators. See docstring for `format`. Can be overridden. ",20,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wrap(self, source, outfile): if self.wrapcode: return self._wrap_div(self._wrap_pre(self._wrap_code(source))) else: return self._wrap_div(self._wrap_pre(source)) ``` ###Assistant : Wrap the ``source``, which is a generator yielding individual lines, in custom generators. See docstring for `format`. Can be overridden. " 352,"def get_assessment_criteria(course): return frappe.get_all( ""Course Assessment Criteria"", fields=[""assessment_criteria"", ""weightage""], filters={""parent"": course}, order_by=""idx"", ) @frappe.whitelist()","Returns Assessmemt Criteria and their Weightage from Course Master. :param Course: Course ",12,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_assessment_criteria(course): return frappe.get_all( ""Course Assessment Criteria"", fields=[""assessment_criteria"", ""weightage""], filters={""parent"": course}, order_by=""idx"", ) @frappe.whitelist() ``` ###Assistant : Returns Assessmemt Criteria and their Weightage from Course Master. 
:param Course: Course " 353,"def rm_filesystems(name, device, config=""/etc/filesystems""): modified = False view_lines = [] if ""AIX"" not in __grains__[""kernel""]: return modified criteria = _FileSystemsEntry(name=name, dev=device) try: fsys_filedict = _filesystems(config, False) for fsys_view in fsys_filedict.items(): try: if criteria.match(fsys_view): modified = True else: view_lines.append(fsys_view) except _FileSystemsEntry.ParseError: view_lines.append(fsys_view) except OSError as exc: raise CommandExecutionError(""Couldn't read from {}: {}"".format(config, exc)) if modified: try: with salt.utils.files.fopen(config, ""wb"") as ofile: for fsys_view in view_lines: entry = fsys_view[1] list_strgs = _FileSystemsEntry.dict_to_list_lines(entry) ofile.writelines(salt.utils.data.encode(list_strgs)) except OSError as exc: raise CommandExecutionError(""Couldn't write to {}: {}"".format(config, exc)) except Exception as exc: raise CommandExecutionError(""rm_filesystems error exception {exc}"") return modified "," .. versionadded:: 2018.3.3 Remove the mount point from the filesystems CLI Example: .. code-block:: bash salt '*' mount.rm_filesystems /mnt/foo /dev/sdg ",20,93,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rm_filesystems(name, device, config=""/etc/filesystems""): modified = False view_lines = [] if ""AIX"" not in __grains__[""kernel""]: return modified criteria = _FileSystemsEntry(name=name, dev=device) try: fsys_filedict = _filesystems(config, False) for fsys_view in fsys_filedict.items(): try: if criteria.match(fsys_view): modified = True else: view_lines.append(fsys_view) except _FileSystemsEntry.ParseError: view_lines.append(fsys_view) except OSError as exc: raise CommandExecutionError(""Couldn't read from {}: {}"".format(config, exc)) if modified: try: with salt.utils.files.fopen(config, ""wb"") as ofile: for fsys_view in view_lines: entry = fsys_view[1] list_strgs = _FileSystemsEntry.dict_to_list_lines(entry) ofile.writelines(salt.utils.data.encode(list_strgs)) except OSError as exc: raise CommandExecutionError(""Couldn't write to {}: {}"".format(config, exc)) except Exception as exc: raise CommandExecutionError(""rm_filesystems error exception {exc}"") return modified ``` ###Assistant : .. versionadded:: 2018.3.3 Remove the mount point from the filesystems CLI Example: .. code-block:: bash salt '*' mount.rm_filesystems /mnt/foo /dev/sdg " 354,"def complete_graph(n, create_using=None): _, nodes = n G = empty_graph(nodes, create_using) if len(nodes) > 1: if G.is_directed(): edges = itertools.permutations(nodes, 2) else: edges = itertools.combinations(nodes, 2) G.add_edges_from(edges) return G ","Return the complete graph `K_n` with n nodes. A complete graph on `n` nodes means that all pairs of distinct nodes have an edge connecting them. Parameters ---------- n : int or iterable container of nodes If n is an integer, nodes are from range(n). If n is a container of nodes, those nodes appear in the graph. create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Examples -------- >>> G = nx.complete_graph(9) >>> len(G) 9 >>> G.size() 36 >>> G = nx.complete_graph(range(11, 14)) >>> list(G.nodes()) [11, 12, 13] >>> G = nx.complete_graph(4, nx.DiGraph()) >>> G.is_directed() True ",106,29,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def complete_graph(n, create_using=None): _, nodes = n G = empty_graph(nodes, create_using) if len(nodes) > 1: if G.is_directed(): edges = itertools.permutations(nodes, 2) else: edges = itertools.combinations(nodes, 2) G.add_edges_from(edges) return G ``` ###Assistant : Return the complete graph `K_n` with n nodes. A complete graph on `n` nodes means that all pairs of distinct nodes have an edge connecting them. Parameters ---------- n : int or iterable container of nodes If n is an integer, nodes are from range(n). If n is a container of nodes, those nodes appear in the graph. create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Examples -------- >>> G = nx.complete_graph(9) >>> len(G) 9 >>> G.size() 36 >>> G = nx.complete_graph(range(11, 14)) >>> list(G.nodes()) [11, 12, 13] >>> G = nx.complete_graph(4, nx.DiGraph()) >>> G.is_directed() True " 355,"def fit_predict(self, X, y=None, **fit_params): self._validate_params() fit_params_steps = self._check_fit_params(**fit_params) Xt = self._fit(X, y, **fit_params_steps) fit_params_last_step = fit_params_steps[self.steps[-1][0]] with _print_elapsed_time(""Pipeline"", self._log_message(len(self.steps) - 1)): y_pred = self.steps[-1][1].fit_predict(Xt, y, **fit_params_last_step) return y_pred ","Transform the data, and apply `fit_predict` with the final estimator. Call `fit_transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `fit_predict` method. Only valid if the final estimator implements `fit_predict`. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **fit_params : dict of string -> object Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. Returns ------- y_pred : ndarray Result of calling `fit_predict` on the final estimator. ",118,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit_predict(self, X, y=None, **fit_params): self._validate_params() fit_params_steps = self._check_fit_params(**fit_params) Xt = self._fit(X, y, **fit_params_steps) fit_params_last_step = fit_params_steps[self.steps[-1][0]] with _print_elapsed_time(""Pipeline"", self._log_message(len(self.steps) - 1)): y_pred = self.steps[-1][1].fit_predict(Xt, y, **fit_params_last_step) return y_pred ``` ###Assistant : Transform the data, and apply `fit_predict` with the final estimator. Call `fit_transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `fit_predict` method. Only valid if the final estimator implements `fit_predict`. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **fit_params : dict of string -> object Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. Returns ------- y_pred : ndarray Result of calling `fit_predict` on the final estimator. 
" 356,"def test_issue4849(entity_ruler_factory): nlp = English() patterns = [ {""label"": ""PERSON"", ""pattern"": ""joe biden"", ""id"": ""joe-biden""}, {""label"": ""PERSON"", ""pattern"": ""bernie sanders"", ""id"": ""bernie-sanders""}, ] ruler = nlp.add_pipe( entity_ruler_factory, name=""entity_ruler"", config={""phrase_matcher_attr"": ""LOWER""}, ) ruler.add_patterns(patterns) text = # USING 1 PROCESS count_ents = 0 for doc in nlp.pipe([text], n_process=1): count_ents += len([ent for ent in doc.ents if ent.ent_id > 0]) assert count_ents == 2 # USING 2 PROCESSES if isinstance(get_current_ops, NumpyOps): count_ents = 0 for doc in nlp.pipe([text], n_process=2): count_ents += len([ent for ent in doc.ents if ent.ent_id > 0]) assert count_ents == 2 @pytest.mark.issue(5918) @pytest.mark.parametrize(""entity_ruler_factory"", ENTITY_RULERS)"," The left is starting to take aim at Democratic front-runner Joe Biden. Sen. Bernie Sanders joined in her criticism: ""There is no 'middle ground' when it comes to climate policy."" ",30,94,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_issue4849(entity_ruler_factory): nlp = English() patterns = [ {""label"": ""PERSON"", ""pattern"": ""joe biden"", ""id"": ""joe-biden""}, {""label"": ""PERSON"", ""pattern"": ""bernie sanders"", ""id"": ""bernie-sanders""}, ] ruler = nlp.add_pipe( entity_ruler_factory, name=""entity_ruler"", config={""phrase_matcher_attr"": ""LOWER""}, ) ruler.add_patterns(patterns) text = # USING 1 PROCESS count_ents = 0 for doc in nlp.pipe([text], n_process=1): count_ents += len([ent for ent in doc.ents if ent.ent_id > 0]) assert count_ents == 2 # USING 2 PROCESSES if isinstance(get_current_ops, NumpyOps): count_ents = 0 for doc in nlp.pipe([text], n_process=2): count_ents += len([ent for ent in doc.ents if ent.ent_id > 0]) assert count_ents == 2 @pytest.mark.issue(5918) @pytest.mark.parametrize(""entity_ruler_factory"", ENTITY_RULERS) ``` ###Assistant : The left is starting to take aim at Democratic front-runner Joe Biden. Sen. Bernie Sanders joined in her criticism: ""There is no 'middle ground' when it comes to climate policy."" " 357,"def get_serializer_context(self): context = super().get_serializer_context() if hasattr(self.queryset.model, 'custom_fields'): content_type = ContentType.objects.get_for_model(self.queryset.model) context.update({ 'custom_fields': content_type.custom_fields.all(), }) return context "," For models which support custom fields, populate the `custom_fields` context. ",10,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_serializer_context(self): context = super().get_serializer_context() if hasattr(self.queryset.model, 'custom_fields'): content_type = ContentType.objects.get_for_model(self.queryset.model) context.update({ 'custom_fields': content_type.custom_fields.all(), }) return context ``` ###Assistant : For models which support custom fields, populate the `custom_fields` context. " 358,"def execute (func, args, msg=None, verbose=0, dry_run=0): if msg is None: msg = ""%s%r"" % (func.__name__, args) if msg[-2:] == ',)': # correct for singleton tuple msg = msg[0:-2] + ')' log.info(msg) if not dry_run: func(*args) ","Perform some action that affects the outside world (eg. by writing to the filesystem). Such actions are special because they are disabled by the 'dry_run' flag. 
This method takes care of all that bureaucracy for you; all you have to do is supply the function to call and an argument tuple for it (to embody the ""external action"" being performed), and an optional message to print. ",66,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute (func, args, msg=None, verbose=0, dry_run=0): if msg is None: msg = ""%s%r"" % (func.__name__, args) if msg[-2:] == ',)': # correct for singleton tuple msg = msg[0:-2] + ')' log.info(msg) if not dry_run: func(*args) ``` ###Assistant : Perform some action that affects the outside world (eg. by writing to the filesystem). Such actions are special because they are disabled by the 'dry_run' flag. This method takes care of all that bureaucracy for you; all you have to do is supply the function to call and an argument tuple for it (to embody the ""external action"" being performed), and an optional message to print. " 359,"def call(self, features, cols_to_output_tensors=None, training=None): if training is None: training = backend.learning_phase() if not isinstance(features, dict): raise ValueError( ""We expected a dictionary here. Instead we got: "", features ) transformation_cache = ( tf.__internal__.feature_column.FeatureTransformationCache(features) ) output_tensors = [] for column in self._feature_columns: with backend.name_scope(column.name): try: tensor = column.get_dense_tensor( transformation_cache, self._state_manager, training=training, ) except TypeError: tensor = column.get_dense_tensor( transformation_cache, self._state_manager ) processed_tensors = self._process_dense_tensor(column, tensor) if cols_to_output_tensors is not None: cols_to_output_tensors[column] = processed_tensors output_tensors.append(processed_tensors) return self._verify_and_concat_tensors(output_tensors) ","Returns a dense tensor corresponding to the `feature_columns`. Example usage: >>> t1 = tf.feature_column.embedding_column( ... tf.feature_column.categorical_column_with_hash_bucket(""t1"", 2), ... dimension=8) >>> t2 = tf.feature_column.numeric_column('t2') >>> feature_layer = tf.compat.v1.keras.layers.DenseFeatures([t1, t2]) >>> features = {""t1"": tf.constant([""a"", ""b""]), ""t2"": tf.constant([1, 2])} >>> dense_tensor = feature_layer(features, training=True) Args: features: A mapping from key to tensors. `FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at 'price' key in this dict. Values can be a `SparseTensor` or a `Tensor` depends on corresponding `FeatureColumn`. cols_to_output_tensors: If not `None`, this will be filled with a dict mapping feature columns to output tensors created. training: Python boolean or None, indicating whether to the layer is being run in training mode. This argument is passed to the call method of any `FeatureColumn` that takes a `training` argument. For example, if a `FeatureColumn` performed dropout, the column could expose a `training` argument to control whether the dropout should be applied. If `None`, defaults to `tf.keras.backend.learning_phase()`. Returns: A `Tensor` which represents input layer of a model. Its shape is (batch_size, first_layer_dimension) and its dtype is `float32`. first_layer_dimension is determined based on given `feature_columns`. Raises: ValueError: If features are not a dictionary. ",191,74,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def call(self, features, cols_to_output_tensors=None, training=None): if training is None: training = backend.learning_phase() if not isinstance(features, dict): raise ValueError( ""We expected a dictionary here. Instead we got: "", features ) transformation_cache = ( tf.__internal__.feature_column.FeatureTransformationCache(features) ) output_tensors = [] for column in self._feature_columns: with backend.name_scope(column.name): try: tensor = column.get_dense_tensor( transformation_cache, self._state_manager, training=training, ) except TypeError: tensor = column.get_dense_tensor( transformation_cache, self._state_manager ) processed_tensors = self._process_dense_tensor(column, tensor) if cols_to_output_tensors is not None: cols_to_output_tensors[column] = processed_tensors output_tensors.append(processed_tensors) return self._verify_and_concat_tensors(output_tensors) ``` ###Assistant : Returns a dense tensor corresponding to the `feature_columns`. Example usage: >>> t1 = tf.feature_column.embedding_column( ... tf.feature_column.categorical_column_with_hash_bucket(""t1"", 2), ... dimension=8) >>> t2 = tf.feature_column.numeric_column('t2') >>> feature_layer = tf.compat.v1.keras.layers.DenseFeatures([t1, t2]) >>> features = {""t1"": tf.constant([""a"", ""b""]), ""t2"": tf.constant([1, 2])} >>> dense_tensor = feature_layer(features, training=True) Args: features: A mapping from key to tensors. `FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at 'price' key in this dict. Values can be a `SparseTensor` or a `Tensor` depends on corresponding `FeatureColumn`. cols_to_output_tensors: If not `None`, this will be filled with a dict mapping feature columns to output tensors created. training: Python boolean or None, indicating whether to the layer is being run in training mode. This argument is passed to the call method of any `FeatureColumn` that takes a `training` argument. For example, if a `FeatureColumn` performed dropout, the column could expose a `training` argument to control whether the dropout should be applied. If `None`, defaults to `tf.keras.backend.learning_phase()`. Returns: A `Tensor` which represents input layer of a model. Its shape is (batch_size, first_layer_dimension) and its dtype is `float32`. first_layer_dimension is determined based on given `feature_columns`. Raises: ValueError: If features are not a dictionary. " 360,"def patch_pickle() -> Iterator[None]: orig_loads = pkl.loads try: setattr(pkl, ""loads"", loads) yield finally: setattr(pkl, ""loads"", orig_loads) "," Temporarily patch pickle to use our unpickler. ",7,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def patch_pickle() -> Iterator[None]: orig_loads = pkl.loads try: setattr(pkl, ""loads"", loads) yield finally: setattr(pkl, ""loads"", orig_loads) ``` ###Assistant : Temporarily patch pickle to use our unpickler. " 361,"def get_filesystem_type(filepath): # We import it locally so that click autocomplete works import psutil root_type = ""unknown"" for part in psutil.disk_partitions(): if part.mountpoint == '/': root_type = part.fstype continue if filepath.startswith(part.mountpoint): return part.fstype return root_type "," Determine the type of filesystem used - we might want to use different parameters if tmpfs is used. :param filepath: path to check :return: type of filesystem ",27,35,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_filesystem_type(filepath): # We import it locally so that click autocomplete works import psutil root_type = ""unknown"" for part in psutil.disk_partitions(): if part.mountpoint == '/': root_type = part.fstype continue if filepath.startswith(part.mountpoint): return part.fstype return root_type ``` ###Assistant : Determine the type of filesystem used - we might want to use different parameters if tmpfs is used. :param filepath: path to check :return: type of filesystem " 362,"def panther_similarity(G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None): r import numpy as np num_nodes = G.number_of_nodes() if num_nodes < k: warnings.warn( f""Number of nodes is {num_nodes}, but requested k is {k}. "" ""Setting k to number of nodes."" ) k = num_nodes # According to [1], they empirically determined # a good value for ``eps`` to be sqrt( 1 / |E| ) if eps is None: eps = np.sqrt(1.0 / G.number_of_edges()) inv_node_map = {name: index for index, name in enumerate(G.nodes)} node_map = np.array(G) # Calculate the sample size ``R`` for how many paths # to randomly generate t_choose_2 = math.comb(path_length, 2) sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta))) index_map = {} _ = list( generate_random_paths( G, sample_size, path_length=path_length, index_map=index_map ) ) S = np.zeros(num_nodes) inv_sample_size = 1 / sample_size source_paths = set(index_map[source]) # Calculate the path similarities # between ``source`` (v) and ``node`` (v_j) # using our inverted index mapping of # vertices to paths for node, paths in index_map.items(): # Only consider paths where both # ``node`` and ``source`` are present common_paths = source_paths.intersection(paths) S[inv_node_map[node]] = len(common_paths) * inv_sample_size # Retrieve top ``k`` similar # Note: the below performed anywhere from 4-10x faster # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]`` top_k_unsorted = np.argpartition(S, -k)[-k:] top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1] # Add back the similarity scores top_k_sorted_names = map(lambda n: node_map[n], top_k_sorted) top_k_with_val = dict(zip(top_k_sorted_names, S[top_k_sorted])) # Remove the self-similarity top_k_with_val.pop(source, None) return top_k_with_val ","Returns the Panther similarity of nodes in the graph `G` to node ``v``. Panther is a similarity metric that says ""two objects are considered to be similar if they frequently appear on the same paths."" [1]_. Parameters ---------- G : NetworkX graph A NetworkX graph source : node Source node for which to find the top `k` similar other nodes k : int (default = 5) The number of most similar nodes to return path_length : int (default = 5) How long the randomly generated paths should be (``T`` in [1]_) c : float (default = 0.5) A universal positive constant used to scale the number of sample random paths to generate. delta : float (default = 0.1) The probability that the similarity $S$ is not an epsilon-approximation to (R, phi), where $R$ is the number of random paths and $\phi$ is the probability that an element sampled from a set $A \subseteq D$, where $D$ is the domain. eps : float or None (default = None) The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore, if no value is provided, the recommended computed value will be used. Returns ------- similarity : dictionary Dictionary of nodes to similarity scores (as floats). 
Note: the self-similarity (i.e., ``v``) will not be included in the returned dictionary. Examples -------- >>> G = nx.star_graph(10) >>> sim = nx.panther_similarity(G, 0) References ---------- .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J. Panther: Fast top-k similarity search on large networks. In Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454). Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267. ",275,240,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def panther_similarity(G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None): r import numpy as np num_nodes = G.number_of_nodes() if num_nodes < k: warnings.warn( f""Number of nodes is {num_nodes}, but requested k is {k}. "" ""Setting k to number of nodes."" ) k = num_nodes # According to [1], they empirically determined # a good value for ``eps`` to be sqrt( 1 / |E| ) if eps is None: eps = np.sqrt(1.0 / G.number_of_edges()) inv_node_map = {name: index for index, name in enumerate(G.nodes)} node_map = np.array(G) # Calculate the sample size ``R`` for how many paths # to randomly generate t_choose_2 = math.comb(path_length, 2) sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta))) index_map = {} _ = list( generate_random_paths( G, sample_size, path_length=path_length, index_map=index_map ) ) S = np.zeros(num_nodes) inv_sample_size = 1 / sample_size source_paths = set(index_map[source]) # Calculate the path similarities # between ``source`` (v) and ``node`` (v_j) # using our inverted index mapping of # vertices to paths for node, paths in index_map.items(): # Only consider paths where both # ``node`` and ``source`` are present common_paths = source_paths.intersection(paths) S[inv_node_map[node]] = len(common_paths) * inv_sample_size # Retrieve top ``k`` similar # Note: the below performed anywhere from 4-10x faster # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]`` top_k_unsorted = np.argpartition(S, -k)[-k:] top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1] # Add back the similarity scores top_k_sorted_names = map(lambda n: node_map[n], top_k_sorted) top_k_with_val = dict(zip(top_k_sorted_names, S[top_k_sorted])) # Remove the self-similarity top_k_with_val.pop(source, None) return top_k_with_val ``` ###Assistant : Returns the Panther similarity of nodes in the graph `G` to node ``v``. Panther is a similarity metric that says ""two objects are considered to be similar if they frequently appear on the same paths."" [1]_. Parameters ---------- G : NetworkX graph A NetworkX graph source : node Source node for which to find the top `k` similar other nodes k : int (default = 5) The number of most similar nodes to return path_length : int (default = 5) How long the randomly generated paths should be (``T`` in [1]_) c : float (default = 0.5) A universal positive constant used to scale the number of sample random paths to generate. delta : float (default = 0.1) The probability that the similarity $S$ is not an epsilon-approximation to (R, phi), where $R$ is the number of random paths and $\phi$ is the probability that an element sampled from a set $A \subseteq D$, where $D$ is the domain. eps : float or None (default = None) The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore, if no value is provided, the recommended computed value will be used. 
Returns ------- similarity : dictionary Dictionary of nodes to similarity scores (as floats). Note: the self-similarity (i.e., ``v``) will not be included in the returned dictionary. Examples -------- >>> G = nx.star_graph(10) >>> sim = nx.panther_similarity(G, 0) References ---------- .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J. Panther: Fast top-k similarity search on large networks. In Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454). Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267. " 363,"def _phonemize_preprocess(self, text) -> Tuple[List[str], List]: text = text.strip() if self._keep_puncs: # a tuple (text, punctuation marks) return self._punctuator.strip_to_restore(text) return [self._punctuator.strip(text)], [] ","Preprocess the text before phonemization 1. remove spaces 2. remove punctuation Override this if you need a different behaviour ",19,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _phonemize_preprocess(self, text) -> Tuple[List[str], List]: text = text.strip() if self._keep_puncs: # a tuple (text, punctuation marks) return self._punctuator.strip_to_restore(text) return [self._punctuator.strip(text)], [] ``` ###Assistant : Preprocess the text before phonemization 1. remove spaces 2. remove punctuation Override this if you need a different behaviour " 364,"def ensure_pathy(path): from pathy import Pathy # noqa: F811 return Pathy.fluid(path) ","Temporary helper to prevent importing Pathy globally (which can cause slow and annoying Google Cloud warning).",16,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ensure_pathy(path): from pathy import Pathy # noqa: F811 return Pathy.fluid(path) ``` ###Assistant : Temporary helper to prevent importing Pathy globally (which can cause slow and annoying Google Cloud warning). " 365,"def index(self, request, extra_context=None): app_list = self.get_app_list(request) context = { **self.each_context(request), ""title"": self.index_title, ""subtitle"": None, ""app_list"": app_list, **(extra_context or {}), } request.current_app = self.name return TemplateResponse( request, self.index_template or ""admin/index.html"", context ) "," Display the main admin index page, which lists all of the installed apps that have been registered in this site. ",20,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def index(self, request, extra_context=None): app_list = self.get_app_list(request) context = { **self.each_context(request), ""title"": self.index_title, ""subtitle"": None, ""app_list"": app_list, **(extra_context or {}), } request.current_app = self.name return TemplateResponse( request, self.index_template or ""admin/index.html"", context ) ``` ###Assistant : Display the main admin index page, which lists all of the installed apps that have been registered in this site. 
" 366,"def generate_config_from_template(config_dir, config_path, environ, ownership): for v in (""SYNAPSE_SERVER_NAME"", ""SYNAPSE_REPORT_STATS""): if v not in environ: error( ""Environment variable '%s' is mandatory when generating a config file."" % (v,) ) # populate some params from data files (if they exist, else create new ones) environ = environ.copy() secrets = { ""registration"": ""SYNAPSE_REGISTRATION_SHARED_SECRET"", ""macaroon"": ""SYNAPSE_MACAROON_SECRET_KEY"", } for name, secret in secrets.items(): if secret not in environ: filename = ""/data/%s.%s.key"" % (environ[""SYNAPSE_SERVER_NAME""], name) # if the file already exists, load in the existing value; otherwise, # generate a new secret and write it to a file if os.path.exists(filename): log(""Reading %s from %s"" % (secret, filename)) with open(filename) as handle: value = handle.read() else: log(""Generating a random secret for {}"".format(secret)) value = codecs.encode(os.urandom(32), ""hex"").decode() with open(filename, ""w"") as handle: handle.write(value) environ[secret] = value environ[""SYNAPSE_APPSERVICES""] = glob.glob(""/data/appservices/*.yaml"") if not os.path.exists(config_dir): os.mkdir(config_dir) # Convert SYNAPSE_NO_TLS to boolean if exists if ""SYNAPSE_NO_TLS"" in environ: tlsanswerstring = str.lower(environ[""SYNAPSE_NO_TLS""]) if tlsanswerstring in (""true"", ""on"", ""1"", ""yes""): environ[""SYNAPSE_NO_TLS""] = True else: if tlsanswerstring in (""false"", ""off"", ""0"", ""no""): environ[""SYNAPSE_NO_TLS""] = False else: error( 'Environment variable ""SYNAPSE_NO_TLS"" found but value ""' + tlsanswerstring + '"" unrecognized; exiting.' ) if ""SYNAPSE_LOG_CONFIG"" not in environ: environ[""SYNAPSE_LOG_CONFIG""] = config_dir + ""/log.config"" log(""Generating synapse config file "" + config_path) convert(""/conf/homeserver.yaml"", config_path, environ) log_config_file = environ[""SYNAPSE_LOG_CONFIG""] log(""Generating log config file "" + log_config_file) convert(""/conf/log.config"", log_config_file, environ) # Hopefully we already have a signing key, but generate one if not. args = [ sys.executable, ""-m"", ""synapse.app.homeserver"", ""--config-path"", config_path, # tell synapse to put generated keys in /data rather than /compiled ""--keys-directory"", config_dir, ""--generate-keys"", ] if ownership is not None: log(f""Setting ownership on /data to {ownership}"") subprocess.check_output([""chown"", ""-R"", ownership, ""/data""]) args = [""gosu"", ownership] + args subprocess.check_output(args) ","Generate a homeserver.yaml from environment variables Args: config_dir (str): where to put generated config files config_path (str): where to put the main config file environ (dict): environment dictionary ownership (str|None): "":"" string which will be used to set ownership of the generated configs. If None, ownership will not change. ",49,279,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def generate_config_from_template(config_dir, config_path, environ, ownership): for v in (""SYNAPSE_SERVER_NAME"", ""SYNAPSE_REPORT_STATS""): if v not in environ: error( ""Environment variable '%s' is mandatory when generating a config file."" % (v,) ) # populate some params from data files (if they exist, else create new ones) environ = environ.copy() secrets = { ""registration"": ""SYNAPSE_REGISTRATION_SHARED_SECRET"", ""macaroon"": ""SYNAPSE_MACAROON_SECRET_KEY"", } for name, secret in secrets.items(): if secret not in environ: filename = ""/data/%s.%s.key"" % (environ[""SYNAPSE_SERVER_NAME""], name) # if the file already exists, load in the existing value; otherwise, # generate a new secret and write it to a file if os.path.exists(filename): log(""Reading %s from %s"" % (secret, filename)) with open(filename) as handle: value = handle.read() else: log(""Generating a random secret for {}"".format(secret)) value = codecs.encode(os.urandom(32), ""hex"").decode() with open(filename, ""w"") as handle: handle.write(value) environ[secret] = value environ[""SYNAPSE_APPSERVICES""] = glob.glob(""/data/appservices/*.yaml"") if not os.path.exists(config_dir): os.mkdir(config_dir) # Convert SYNAPSE_NO_TLS to boolean if exists if ""SYNAPSE_NO_TLS"" in environ: tlsanswerstring = str.lower(environ[""SYNAPSE_NO_TLS""]) if tlsanswerstring in (""true"", ""on"", ""1"", ""yes""): environ[""SYNAPSE_NO_TLS""] = True else: if tlsanswerstring in (""false"", ""off"", ""0"", ""no""): environ[""SYNAPSE_NO_TLS""] = False else: error( 'Environment variable ""SYNAPSE_NO_TLS"" found but value ""' + tlsanswerstring + '"" unrecognized; exiting.' ) if ""SYNAPSE_LOG_CONFIG"" not in environ: environ[""SYNAPSE_LOG_CONFIG""] = config_dir + ""/log.config"" log(""Generating synapse config file "" + config_path) convert(""/conf/homeserver.yaml"", config_path, environ) log_config_file = environ[""SYNAPSE_LOG_CONFIG""] log(""Generating log config file "" + log_config_file) convert(""/conf/log.config"", log_config_file, environ) # Hopefully we already have a signing key, but generate one if not. args = [ sys.executable, ""-m"", ""synapse.app.homeserver"", ""--config-path"", config_path, # tell synapse to put generated keys in /data rather than /compiled ""--keys-directory"", config_dir, ""--generate-keys"", ] if ownership is not None: log(f""Setting ownership on /data to {ownership}"") subprocess.check_output([""chown"", ""-R"", ownership, ""/data""]) args = [""gosu"", ownership] + args subprocess.check_output(args) ``` ###Assistant : Generate a homeserver.yaml from environment variables Args: config_dir (str): where to put generated config files config_path (str): where to put the main config file environ (dict): environment dictionary ownership (str|None): "":"" string which will be used to set ownership of the generated configs. If None, ownership will not change. " 367,"def donation_vector(donate_argnums, args, kwargs) -> Tuple[bool, ...]: res: List[bool] = [] for i, arg in enumerate(args): donate = bool(i in donate_argnums) res.extend((donate,) * tree_structure(arg).num_leaves) res.extend((False,) * tree_structure(kwargs).num_leaves) return tuple(res) ",Returns a tuple with a boolean value for each leaf in args.,12,29,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def donation_vector(donate_argnums, args, kwargs) -> Tuple[bool, ...]: res: List[bool] = [] for i, arg in enumerate(args): donate = bool(i in donate_argnums) res.extend((donate,) * tree_structure(arg).num_leaves) res.extend((False,) * tree_structure(kwargs).num_leaves) return tuple(res) ``` ###Assistant : Returns a tuple with a boolean value for each leaf in args. " 368,"def set_exception(self, exception): if self._state != _PENDING: raise exceptions.InvalidStateError(f'{self._state}: {self!r}') if isinstance(exception, type): exception = exception() if type(exception) is StopIteration: raise TypeError(""StopIteration interacts badly with generators "" ""and cannot be raised into a Future"") self._exception = exception self._state = _FINISHED self.__schedule_callbacks() self.__log_traceback = True ","Mark the future done and set an exception. If the future is already done when this method is called, raises InvalidStateError. ",21,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_exception(self, exception): if self._state != _PENDING: raise exceptions.InvalidStateError(f'{self._state}: {self!r}') if isinstance(exception, type): exception = exception() if type(exception) is StopIteration: raise TypeError(""StopIteration interacts badly with generators "" ""and cannot be raised into a Future"") self._exception = exception self._state = _FINISHED self.__schedule_callbacks() self.__log_traceback = True ``` ###Assistant : Mark the future done and set an exception. If the future is already done when this method is called, raises InvalidStateError. " 369,"def statistics(self, refresh=False, approximate=False): # Prepare array with arguments for capi function smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double() stats_args = [ self._ptr, c_int(approximate), byref(smin), byref(smax), byref(smean), byref(sstd), c_void_p(), c_void_p(), ] if refresh or self._stats_refresh: func = capi.compute_band_statistics else: # Add additional argument to force computation if there is no # existing PAM file to take the values from. force = True stats_args.insert(2, c_int(force)) func = capi.get_band_statistics # Computation of statistics fails for empty bands. try: func(*stats_args) result = smin.value, smax.value, smean.value, sstd.value except GDALException: result = (None, None, None, None) self._stats_refresh = False return result "," Compute statistics on the pixel values of this band. The return value is a tuple with the following structure: (minimum, maximum, mean, standard deviation). If approximate=True, the statistics may be computed based on overviews or a subset of image tiles. If refresh=True, the statistics will be computed from the data directly, and the cache will be updated where applicable. For empty bands (where all pixel values are nodata), all statistics values are returned as None. For raster formats using Persistent Auxiliary Metadata (PAM) services, the statistics might be cached in an auxiliary file. ",93,98,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def statistics(self, refresh=False, approximate=False): # Prepare array with arguments for capi function smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double() stats_args = [ self._ptr, c_int(approximate), byref(smin), byref(smax), byref(smean), byref(sstd), c_void_p(), c_void_p(), ] if refresh or self._stats_refresh: func = capi.compute_band_statistics else: # Add additional argument to force computation if there is no # existing PAM file to take the values from. force = True stats_args.insert(2, c_int(force)) func = capi.get_band_statistics # Computation of statistics fails for empty bands. try: func(*stats_args) result = smin.value, smax.value, smean.value, sstd.value except GDALException: result = (None, None, None, None) self._stats_refresh = False return result ``` ###Assistant : Compute statistics on the pixel values of this band. The return value is a tuple with the following structure: (minimum, maximum, mean, standard deviation). If approximate=True, the statistics may be computed based on overviews or a subset of image tiles. If refresh=True, the statistics will be computed from the data directly, and the cache will be updated where applicable. For empty bands (where all pixel values are nodata), all statistics values are returned as None. For raster formats using Persistent Auxiliary Metadata (PAM) services, the statistics might be cached in an auxiliary file. " 370,"def test_smaller_request_deduplicated(self) -> None: req1 = ensureDeferred( self.state_datastore._get_state_for_group_using_inflight_cache( 42, StateFilter.from_types(((""test.type"", None),)) ) ) self.pump(by=0.1) # This should have gone to the database self.assertEqual(len(self.get_state_group_calls), 1) self.assertFalse(req1.called) req2 = ensureDeferred( self.state_datastore._get_state_for_group_using_inflight_cache( 42, StateFilter.from_types(((""test.type"", ""b""),)) ) ) self.pump(by=0.1) # No more calls should have gone to the database, because the second # request was already in the in-flight cache! self.assertEqual(len(self.get_state_group_calls), 1) self.assertFalse(req1.called) self.assertFalse(req2.called) groups, sf, d = self.get_state_group_calls[0] self.assertEqual(groups, (42,)) # The state filter is expanded internally for increased cache hit rate, # so we the database sees a wider state filter than requested. self.assertEqual(sf, ALL_NON_MEMBERS_STATE_FILTER) # Now we can complete the request self._complete_request_fake(groups, sf, d) self.assertEqual( self.get_success(req1), {(""test.type"", ""a""): ""AAA"", (""test.type"", ""b""): ""BBB""}, ) self.assertEqual(self.get_success(req2), {(""test.type"", ""b""): ""BBB""}) "," Tests that duplicate requests for state are deduplicated. This test: - requests some state (state group 42, 'all' state filter) - requests a subset of that state, before the first request finishes - checks to see that only one database query was made - completes the database query - checks that both requests see the correct retrieved state ",58,116,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_smaller_request_deduplicated(self) -> None: req1 = ensureDeferred( self.state_datastore._get_state_for_group_using_inflight_cache( 42, StateFilter.from_types(((""test.type"", None),)) ) ) self.pump(by=0.1) # This should have gone to the database self.assertEqual(len(self.get_state_group_calls), 1) self.assertFalse(req1.called) req2 = ensureDeferred( self.state_datastore._get_state_for_group_using_inflight_cache( 42, StateFilter.from_types(((""test.type"", ""b""),)) ) ) self.pump(by=0.1) # No more calls should have gone to the database, because the second # request was already in the in-flight cache! self.assertEqual(len(self.get_state_group_calls), 1) self.assertFalse(req1.called) self.assertFalse(req2.called) groups, sf, d = self.get_state_group_calls[0] self.assertEqual(groups, (42,)) # The state filter is expanded internally for increased cache hit rate, # so we the database sees a wider state filter than requested. self.assertEqual(sf, ALL_NON_MEMBERS_STATE_FILTER) # Now we can complete the request self._complete_request_fake(groups, sf, d) self.assertEqual( self.get_success(req1), {(""test.type"", ""a""): ""AAA"", (""test.type"", ""b""): ""BBB""}, ) self.assertEqual(self.get_success(req2), {(""test.type"", ""b""): ""BBB""}) ``` ###Assistant : Tests that duplicate requests for state are deduplicated. This test: - requests some state (state group 42, 'all' state filter) - requests a subset of that state, before the first request finishes - checks to see that only one database query was made - completes the database query - checks that both requests see the correct retrieved state " 371,"def get_observation(self, agent): speed = 0 distance = self._config[""scenario_config""][""misc""][""max_distance""] if agent in self.simulation.veh_subscriptions: speed = round( self.simulation.veh_subscriptions[agent][tc.VAR_SPEED] * MS_TO_KMH ) leader = self.simulation.veh_subscriptions[agent][tc.VAR_LEADER] if leader: # compatible with traci veh, dist = leader if veh: # compatible with libsumo distance = round(dist) ret = [speed, distance] logger.debug(""Agent %s --> Obs: %s"", agent, pformat(ret)) return ret "," Returns the observation of a given agent. See http://sumo.sourceforge.net/pydoc/traci._simulation.html ",9,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_observation(self, agent): speed = 0 distance = self._config[""scenario_config""][""misc""][""max_distance""] if agent in self.simulation.veh_subscriptions: speed = round( self.simulation.veh_subscriptions[agent][tc.VAR_SPEED] * MS_TO_KMH ) leader = self.simulation.veh_subscriptions[agent][tc.VAR_LEADER] if leader: # compatible with traci veh, dist = leader if veh: # compatible with libsumo distance = round(dist) ret = [speed, distance] logger.debug(""Agent %s --> Obs: %s"", agent, pformat(ret)) return ret ``` ###Assistant : Returns the observation of a given agent. See http://sumo.sourceforge.net/pydoc/traci._simulation.html " 372,"def register(self, name, color_list): if name in self._BUILTIN_COLOR_SEQUENCES: raise ValueError(f""{name!r} is a reserved name for a builtin "" ""color sequence"") color_list = list(color_list) # force copy and coerce type to list for color in color_list: try: to_rgba(color) except ValueError: raise ValueError( f""{color!r} is not a valid color specification"") self._color_sequences[name] = color_list "," Register a new color sequence. 
The color sequence registry stores a copy of the given *color_list*, so that future changes to the original list do not affect the registered color sequence. Think of this as the registry taking a snapshot of *color_list* at registration. Parameters ---------- name : str The name for the color sequence. color_list : list of colors An iterable returning valid Matplotlib colors when iterating over. Note however that the returned color sequence will always be a list regardless of the input type. ",86,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def register(self, name, color_list): if name in self._BUILTIN_COLOR_SEQUENCES: raise ValueError(f""{name!r} is a reserved name for a builtin "" ""color sequence"") color_list = list(color_list) # force copy and coerce type to list for color in color_list: try: to_rgba(color) except ValueError: raise ValueError( f""{color!r} is not a valid color specification"") self._color_sequences[name] = color_list ``` ###Assistant : Register a new color sequence. The color sequence registry stores a copy of the given *color_list*, so that future changes to the original list do not affect the registered color sequence. Think of this as the registry taking a snapshot of *color_list* at registration. Parameters ---------- name : str The name for the color sequence. color_list : list of colors An iterable returning valid Matplotlib colors when iterating over. Note however that the returned color sequence will always be a list regardless of the input type. " 373,"def test_get_states_no_attributes(hass_recorder): hass = hass_recorder() now, future, states = _setup_get_states(hass) for state in states: state.attributes = {} # Get states returns everything before POINT for all entities for state1, state2 in zip( states, sorted( history.get_states(hass, future, no_attributes=True), key=lambda state: state.entity_id, ), ): assert state1 == state2 # Get states returns everything before POINT for tested entities entities = [f""test.point_in_time_{i % 5}"" for i in range(5)] for state1, state2 in zip( states, sorted( history.get_states(hass, future, entities, no_attributes=True), key=lambda state: state.entity_id, ), ): assert state1 == state2 # Test get_state here because we have a DB setup assert states[0] == history.get_state( hass, future, states[0].entity_id, no_attributes=True ) time_before_recorder_ran = now - timedelta(days=1000) assert history.get_states(hass, time_before_recorder_ran, no_attributes=True) == [] assert ( history.get_state(hass, time_before_recorder_ran, ""demo.id"", no_attributes=True) is None ) @pytest.mark.parametrize( ""attributes, no_attributes, limit"", [ ({""attr"": True}, False, 5000), ({}, True, 5000), ({""attr"": True}, False, 3), ({}, True, 3), ], )",Test getting states without attributes at a specific point in time.,11,145,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_get_states_no_attributes(hass_recorder): hass = hass_recorder() now, future, states = _setup_get_states(hass) for state in states: state.attributes = {} # Get states returns everything before POINT for all entities for state1, state2 in zip( states, sorted( history.get_states(hass, future, no_attributes=True), key=lambda state: state.entity_id, ), ): assert state1 == state2 # Get states returns everything before POINT for tested entities entities = [f""test.point_in_time_{i % 5}"" for i in range(5)] for state1, state2 in zip( states, sorted( history.get_states(hass, future, entities, no_attributes=True), key=lambda state: state.entity_id, ), ): assert state1 == state2 # Test get_state here because we have a DB setup assert states[0] == history.get_state( hass, future, states[0].entity_id, no_attributes=True ) time_before_recorder_ran = now - timedelta(days=1000) assert history.get_states(hass, time_before_recorder_ran, no_attributes=True) == [] assert ( history.get_state(hass, time_before_recorder_ran, ""demo.id"", no_attributes=True) is None ) @pytest.mark.parametrize( ""attributes, no_attributes, limit"", [ ({""attr"": True}, False, 5000), ({}, True, 5000), ({""attr"": True}, False, 3), ({}, True, 3), ], ) ``` ###Assistant : Test getting states without attributes at a specific point in time. " 374,"def set_until(self, frame, lineno=None): # the name ""until"" is borrowed from gdb if lineno is None: lineno = frame.f_lineno + 1 self._set_stopinfo(frame, frame, lineno) ","Stop when the line with the lineno greater than the current one is reached or when returning from current frame.",20,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_until(self, frame, lineno=None): # the name ""until"" is borrowed from gdb if lineno is None: lineno = frame.f_lineno + 1 self._set_stopinfo(frame, frame, lineno) ``` ###Assistant : Stop when the line with the lineno greater than the current one is reached or when returning from current frame. " 375,"async def get_device_state(self, hass): websession = async_get_clientsession(hass, self._verify_ssl) rendered_headers = template.render_complex(self._headers, parse_result=False) rendered_params = template.render_complex(self._params) ",Get the latest data from REST API and update the state.,11,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def get_device_state(self, hass): websession = async_get_clientsession(hass, self._verify_ssl) rendered_headers = template.render_complex(self._headers, parse_result=False) rendered_params = template.render_complex(self._params) ``` ###Assistant : Get the latest data from REST API and update the state. " 376,"def reset_channel(self) -> None: if self.channel: self.channel.close() self.channel = grpc.insecure_channel(self.real_addr, options=GRPC_OPTIONS) grpc.channel_ready_future(self.channel) self.task_servicer.set_channel(self.channel) self.data_servicer.set_channel(self.channel) self.logs_servicer.set_channel(self.channel) "," Manually close and reopen the channel to the real ray server. This simulates a disconnection between the client and the server. ",21,15,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def reset_channel(self) -> None: if self.channel: self.channel.close() self.channel = grpc.insecure_channel(self.real_addr, options=GRPC_OPTIONS) grpc.channel_ready_future(self.channel) self.task_servicer.set_channel(self.channel) self.data_servicer.set_channel(self.channel) self.logs_servicer.set_channel(self.channel) ``` ###Assistant : Manually close and reopen the channel to the real ray server. This simulates a disconnection between the client and the server. " 377,"def wait_scroll_pos_changed(self, x=None, y=None): __tracebackhide__ = (lambda e: e.errisinstance(testprocess.WaitForTimeout)) if (x is None and y is not None) or (y is None and x is not None): raise ValueError(""Either both x/y or neither must be given!"") if x is None and y is None: point = 'Py*.QtCore.QPoint(*, *)' # not counting 0/0 here elif x == '0' and y == '0': point = 'Py*.QtCore.QPoint()' else: point = 'Py*.QtCore.QPoint({}, {})'.format(x, y) self.wait_for(category='webview', message='Scroll position changed to ' + point) ","Wait until a ""Scroll position changed"" message was found. With QtWebEngine, on older Qt versions which lack QWebEnginePage.scrollPositionChanged, this also skips the test. ",23,78,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wait_scroll_pos_changed(self, x=None, y=None): __tracebackhide__ = (lambda e: e.errisinstance(testprocess.WaitForTimeout)) if (x is None and y is not None) or (y is None and x is not None): raise ValueError(""Either both x/y or neither must be given!"") if x is None and y is None: point = 'Py*.QtCore.QPoint(*, *)' # not counting 0/0 here elif x == '0' and y == '0': point = 'Py*.QtCore.QPoint()' else: point = 'Py*.QtCore.QPoint({}, {})'.format(x, y) self.wait_for(category='webview', message='Scroll position changed to ' + point) ``` ###Assistant : Wait until a ""Scroll position changed"" message was found. With QtWebEngine, on older Qt versions which lack QWebEnginePage.scrollPositionChanged, this also skips the test. " 378,"def test_write_tfrecords(ray_start_regular_shared, tmp_path): import tensorflow as tf # The dataset we will write to a .tfrecords file. ds = ray.data.from_items( [ # Row one. { ""int_item"": 1, ""int_list"": [2, 2, 3], ""float_item"": 1.0, ""float_list"": [2.0, 3.0, 4.0], ""bytes_item"": b""abc"", ""bytes_list"": [b""abc"", b""1234""], }, # Row two. { ""int_item"": 2, ""int_list"": [3, 3, 4], ""float_item"": 2.0, ""float_list"": [2.0, 2.0, 3.0], ""bytes_item"": b""def"", ""bytes_list"": [b""def"", b""1234""], }, ] ) # The corresponding tf.train.Example that we would expect to read # from this dataset. expected_records = [ # Record one (corresponding to row one). tf.train.Example( features=tf.train.Features( feature={ ""int_item"": tf.train.Feature( int64_list=tf.train.Int64List(value=[1]) ), ""int_list"": tf.train.Feature( int64_list=tf.train.Int64List(value=[2, 2, 3]) ), ""float_item"": tf.train.Feature( float_list=tf.train.FloatList(value=[1.0]) ), ""float_list"": tf.train.Feature( float_list=tf.train.FloatList(value=[2.0, 3.0, 4.0]) ), ""bytes_item"": tf.train.Feature( bytes_list=tf.train.BytesList(value=[b""abc""]) ), ""bytes_list"": tf.train.Feature( bytes_list=tf.train.BytesList(value=[b""abc"", b""1234""]) ), } ) ), # Record two (corresponding to row two). 
tf.train.Example( features=tf.train.Features( feature={ ""int_item"": tf.train.Feature( int64_list=tf.train.Int64List(value=[2]) ), ""int_list"": tf.train.Feature( int64_list=tf.train.Int64List(value=[3, 3, 4]) ), ""float_item"": tf.train.Feature( float_list=tf.train.FloatList(value=[2.0]) ), ""float_list"": tf.train.Feature( float_list=tf.train.FloatList(value=[2.0, 2.0, 3.0]) ), ""bytes_item"": tf.train.Feature( bytes_list=tf.train.BytesList(value=[b""def""]) ), ""bytes_list"": tf.train.Feature( bytes_list=tf.train.BytesList(value=[b""def"", b""1234""]) ), } ) ), ] # Perform the test. # Write the dataset to a .tfrecords file. ds.write_tfrecords(tmp_path) # Read the Examples back out from the .tfrecords file. # This follows the offical TFRecords tutorial: # https://www.tensorflow.org/tutorials/load_data/tfrecord#reading_a_tfrecord_file_2 filenames = sorted(os.listdir(tmp_path)) filepaths = [os.path.join(tmp_path, filename) for filename in filenames] raw_dataset = tf.data.TFRecordDataset(filepaths) tfrecords = [] for raw_record in raw_dataset: example = tf.train.Example() example.ParseFromString(raw_record.numpy()) tfrecords.append(example) assert tfrecords == expected_records ","Test that write_tfrecords writes TFRecords correctly. Test this by writing a Dataset to a TFRecord (function under test), reading it back out into a tf.train.Example, and checking that the result is analogous to the original Dataset. ",36,231,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_write_tfrecords(ray_start_regular_shared, tmp_path): import tensorflow as tf # The dataset we will write to a .tfrecords file. ds = ray.data.from_items( [ # Row one. { ""int_item"": 1, ""int_list"": [2, 2, 3], ""float_item"": 1.0, ""float_list"": [2.0, 3.0, 4.0], ""bytes_item"": b""abc"", ""bytes_list"": [b""abc"", b""1234""], }, # Row two. { ""int_item"": 2, ""int_list"": [3, 3, 4], ""float_item"": 2.0, ""float_list"": [2.0, 2.0, 3.0], ""bytes_item"": b""def"", ""bytes_list"": [b""def"", b""1234""], }, ] ) # The corresponding tf.train.Example that we would expect to read # from this dataset. expected_records = [ # Record one (corresponding to row one). tf.train.Example( features=tf.train.Features( feature={ ""int_item"": tf.train.Feature( int64_list=tf.train.Int64List(value=[1]) ), ""int_list"": tf.train.Feature( int64_list=tf.train.Int64List(value=[2, 2, 3]) ), ""float_item"": tf.train.Feature( float_list=tf.train.FloatList(value=[1.0]) ), ""float_list"": tf.train.Feature( float_list=tf.train.FloatList(value=[2.0, 3.0, 4.0]) ), ""bytes_item"": tf.train.Feature( bytes_list=tf.train.BytesList(value=[b""abc""]) ), ""bytes_list"": tf.train.Feature( bytes_list=tf.train.BytesList(value=[b""abc"", b""1234""]) ), } ) ), # Record two (corresponding to row two). tf.train.Example( features=tf.train.Features( feature={ ""int_item"": tf.train.Feature( int64_list=tf.train.Int64List(value=[2]) ), ""int_list"": tf.train.Feature( int64_list=tf.train.Int64List(value=[3, 3, 4]) ), ""float_item"": tf.train.Feature( float_list=tf.train.FloatList(value=[2.0]) ), ""float_list"": tf.train.Feature( float_list=tf.train.FloatList(value=[2.0, 2.0, 3.0]) ), ""bytes_item"": tf.train.Feature( bytes_list=tf.train.BytesList(value=[b""def""]) ), ""bytes_list"": tf.train.Feature( bytes_list=tf.train.BytesList(value=[b""def"", b""1234""]) ), } ) ), ] # Perform the test. # Write the dataset to a .tfrecords file. 
ds.write_tfrecords(tmp_path) # Read the Examples back out from the .tfrecords file. # This follows the offical TFRecords tutorial: # https://www.tensorflow.org/tutorials/load_data/tfrecord#reading_a_tfrecord_file_2 filenames = sorted(os.listdir(tmp_path)) filepaths = [os.path.join(tmp_path, filename) for filename in filenames] raw_dataset = tf.data.TFRecordDataset(filepaths) tfrecords = [] for raw_record in raw_dataset: example = tf.train.Example() example.ParseFromString(raw_record.numpy()) tfrecords.append(example) assert tfrecords == expected_records ``` ###Assistant : Test that write_tfrecords writes TFRecords correctly. Test this by writing a Dataset to a TFRecord (function under test), reading it back out into a tf.train.Example, and checking that the result is analogous to the original Dataset. " 379,"def test_float_conversion_dtype(self): x = np.array([-1, 1]) # Test all combinations of dtypes conversions dtype_combin = np.array( np.meshgrid( OutputPreprocessing.float_dtype_list, OutputPreprocessing.float_dtype_list, ) ).T.reshape(-1, 2) for dtype_in, dtype_out in dtype_combin: x = x.astype(dtype_in) y = gr.processing_utils._convert(x, dtype_out) assert y.dtype == np.dtype(dtype_out) ",Test any convertion from a float dtype to an other.,10,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_float_conversion_dtype(self): x = np.array([-1, 1]) # Test all combinations of dtypes conversions dtype_combin = np.array( np.meshgrid( OutputPreprocessing.float_dtype_list, OutputPreprocessing.float_dtype_list, ) ).T.reshape(-1, 2) for dtype_in, dtype_out in dtype_combin: x = x.astype(dtype_in) y = gr.processing_utils._convert(x, dtype_out) assert y.dtype == np.dtype(dtype_out) ``` ###Assistant : Test any convertion from a float dtype to an other. " 380,"def fix_mime_types(): # Known to be problematic when Visual Studio is installed: # # https://github.com/spotDL/spotify-downloader/issues/1540 mimetypes.add_type(""application/javascript"", "".js"") # Not known to be problematic, but used by spotDL: mimetypes.add_type(""text/css"", "".css"") mimetypes.add_type(""image/svg+xml"", "".svg"") mimetypes.add_type(""text/html"", "".html"") @app.server.websocket(""/api/ws"")","Fix incorrect entries in the `mimetypes` registry. On Windows, the Python standard library's `mimetypes` reads in mappings from file extension to MIME type from the Windows registry. Other applications can and do write incorrect values to this registry, which causes `mimetypes.guess_type` to return incorrect values, which causes spotDL to fail to render on the frontend. This method hard-codes the correct mappings for certain MIME types that are known to be either used by TensorBoard or problematic in general. ",78,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fix_mime_types(): # Known to be problematic when Visual Studio is installed: # # https://github.com/spotDL/spotify-downloader/issues/1540 mimetypes.add_type(""application/javascript"", "".js"") # Not known to be problematic, but used by spotDL: mimetypes.add_type(""text/css"", "".css"") mimetypes.add_type(""image/svg+xml"", "".svg"") mimetypes.add_type(""text/html"", "".html"") @app.server.websocket(""/api/ws"") ``` ###Assistant : Fix incorrect entries in the `mimetypes` registry. On Windows, the Python standard library's `mimetypes` reads in mappings from file extension to MIME type from the Windows registry. 
Other applications can and do write incorrect values to this registry, which causes `mimetypes.guess_type` to return incorrect values, which causes spotDL to fail to render on the frontend. This method hard-codes the correct mappings for certain MIME types that are known to be either used by TensorBoard or problematic in general. " 381,"def _get_threads(self) -> MultiThread: # TODO Check if multiple threads actually speeds anything up save_queue = queue_manager.get_queue(""convert_out"") patch_queue = queue_manager.get_queue(""patch"") return MultiThread(self._converter.process, patch_queue, save_queue, thread_count=self._pool_processes, name=""patch"") "," Get the threads for patching the converted faces onto the frames. Returns :class:`lib.multithreading.MultiThread` The threads that perform the patching of swapped faces onto the output frames ",26,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_threads(self) -> MultiThread: # TODO Check if multiple threads actually speeds anything up save_queue = queue_manager.get_queue(""convert_out"") patch_queue = queue_manager.get_queue(""patch"") return MultiThread(self._converter.process, patch_queue, save_queue, thread_count=self._pool_processes, name=""patch"") ``` ###Assistant : Get the threads for patching the converted faces onto the frames. Returns :class:`lib.multithreading.MultiThread` The threads that perform the patching of swapped faces onto the output frames " 382,"def verify_metadata(self): # send the info to the server and report the result (code, result) = self.post_to_server(self.build_post_data('verify')) log.info('Server response (%s): %s', code, result) "," Send the metadata to the package index server to be checked. ",11,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def verify_metadata(self): # send the info to the server and report the result (code, result) = self.post_to_server(self.build_post_data('verify')) log.info('Server response (%s): %s', code, result) ``` ###Assistant : Send the metadata to the package index server to be checked. " 383,"def test_subscriptions_add_for_principal_invite_only(self) -> None: invitee = self.example_user(""iago"") current_streams = self.get_streams(invitee) invite_streams = self.make_random_stream_names(current_streams) self.assert_adding_subscriptions_for_principal( invitee.id, invitee.realm, invite_streams, invite_only=True, policy_name=""Private, protected history"", ) "," You can subscribe other people to invite only streams. ",9,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_subscriptions_add_for_principal_invite_only(self) -> None: invitee = self.example_user(""iago"") current_streams = self.get_streams(invitee) invite_streams = self.make_random_stream_names(current_streams) self.assert_adding_subscriptions_for_principal( invitee.id, invitee.realm, invite_streams, invite_only=True, policy_name=""Private, protected history"", ) ``` ###Assistant : You can subscribe other people to invite only streams. " 384,"def get_module_dict_key_from_name(name): key = name.replace(""."", ""__ludwig_punct_period__"") return key + FEATURE_NAME_SUFFIX ",Returns a key that's guaranteed to be compatible with torch.,10,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_module_dict_key_from_name(name): key = name.replace(""."", ""__ludwig_punct_period__"") return key + FEATURE_NAME_SUFFIX ``` ###Assistant : Returns a key that's guaranteed to be compatible with torch. " 385,"def test_hdk_import(import_strategy, has_other_engines): remove_other_engines = if not has_other_engines: import_strategy = f""{remove_other_engines}\n{import_strategy}"" res = subprocess.run( [sys.executable, ""-c"", import_strategy], stderr=subprocess.PIPE, stdout=subprocess.PIPE, ) if res.returncode != 0: pytest.fail(str(res.stderr)) @pytest.mark.parametrize( ""import_strategy, expected_to_fail"", [ pytest.param( , True, id=""import_pydbe_first-pyarrow_gandiva_second"", ), pytest.param( , False, id=""import_pyarrow_gandiva_first-pydbe_second"", ), ], )"," Test import of HDK engine. The import of DbWorker requires to set special dlopen flags which make it then incompatible to import some other libraries further (like ``pyarrow.gandiva``). This test verifies that it's not the case when a user naturally imports Modin with HDK engine. Parameters ---------- import_strategy : str There are several scenarios of how a user can import Modin with HDK engine: configure Modin first to use HDK engine and then import ``modin.pandas`` or vice versa. This parameters holds a python code, implementing one of these scenarios. has_other_engines : bool The problem with import may appear depending on whether other engines are installed. This parameter indicates whether to remove modules for non-hdk engines before the test. Notes ----- The failed import flow may cause segfault, which causes to crash the pytest itself. This makes us to run the test in a separate process and check its exit-code to decide the success of the test. import sys sys.modules['ray'] = None sys.modules['dask'] = None from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker import pyarrow.gandiva import pyarrow.gandiva from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker ",176,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_hdk_import(import_strategy, has_other_engines): remove_other_engines = if not has_other_engines: import_strategy = f""{remove_other_engines}\n{import_strategy}"" res = subprocess.run( [sys.executable, ""-c"", import_strategy], stderr=subprocess.PIPE, stdout=subprocess.PIPE, ) if res.returncode != 0: pytest.fail(str(res.stderr)) @pytest.mark.parametrize( ""import_strategy, expected_to_fail"", [ pytest.param( , True, id=""import_pydbe_first-pyarrow_gandiva_second"", ), pytest.param( , False, id=""import_pyarrow_gandiva_first-pydbe_second"", ), ], ) ``` ###Assistant : Test import of HDK engine. The import of DbWorker requires to set special dlopen flags which make it then incompatible to import some other libraries further (like ``pyarrow.gandiva``). This test verifies that it's not the case when a user naturally imports Modin with HDK engine. Parameters ---------- import_strategy : str There are several scenarios of how a user can import Modin with HDK engine: configure Modin first to use HDK engine and then import ``modin.pandas`` or vice versa. This parameters holds a python code, implementing one of these scenarios. has_other_engines : bool The problem with import may appear depending on whether other engines are installed. 
This parameter indicates whether to remove modules for non-hdk engines before the test. Notes ----- The failed import flow may cause segfault, which causes to crash the pytest itself. This makes us to run the test in a separate process and check its exit-code to decide the success of the test. import sys sys.modules['ray'] = None sys.modules['dask'] = None from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker import pyarrow.gandiva import pyarrow.gandiva from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker " 386,"def _iter_tree_entries_next(root_full, dir_rel, memo, on_error, follow_links): dir_full = os.path.join(root_full, dir_rel) dir_real = os.path.realpath(dir_full) # Remember each encountered ancestor directory and its canonical # (real) path. If a canonical path is encountered more than once, # recursion has occurred. if dir_real not in memo: memo[dir_real] = dir_rel else: raise RecursionError( real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel ) for node_name in os.listdir(dir_full): node_rel = os.path.join(dir_rel, node_name) node_full = os.path.join(root_full, node_rel) # Inspect child node. try: node_lstat = os.lstat(node_full) except OSError as e: if on_error is not None: on_error(e) continue if stat.S_ISLNK(node_lstat.st_mode): # Child node is a link, inspect the target node. is_link = True try: node_stat = os.stat(node_full) except OSError as e: if on_error is not None: on_error(e) continue else: is_link = False node_stat = node_lstat if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link): # Child node is a directory, recurse into it and yield its # descendant files. yield TreeEntry(node_name, node_rel, node_lstat, node_stat) for entry in _iter_tree_entries_next( root_full, node_rel, memo, on_error, follow_links ): yield entry elif stat.S_ISREG(node_stat.st_mode) or is_link: # Child node is either a file or an unfollowed link, yield it. yield TreeEntry(node_name, node_rel, node_lstat, node_stat) # NOTE: Make sure to remove the canonical (real) path of the directory # from the ancestors memo once we are done with it. This allows the # same directory to appear multiple times. If this is not done, the # second occurrence of the directory will be incorrectly interpreted # as a recursion. See . del memo[dir_real] "," Scan the directory for all descendant files. *root_full* (:class:`str`) the absolute path to the root directory. *dir_rel* (:class:`str`) the path to the directory to scan relative to *root_full*. *memo* (:class:`dict`) keeps track of ancestor directories encountered. Maps each ancestor real path (:class:`str`) to relative path (:class:`str`). *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. *follow_links* (:class:`bool`) is whether to walk symbolic links that resolve to directories. Yields each entry (:class:`.TreeEntry`). ",74,240,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _iter_tree_entries_next(root_full, dir_rel, memo, on_error, follow_links): dir_full = os.path.join(root_full, dir_rel) dir_real = os.path.realpath(dir_full) # Remember each encountered ancestor directory and its canonical # (real) path. If a canonical path is encountered more than once, # recursion has occurred. 
if dir_real not in memo: memo[dir_real] = dir_rel else: raise RecursionError( real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel ) for node_name in os.listdir(dir_full): node_rel = os.path.join(dir_rel, node_name) node_full = os.path.join(root_full, node_rel) # Inspect child node. try: node_lstat = os.lstat(node_full) except OSError as e: if on_error is not None: on_error(e) continue if stat.S_ISLNK(node_lstat.st_mode): # Child node is a link, inspect the target node. is_link = True try: node_stat = os.stat(node_full) except OSError as e: if on_error is not None: on_error(e) continue else: is_link = False node_stat = node_lstat if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link): # Child node is a directory, recurse into it and yield its # descendant files. yield TreeEntry(node_name, node_rel, node_lstat, node_stat) for entry in _iter_tree_entries_next( root_full, node_rel, memo, on_error, follow_links ): yield entry elif stat.S_ISREG(node_stat.st_mode) or is_link: # Child node is either a file or an unfollowed link, yield it. yield TreeEntry(node_name, node_rel, node_lstat, node_stat) # NOTE: Make sure to remove the canonical (real) path of the directory # from the ancestors memo once we are done with it. This allows the # same directory to appear multiple times. If this is not done, the # second occurrence of the directory will be incorrectly interpreted # as a recursion. See . del memo[dir_real] ``` ###Assistant : Scan the directory for all descendant files. *root_full* (:class:`str`) the absolute path to the root directory. *dir_rel* (:class:`str`) the path to the directory to scan relative to *root_full*. *memo* (:class:`dict`) keeps track of ancestor directories encountered. Maps each ancestor real path (:class:`str`) to relative path (:class:`str`). *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. *follow_links* (:class:`bool`) is whether to walk symbolic links that resolve to directories. Yields each entry (:class:`.TreeEntry`). " 387,"def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None): from sympy.combinatorics.perm_groups import _orbit base_len = len(base) degree = strong_gens[0].size if strong_gens_distr is None: strong_gens_distr = _distribute_gens_by_base(base, strong_gens) if basic_orbits is None: basic_orbits = [] for i in range(base_len): basic_orbit = _orbit(degree, strong_gens_distr[i], base[i]) basic_orbits.append(basic_orbit) strong_gens_distr.append([]) res = strong_gens[:] for i in range(base_len - 1, -1, -1): gens_copy = strong_gens_distr[i][:] for gen in strong_gens_distr[i]: if gen not in strong_gens_distr[i + 1]: temp_gens = gens_copy[:] temp_gens.remove(gen) if temp_gens == []: continue temp_orbit = _orbit(degree, temp_gens, base[i]) if temp_orbit == basic_orbits[i]: gens_copy.remove(gen) res.remove(gen) return res "," Remove redundant generators from a strong generating set. Parameters ========== ``base`` - a base ``strong_gens`` - a strong generating set relative to ``base`` ``basic_orbits`` - basic orbits ``strong_gens_distr`` - strong generators distributed by membership in basic stabilizers Returns ======= A strong generating set with respect to ``base`` which is a subset of ``strong_gens``. 
Examples ======== >>> from sympy.combinatorics import SymmetricGroup >>> from sympy.combinatorics.util import _remove_gens >>> from sympy.combinatorics.testutil import _verify_bsgs >>> S = SymmetricGroup(15) >>> base, strong_gens = S.schreier_sims_incremental() >>> new_gens = _remove_gens(base, strong_gens) >>> len(new_gens) 14 >>> _verify_bsgs(S, base, new_gens) True Notes ===== This procedure is outlined in [1],p.95. References ========== .. [1] Holt, D., Eick, B., O'Brien, E. ""Handbook of computational group theory"" ",115,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None): from sympy.combinatorics.perm_groups import _orbit base_len = len(base) degree = strong_gens[0].size if strong_gens_distr is None: strong_gens_distr = _distribute_gens_by_base(base, strong_gens) if basic_orbits is None: basic_orbits = [] for i in range(base_len): basic_orbit = _orbit(degree, strong_gens_distr[i], base[i]) basic_orbits.append(basic_orbit) strong_gens_distr.append([]) res = strong_gens[:] for i in range(base_len - 1, -1, -1): gens_copy = strong_gens_distr[i][:] for gen in strong_gens_distr[i]: if gen not in strong_gens_distr[i + 1]: temp_gens = gens_copy[:] temp_gens.remove(gen) if temp_gens == []: continue temp_orbit = _orbit(degree, temp_gens, base[i]) if temp_orbit == basic_orbits[i]: gens_copy.remove(gen) res.remove(gen) return res ``` ###Assistant : Remove redundant generators from a strong generating set. Parameters ========== ``base`` - a base ``strong_gens`` - a strong generating set relative to ``base`` ``basic_orbits`` - basic orbits ``strong_gens_distr`` - strong generators distributed by membership in basic stabilizers Returns ======= A strong generating set with respect to ``base`` which is a subset of ``strong_gens``. Examples ======== >>> from sympy.combinatorics import SymmetricGroup >>> from sympy.combinatorics.util import _remove_gens >>> from sympy.combinatorics.testutil import _verify_bsgs >>> S = SymmetricGroup(15) >>> base, strong_gens = S.schreier_sims_incremental() >>> new_gens = _remove_gens(base, strong_gens) >>> len(new_gens) 14 >>> _verify_bsgs(S, base, new_gens) True Notes ===== This procedure is outlined in [1],p.95. References ========== .. [1] Holt, D., Eick, B., O'Brien, E. ""Handbook of computational group theory"" " 388,"def from_dict(cls, file_dict): return cls( file_dict[""filename""], file_dict[""content""], file_dict.get(""content-type"", ""text/plain""), ) "," Create a SimpleUploadedFile object from a dictionary with keys: - filename - content-type - content ",15,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def from_dict(cls, file_dict): return cls( file_dict[""filename""], file_dict[""content""], file_dict.get(""content-type"", ""text/plain""), ) ``` ###Assistant : Create a SimpleUploadedFile object from a dictionary with keys: - filename - content-type - content " 389,"def test_change_list_sorting_callable(self): response = self.client.get( reverse(""admin:admin_views_article_changelist""), {""o"": 2} ) self.assertContentBefore( response, ""Oldest content"", ""Middle content"", ""Results of sorting on callable are out of order."", ) self.assertContentBefore( response, ""Middle content"", ""Newest content"", ""Results of sorting on callable are out of order."", ) "," Ensure we can sort on a list_display field that is a callable (column 2 is callable_year in ArticleAdmin) ",18,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_change_list_sorting_callable(self): response = self.client.get( reverse(""admin:admin_views_article_changelist""), {""o"": 2} ) self.assertContentBefore( response, ""Oldest content"", ""Middle content"", ""Results of sorting on callable are out of order."", ) self.assertContentBefore( response, ""Middle content"", ""Newest content"", ""Results of sorting on callable are out of order."", ) ``` ###Assistant : Ensure we can sort on a list_display field that is a callable (column 2 is callable_year in ArticleAdmin) " 390,"def pre_encode(self) -> Optional[Callable[[np.ndarray], List[bytes]]]: dummy = np.zeros((20, 20, 3), dtype=""uint8"") test = self._writer.pre_encode(dummy) retval: Optional[Callable[[np.ndarray], List[bytes]]] = None if test is None else self._writer.pre_encode logger.debug(""Writer pre_encode function: %s"", retval) return retval "," python function: Selected writer's pre-encode function, if it has one, otherwise ``None`` ",12,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pre_encode(self) -> Optional[Callable[[np.ndarray], List[bytes]]]: dummy = np.zeros((20, 20, 3), dtype=""uint8"") test = self._writer.pre_encode(dummy) retval: Optional[Callable[[np.ndarray], List[bytes]]] = None if test is None else self._writer.pre_encode logger.debug(""Writer pre_encode function: %s"", retval) return retval ``` ###Assistant : python function: Selected writer's pre-encode function, if it has one, otherwise ``None`` " 391,"def disconnect(self): if self.is_connected is False: return self.connection.close() self.is_connected = False return self.is_connected "," Close any existing connections. ",4,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def disconnect(self): if self.is_connected is False: return self.connection.close() self.is_connected = False return self.is_connected ``` ###Assistant : Close any existing connections. 
" 392,"def resolve(self) -> Tuple[List, Dict]: objects_mapping = [] for obj_ref in self.workflow_outputs: obj, ref = _resolve_object_ref(obj_ref.ref) objects_mapping.append(obj) workflow_ref_mapping = _resolve_dynamic_workflow_refs(self.workflow_refs) with serialization_context.workflow_args_resolving_context( objects_mapping, workflow_ref_mapping ): # reconstruct input arguments under correct serialization context flattened_args: List[Any] = ray.get(self.args) # dereference arguments like Ray remote functions flattened_args = [ ray.get(a) if isinstance(a, ObjectRef) else a for a in flattened_args ] return signature.recover_args(flattened_args) "," This function resolves the inputs for the code inside a workflow step (works on the callee side). For outputs from other workflows, we resolve them into object instances inplace. For each ObjectRef argument, the function returns both the ObjectRef and the object instance. If the ObjectRef is a chain of nested ObjectRefs, then we resolve it recursively until we get the object instance, and we return the *direct* ObjectRef of the instance. This function does not resolve ObjectRef inside another object (e.g. list of ObjectRefs) to give users some flexibility. Returns: Instances of arguments. ",94,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resolve(self) -> Tuple[List, Dict]: objects_mapping = [] for obj_ref in self.workflow_outputs: obj, ref = _resolve_object_ref(obj_ref.ref) objects_mapping.append(obj) workflow_ref_mapping = _resolve_dynamic_workflow_refs(self.workflow_refs) with serialization_context.workflow_args_resolving_context( objects_mapping, workflow_ref_mapping ): # reconstruct input arguments under correct serialization context flattened_args: List[Any] = ray.get(self.args) # dereference arguments like Ray remote functions flattened_args = [ ray.get(a) if isinstance(a, ObjectRef) else a for a in flattened_args ] return signature.recover_args(flattened_args) ``` ###Assistant : This function resolves the inputs for the code inside a workflow step (works on the callee side). For outputs from other workflows, we resolve them into object instances inplace. For each ObjectRef argument, the function returns both the ObjectRef and the object instance. If the ObjectRef is a chain of nested ObjectRefs, then we resolve it recursively until we get the object instance, and we return the *direct* ObjectRef of the instance. This function does not resolve ObjectRef inside another object (e.g. list of ObjectRefs) to give users some flexibility. Returns: Instances of arguments. " 393,"def _maybe_create_attribute(self, name, default_value): if not hasattr(self, name): self.__setattr__(name, default_value) ","Create the attribute with the default value if it hasn't been created. This is useful for fields that is used for tracking purpose, _trainable_weights, or _layers. Note that user could create a layer subclass and assign an internal field before invoking the Layer.__init__(), the __setattr__() need to create the tracking fields and __init__() need to not override them. Args: name: String, the name of the attribute. default_value: Object, the default value of the attribute. ",74,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _maybe_create_attribute(self, name, default_value): if not hasattr(self, name): self.__setattr__(name, default_value) ``` ###Assistant : Create the attribute with the default value if it hasn't been created. This is useful for fields that is used for tracking purpose, _trainable_weights, or _layers. Note that user could create a layer subclass and assign an internal field before invoking the Layer.__init__(), the __setattr__() need to create the tracking fields and __init__() need to not override them. Args: name: String, the name of the attribute. default_value: Object, the default value of the attribute. " 394,"def add_nodes_from(self, nodes_for_adding, **attr): for n in nodes_for_adding: try: newnode = n not in self._node newdict = attr except TypeError: n, ndict = n newnode = n not in self._node newdict = attr.copy() newdict.update(ndict) if newnode: if n is None: raise ValueError(""None cannot be a node"") self._succ[n] = self.adjlist_inner_dict_factory() self._pred[n] = self.adjlist_inner_dict_factory() self._node[n] = self.node_attr_dict_factory() self._node[n].update(newdict) ","Add multiple nodes. Parameters ---------- nodes_for_adding : iterable container A container of nodes (list, dict, set, etc.). OR A container of (node, attribute dict) tuples. Node attributes are updated using the attribute dict. attr : keyword arguments, optional (default= no attributes) Update attributes for all nodes in nodes. Node attributes specified in nodes as a tuple take precedence over attributes specified via keyword arguments. See Also -------- add_node Notes ------- When adding nodes from an iterator over the graph you are changing, a `RuntimeError` can be raised with message: `RuntimeError: dictionary changed size during iteration`. This happens when the graph's underlying dictionary is modified during iteration. To avoid this error, evaluate the iterator into a separate object, e.g. by using `list(iterator_of_nodes)`, and pass this object to `G.add_nodes_from`. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_nodes_from(""Hello"") >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) >>> G.add_nodes_from(K3) >>> sorted(G.nodes(), key=str) [0, 1, 2, 'H', 'e', 'l', 'o'] Use keywords to update specific node attributes for every node. >>> G.add_nodes_from([1, 2], size=10) >>> G.add_nodes_from([3, 4], weight=0.4) Use (node, attrdict) tuples to update attributes for specific nodes. >>> G.add_nodes_from([(1, dict(size=11)), (2, {""color"": ""blue""})]) >>> G.nodes[1][""size""] 11 >>> H = nx.Graph() >>> H.add_nodes_from(G.nodes(data=True)) >>> H.nodes[1][""size""] 11 Evaluate an iterator over a graph if using it to modify the same graph >>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)]) >>> # wrong way - will raise RuntimeError >>> # G.add_nodes_from(n + 1 for n in G.nodes) >>> # correct way >>> G.add_nodes_from(list(n + 1 for n in G.nodes)) ",260,56,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def add_nodes_from(self, nodes_for_adding, **attr): for n in nodes_for_adding: try: newnode = n not in self._node newdict = attr except TypeError: n, ndict = n newnode = n not in self._node newdict = attr.copy() newdict.update(ndict) if newnode: if n is None: raise ValueError(""None cannot be a node"") self._succ[n] = self.adjlist_inner_dict_factory() self._pred[n] = self.adjlist_inner_dict_factory() self._node[n] = self.node_attr_dict_factory() self._node[n].update(newdict) ``` ###Assistant : Add multiple nodes. Parameters ---------- nodes_for_adding : iterable container A container of nodes (list, dict, set, etc.). OR A container of (node, attribute dict) tuples. Node attributes are updated using the attribute dict. attr : keyword arguments, optional (default= no attributes) Update attributes for all nodes in nodes. Node attributes specified in nodes as a tuple take precedence over attributes specified via keyword arguments. See Also -------- add_node Notes ------- When adding nodes from an iterator over the graph you are changing, a `RuntimeError` can be raised with message: `RuntimeError: dictionary changed size during iteration`. This happens when the graph's underlying dictionary is modified during iteration. To avoid this error, evaluate the iterator into a separate object, e.g. by using `list(iterator_of_nodes)`, and pass this object to `G.add_nodes_from`. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_nodes_from(""Hello"") >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) >>> G.add_nodes_from(K3) >>> sorted(G.nodes(), key=str) [0, 1, 2, 'H', 'e', 'l', 'o'] Use keywords to update specific node attributes for every node. >>> G.add_nodes_from([1, 2], size=10) >>> G.add_nodes_from([3, 4], weight=0.4) Use (node, attrdict) tuples to update attributes for specific nodes. 
>>> G.add_nodes_from([(1, dict(size=11)), (2, {""color"": ""blue""})]) >>> G.nodes[1][""size""] 11 >>> H = nx.Graph() >>> H.add_nodes_from(G.nodes(data=True)) >>> H.nodes[1][""size""] 11 Evaluate an iterator over a graph if using it to modify the same graph >>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)]) >>> # wrong way - will raise RuntimeError >>> # G.add_nodes_from(n + 1 for n in G.nodes) >>> # correct way >>> G.add_nodes_from(list(n + 1 for n in G.nodes)) " 395,"def test_submitted_email_notifications_sent(self): self.login(self.submitter) self.submit() self.assertEqual(len(mail.outbox), 4) task_submission_emails = [ email for email in mail.outbox if ""task"" in email.subject ] task_submission_emailed_addresses = [ address for email in task_submission_emails for address in email.to ] workflow_submission_emails = [ email for email in mail.outbox if ""workflow"" in email.subject ] workflow_submission_emailed_addresses = [ address for email in workflow_submission_emails for address in email.to ] self.assertEqual(len(task_submission_emails), 3) # the moderator is in the Group assigned to the GroupApproval task, so should get an email self.assertIn(self.moderator.email, task_submission_emailed_addresses) self.assertIn(self.moderator2.email, task_submission_emailed_addresses) # with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS`, the superuser should get a task email self.assertIn(self.superuser.email, task_submission_emailed_addresses) # the submitter triggered this workflow update, so should not get an email self.assertNotIn(self.submitter.email, task_submission_emailed_addresses) self.assertEqual(len(workflow_submission_emails), 1) # the moderator should not get a workflow email self.assertNotIn(self.moderator.email, workflow_submission_emailed_addresses) self.assertNotIn(self.moderator2.email, workflow_submission_emailed_addresses) # with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS`, the superuser should get a workflow email self.assertIn(self.superuser.email, workflow_submission_emailed_addresses) # as the submitter was the triggering user, the submitter should not get an email notification self.assertNotIn(self.submitter.email, workflow_submission_emailed_addresses) ",Test that 'submitted' notifications for WorkflowState and TaskState are both sent correctly,12,153,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_submitted_email_notifications_sent(self): self.login(self.submitter) self.submit() self.assertEqual(len(mail.outbox), 4) task_submission_emails = [ email for email in mail.outbox if ""task"" in email.subject ] task_submission_emailed_addresses = [ address for email in task_submission_emails for address in email.to ] workflow_submission_emails = [ email for email in mail.outbox if ""workflow"" in email.subject ] workflow_submission_emailed_addresses = [ address for email in workflow_submission_emails for address in email.to ] self.assertEqual(len(task_submission_emails), 3) # the moderator is in the Group assigned to the GroupApproval task, so should get an email self.assertIn(self.moderator.email, task_submission_emailed_addresses) self.assertIn(self.moderator2.email, task_submission_emailed_addresses) # with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS`, the superuser should get a task email self.assertIn(self.superuser.email, task_submission_emailed_addresses) # the submitter triggered this workflow update, so should not get an email self.assertNotIn(self.submitter.email, task_submission_emailed_addresses) self.assertEqual(len(workflow_submission_emails), 1) # the moderator should not get a workflow email self.assertNotIn(self.moderator.email, workflow_submission_emailed_addresses) self.assertNotIn(self.moderator2.email, workflow_submission_emailed_addresses) # with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS`, the superuser should get a workflow email self.assertIn(self.superuser.email, workflow_submission_emailed_addresses) # as the submitter was the triggering user, the submitter should not get an email notification self.assertNotIn(self.submitter.email, workflow_submission_emailed_addresses) ``` ###Assistant : Test that 'submitted' notifications for WorkflowState and TaskState are both sent correctly " 396,"def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]: indices: List[Tuple[np.array]] = [] preds_masks = masks_queries_logits preds_probs = class_queries_logits.softmax(dim=-1) # downsample all masks in one go -> save memory mask_labels = nn.functional.interpolate(mask_labels, size=preds_masks.shape[-2:], mode=""nearest"") # iterate through batch size for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels): # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be ommitted. 
cost_class = -pred_probs[:, labels] # flatten spatial dimension ""q h w -> q (h w)"" num_queries, height, width = pred_mask.shape pred_mask_flat = pred_mask.view(num_queries, height * width) # [num_queries, H*W] # same for target_mask ""c h w -> c (h w)"" num_channels, height, width = target_mask.shape target_mask_flat = target_mask.view(num_channels, height * width) # [num_total_labels, H*W] # compute the focal loss between each mask pairs -> shape [NUM_QUERIES, CLASSES] cost_mask = pair_wise_sigmoid_focal_loss(pred_mask_flat, target_mask_flat) # Compute the dice loss betwen each mask pairs -> shape [NUM_QUERIES, CLASSES] cost_dice = pair_wise_dice_loss(pred_mask_flat, target_mask_flat) # final cost matrix cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice # do the assigmented using the hungarian algorithm in scipy assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu()) indices.append(assigned_indices) # It could be stacked in one tensor matched_indices = [ (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices ] return matched_indices ","Performs the matching Params: masks_queries_logits (`torch.Tensor`): A tensor` of dim `batch_size, num_queries, num_classes` with the classification logits. class_queries_logits (`torch.Tensor`): A tensor` of dim `batch_size, num_queries, height, width` with the predicted masks. class_labels (`torch.Tensor`): A tensor` of dim `num_target_boxes` (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. mask_labels (`torch.Tensor`): A tensor` of dim `num_target_boxes, height, width` containing the target masks. Returns: `List[Tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected labels (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes). ",114,229,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]: indices: List[Tuple[np.array]] = [] preds_masks = masks_queries_logits preds_probs = class_queries_logits.softmax(dim=-1) # downsample all masks in one go -> save memory mask_labels = nn.functional.interpolate(mask_labels, size=preds_masks.shape[-2:], mode=""nearest"") # iterate through batch size for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels): # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be ommitted. 
cost_class = -pred_probs[:, labels] # flatten spatial dimension ""q h w -> q (h w)"" num_queries, height, width = pred_mask.shape pred_mask_flat = pred_mask.view(num_queries, height * width) # [num_queries, H*W] # same for target_mask ""c h w -> c (h w)"" num_channels, height, width = target_mask.shape target_mask_flat = target_mask.view(num_channels, height * width) # [num_total_labels, H*W] # compute the focal loss between each mask pairs -> shape [NUM_QUERIES, CLASSES] cost_mask = pair_wise_sigmoid_focal_loss(pred_mask_flat, target_mask_flat) # Compute the dice loss betwen each mask pairs -> shape [NUM_QUERIES, CLASSES] cost_dice = pair_wise_dice_loss(pred_mask_flat, target_mask_flat) # final cost matrix cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice # do the assigmented using the hungarian algorithm in scipy assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu()) indices.append(assigned_indices) # It could be stacked in one tensor matched_indices = [ (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices ] return matched_indices ``` ###Assistant : Performs the matching Params: masks_queries_logits (`torch.Tensor`): A tensor` of dim `batch_size, num_queries, num_classes` with the classification logits. class_queries_logits (`torch.Tensor`): A tensor` of dim `batch_size, num_queries, height, width` with the predicted masks. class_labels (`torch.Tensor`): A tensor` of dim `num_target_boxes` (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. mask_labels (`torch.Tensor`): A tensor` of dim `num_target_boxes, height, width` containing the target masks. Returns: `List[Tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected labels (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes). " 397,"def cancel_dispatcher_process(self): if not self.celery_task_id: return canceled = [] try: # Use control and reply mechanism to cancel and obtain confirmation timeout = 5 canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id]) except socket.timeout: logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s') except Exception: logger.exception(""error encountered when checking task status"") return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained ",Returns True if dispatcher running this job acknowledged request and sent SIGTERM,12,58,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def cancel_dispatcher_process(self): if not self.celery_task_id: return canceled = [] try: # Use control and reply mechanism to cancel and obtain confirmation timeout = 5 canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id]) except socket.timeout: logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s') except Exception: logger.exception(""error encountered when checking task status"") return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained ``` ###Assistant : Returns True if dispatcher running this job acknowledged request and sent SIGTERM " 398,"def _mangle_index_names(cls, names): return [ f""__index__{i}_{'__None__' if n is None else n}"" for i, n in enumerate(names) ] "," Return mangled index names for index labels. Mangled names are used for index columns because index labels cannot always be used as HDK table column names. E.g. label can be a non-string value or an unallowed string (empty strings, etc.) for a table column name. Parameters ---------- names : list of str Index labels. Returns ------- list of str Mangled names. ",61,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _mangle_index_names(cls, names): return [ f""__index__{i}_{'__None__' if n is None else n}"" for i, n in enumerate(names) ] ``` ###Assistant : Return mangled index names for index labels. Mangled names are used for index columns because index labels cannot always be used as HDK table column names. E.g. label can be a non-string value or an unallowed string (empty strings, etc.) for a table column name. Parameters ---------- names : list of str Index labels. Returns ------- list of str Mangled names. " 399,"def get_b2cs_json(data, gstin): company_state_number = gstin[0:2] out = [] for d in data: if not d.get(""place_of_supply""): frappe.throw( _( ).format(frappe.bold(""Place Of Supply"")) ) pos = d.get(""place_of_supply"").split(""-"")[0] tax_details = {} rate = d.get(""rate"", 0) tax = flt((d[""taxable_value""] * rate) / 100.0, 2) if company_state_number == pos: tax_details.update({""camt"": flt(tax / 2.0, 2), ""samt"": flt(tax / 2.0, 2)}) else: tax_details.update({""iamt"": tax}) inv = { ""sply_ty"": ""INTRA"" if company_state_number == pos else ""INTER"", ""pos"": pos, ""typ"": d.get(""type""), ""txval"": flt(d.get(""taxable_value""), 2), ""rt"": rate, ""iamt"": flt(tax_details.get(""iamt""), 2), ""camt"": flt(tax_details.get(""camt""), 2), ""samt"": flt(tax_details.get(""samt""), 2), ""csamt"": flt(d.get(""cess_amount""), 2), } if d.get(""type"") == ""E"" and d.get(""ecommerce_gstin""): inv.update({""etin"": d.get(""ecommerce_gstin"")}) out.append(inv) return out ","{0} not entered in some invoices. Please update and try again",11,101,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_b2cs_json(data, gstin): company_state_number = gstin[0:2] out = [] for d in data: if not d.get(""place_of_supply""): frappe.throw( _( ).format(frappe.bold(""Place Of Supply"")) ) pos = d.get(""place_of_supply"").split(""-"")[0] tax_details = {} rate = d.get(""rate"", 0) tax = flt((d[""taxable_value""] * rate) / 100.0, 2) if company_state_number == pos: tax_details.update({""camt"": flt(tax / 2.0, 2), ""samt"": flt(tax / 2.0, 2)}) else: tax_details.update({""iamt"": tax}) inv = { ""sply_ty"": ""INTRA"" if company_state_number == pos else ""INTER"", ""pos"": pos, ""typ"": d.get(""type""), ""txval"": flt(d.get(""taxable_value""), 2), ""rt"": rate, ""iamt"": flt(tax_details.get(""iamt""), 2), ""camt"": flt(tax_details.get(""camt""), 2), ""samt"": flt(tax_details.get(""samt""), 2), ""csamt"": flt(d.get(""cess_amount""), 2), } if d.get(""type"") == ""E"" and d.get(""ecommerce_gstin""): inv.update({""etin"": d.get(""ecommerce_gstin"")}) out.append(inv) return out ``` ###Assistant : {0} not entered in some invoices. Please update and try again " 400,"def _roll_vectorized(M, roll_indices, axis): assert axis in [0, 1] ndim = M.ndim assert ndim == 3 ndim_roll = roll_indices.ndim assert ndim_roll == 1 sh = M.shape r, c = sh[-2:] assert sh[0] == roll_indices.shape[0] vec_indices = np.arange(sh[0], dtype=np.int32) # Builds the rolled matrix M_roll = np.empty_like(M) if axis == 0: for ir in range(r): for ic in range(c): M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic] else: # 1 for ir in range(r): for ic in range(c): M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c] return M_roll "," Roll an array of matrices along *axis* (0: rows, 1: columns) according to an array of indices *roll_indices*. ",18,89,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _roll_vectorized(M, roll_indices, axis): assert axis in [0, 1] ndim = M.ndim assert ndim == 3 ndim_roll = roll_indices.ndim assert ndim_roll == 1 sh = M.shape r, c = sh[-2:] assert sh[0] == roll_indices.shape[0] vec_indices = np.arange(sh[0], dtype=np.int32) # Builds the rolled matrix M_roll = np.empty_like(M) if axis == 0: for ir in range(r): for ic in range(c): M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic] else: # 1 for ir in range(r): for ic in range(c): M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c] return M_roll ``` ###Assistant : Roll an array of matrices along *axis* (0: rows, 1: columns) according to an array of indices *roll_indices*. " 401,"def test_calibration_1_vs_all_vis_api(experiment_to_use): experiment = experiment_to_use probabilities = experiment.probabilities viz_outputs = (""pdf"", ""png"") with TemporaryDirectory() as tmpvizdir: for viz_output in viz_outputs: vis_output_pattern_pdf = os.path.join(tmpvizdir, f""*.{viz_output}"") visualize.calibration_1_vs_all( [probabilities, probabilities], experiment.ground_truth, experiment.ground_truth_metadata, experiment.output_feature_name, top_n_classes=[6], labels_limit=0, model_namess=[""Model1"", ""Model2""], output_directory=tmpvizdir, file_format=viz_output, ) figure_cnt = glob.glob(vis_output_pattern_pdf) assert 7 == len(figure_cnt) ","Ensure pdf and png figures can be saved via visualization API call. :param experiment_to_use: Object containing trained model and results to test visualization :return: None ",25,44,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_calibration_1_vs_all_vis_api(experiment_to_use): experiment = experiment_to_use probabilities = experiment.probabilities viz_outputs = (""pdf"", ""png"") with TemporaryDirectory() as tmpvizdir: for viz_output in viz_outputs: vis_output_pattern_pdf = os.path.join(tmpvizdir, f""*.{viz_output}"") visualize.calibration_1_vs_all( [probabilities, probabilities], experiment.ground_truth, experiment.ground_truth_metadata, experiment.output_feature_name, top_n_classes=[6], labels_limit=0, model_namess=[""Model1"", ""Model2""], output_directory=tmpvizdir, file_format=viz_output, ) figure_cnt = glob.glob(vis_output_pattern_pdf) assert 7 == len(figure_cnt) ``` ###Assistant : Ensure pdf and png figures can be saved via visualization API call. :param experiment_to_use: Object containing trained model and results to test visualization :return: None " 402,"def test_send_server_notice_delete_room(self) -> None: # user has no room memberships self._check_invite_and_join_status(self.other_user, 0, 0) # send first message channel = self.make_request( ""POST"", self.url, access_token=self.admin_user_tok, content={ ""user_id"": self.other_user, ""content"": {""msgtype"": ""m.text"", ""body"": ""test msg one""}, }, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) # user has one invite invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0) first_room_id = invited_rooms[0].room_id # user joins the room and is member now self.helper.join( room=first_room_id, user=self.other_user, tok=self.other_user_token ) self._check_invite_and_join_status(self.other_user, 0, 1) # get messages messages = self._sync_and_get_messages(first_room_id, self.other_user_token) self.assertEqual(len(messages), 1) self.assertEqual(messages[0][""content""][""body""], ""test msg one"") self.assertEqual(messages[0][""sender""], ""@notices:test"") # shut down and purge room self.get_success( self.room_shutdown_handler.shutdown_room(first_room_id, self.admin_user) ) self.get_success(self.pagination_handler.purge_room(first_room_id)) # user is not member anymore self._check_invite_and_join_status(self.other_user, 0, 0) # It doesn't really matter what API we use here, we just want to assert # that the room doesn't exist. summary = self.get_success(self.store.get_room_summary(first_room_id)) # The summary should be empty since the room doesn't exist. 
self.assertEqual(summary, {}) # invalidate cache of server notices room_ids # if server tries to send to a cached room_id it gives an error self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all() # send second message channel = self.make_request( ""POST"", self.url, access_token=self.admin_user_tok, content={ ""user_id"": self.other_user, ""content"": {""msgtype"": ""m.text"", ""body"": ""test msg two""}, }, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) # user has one invite invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0) second_room_id = invited_rooms[0].room_id # user joins the room and is member now self.helper.join( room=second_room_id, user=self.other_user, tok=self.other_user_token ) self._check_invite_and_join_status(self.other_user, 0, 1) # get message messages = self._sync_and_get_messages(second_room_id, self.other_user_token) self.assertEqual(len(messages), 1) self.assertEqual(messages[0][""content""][""body""], ""test msg two"") self.assertEqual(messages[0][""sender""], ""@notices:test"") # second room has new ID self.assertNotEqual(first_room_id, second_room_id) "," Tests that the user gets server notice in a new room after the first server notice room was deleted. ",19,240,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_send_server_notice_delete_room(self) -> None: # user has no room memberships self._check_invite_and_join_status(self.other_user, 0, 0) # send first message channel = self.make_request( ""POST"", self.url, access_token=self.admin_user_tok, content={ ""user_id"": self.other_user, ""content"": {""msgtype"": ""m.text"", ""body"": ""test msg one""}, }, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) # user has one invite invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0) first_room_id = invited_rooms[0].room_id # user joins the room and is member now self.helper.join( room=first_room_id, user=self.other_user, tok=self.other_user_token ) self._check_invite_and_join_status(self.other_user, 0, 1) # get messages messages = self._sync_and_get_messages(first_room_id, self.other_user_token) self.assertEqual(len(messages), 1) self.assertEqual(messages[0][""content""][""body""], ""test msg one"") self.assertEqual(messages[0][""sender""], ""@notices:test"") # shut down and purge room self.get_success( self.room_shutdown_handler.shutdown_room(first_room_id, self.admin_user) ) self.get_success(self.pagination_handler.purge_room(first_room_id)) # user is not member anymore self._check_invite_and_join_status(self.other_user, 0, 0) # It doesn't really matter what API we use here, we just want to assert # that the room doesn't exist. summary = self.get_success(self.store.get_room_summary(first_room_id)) # The summary should be empty since the room doesn't exist. 
self.assertEqual(summary, {}) # invalidate cache of server notices room_ids # if server tries to send to a cached room_id it gives an error self.server_notices_manager.get_or_create_notice_room_for_user.invalidate_all() # send second message channel = self.make_request( ""POST"", self.url, access_token=self.admin_user_tok, content={ ""user_id"": self.other_user, ""content"": {""msgtype"": ""m.text"", ""body"": ""test msg two""}, }, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) # user has one invite invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0) second_room_id = invited_rooms[0].room_id # user joins the room and is member now self.helper.join( room=second_room_id, user=self.other_user, tok=self.other_user_token ) self._check_invite_and_join_status(self.other_user, 0, 1) # get message messages = self._sync_and_get_messages(second_room_id, self.other_user_token) self.assertEqual(len(messages), 1) self.assertEqual(messages[0][""content""][""body""], ""test msg two"") self.assertEqual(messages[0][""sender""], ""@notices:test"") # second room has new ID self.assertNotEqual(first_room_id, second_room_id) ``` ###Assistant : Tests that the user gets server notice in a new room after the first server notice room was deleted. " 403,"def is_request_failed_response(resp): return len( resp.get('failures', []) ) > 0 or APIUtils.METASEQ_FAIL_MESSAGE_TEXT in resp.get('text', '') "," Whether the requests to Metaseq worker have failed. It checks this based on the existence of the failure reasons as they get accumulated in `_make_request` function calls. ",27,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_request_failed_response(resp): return len( resp.get('failures', []) ) > 0 or APIUtils.METASEQ_FAIL_MESSAGE_TEXT in resp.get('text', '') ``` ###Assistant : Whether the requests to Metaseq worker have failed. It checks this based on the existence of the failure reasons as they get accumulated in `_make_request` function calls. " 404,"def draw(self, renderer): if not self.get_visible(): return self._recompute_transform() width = self.convert_xunits(self.width) height = self.convert_yunits(self.height) # If the width and height of ellipse are not equal, take into account # stretching when calculating angles to draw between"," Draw the arc to the given *renderer*. Notes ----- Ellipses are normally drawn using an approximation that uses eight cubic Bezier splines. The error of this approximation is 1.89818e-6, according to this unverified source: Lancaster, Don. *Approximating a Circle or an Ellipse Using Four Bezier Cubic Splines.* https://www.tinaja.com/glib/ellipse4.pdf There is a use case where very large ellipses must be drawn with very high accuracy, and it is too expensive to render the entire ellipse with enough segments (either splines or line segments). Therefore, in the case where either radius of the ellipse is large enough that the error of the spline approximation will be visible (greater than one pixel offset from the ideal), a different technique is used. In that case, only the visible parts of the ellipse are drawn, with each visible arc using a fixed number of spline segments (8). The algorithm proceeds as follows: 1. The points where the ellipse intersects the axes (or figure) bounding box are located. 
(This is done by performing an inverse transformation on the bbox such that it is relative to the unit circle -- this makes the intersection calculation much easier than doing rotated ellipse intersection directly.) This uses the ""line intersecting a circle"" algorithm from: Vince, John. *Geometry for Computer Graphics: Formulae, Examples & Proofs.* London: Springer-Verlag, 2005. 2. The angles of each of the intersection points are calculated. 3. Proceeding counterclockwise starting in the positive x-direction, each of the visible arc-segments between the pairs of vertices are drawn using the Bezier arc approximation technique implemented in `.Path.arc`. ",258,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def draw(self, renderer): if not self.get_visible(): return self._recompute_transform() width = self.convert_xunits(self.width) height = self.convert_yunits(self.height) # If the width and height of ellipse are not equal, take into account # stretching when calculating angles to draw between ``` ###Assistant : Draw the arc to the given *renderer*. Notes ----- Ellipses are normally drawn using an approximation that uses eight cubic Bezier splines. The error of this approximation is 1.89818e-6, according to this unverified source: Lancaster, Don. *Approximating a Circle or an Ellipse Using Four Bezier Cubic Splines.* https://www.tinaja.com/glib/ellipse4.pdf There is a use case where very large ellipses must be drawn with very high accuracy, and it is too expensive to render the entire ellipse with enough segments (either splines or line segments). Therefore, in the case where either radius of the ellipse is large enough that the error of the spline approximation will be visible (greater than one pixel offset from the ideal), a different technique is used. In that case, only the visible parts of the ellipse are drawn, with each visible arc using a fixed number of spline segments (8). The algorithm proceeds as follows: 1. The points where the ellipse intersects the axes (or figure) bounding box are located. (This is done by performing an inverse transformation on the bbox such that it is relative to the unit circle -- this makes the intersection calculation much easier than doing rotated ellipse intersection directly.) This uses the ""line intersecting a circle"" algorithm from: Vince, John. *Geometry for Computer Graphics: Formulae, Examples & Proofs.* London: Springer-Verlag, 2005. 2. The angles of each of the intersection points are calculated. 3. Proceeding counterclockwise starting in the positive x-direction, each of the visible arc-segments between the pairs of vertices are drawn using the Bezier arc approximation technique implemented in `.Path.arc`. " 405,"def validate_parameter_constraints(parameter_constraints, params, caller_name): for param_name, param_val in params.items(): # We allow parameters to not have a constraint so that third party estimators # can inherit from sklearn estimators without having to necessarily use the # validation tools. if param_name not in parameter_constraints: continue constraints = parameter_constraints[param_name] if constraints == ""no_validation"": continue constraints = [make_constraint(constraint) for constraint in constraints] for constraint in constraints: if constraint.is_satisfied_by(param_val): # this constraint is satisfied, no need to check further. break else: # No constraint is satisfied, raise with an informative message. 
# Ignore constraints that we don't want to expose in the error message, # i.e. options that are for internal purpose or not officially supported. constraints = [ constraint for constraint in constraints if not constraint.hidden ] if len(constraints) == 1: constraints_str = f""{constraints[0]}"" else: constraints_str = ( f""{', '.join([str(c) for c in constraints[:-1]])} or"" f"" {constraints[-1]}"" ) raise ValueError( f""The {param_name!r} parameter of {caller_name} must be"" f"" {constraints_str}. Got {param_val!r} instead."" ) ","Validate types and values of given parameters. Parameters ---------- parameter_constraints : dict or {""no_validation""} If ""no_validation"", validation is skipped for this parameter. If a dict, it must be a dictionary `param_name: list of constraints`. A parameter is valid if it satisfies one of the constraints from the list. Constraints can be: - an Interval object, representing a continuous or discrete range of numbers - the string ""array-like"" - the string ""sparse matrix"" - the string ""random_state"" - callable - None, meaning that None is a valid value for the parameter - any type, meaning that any instance of this type is valid - an Options object, representing a set of elements of a given type - a StrOptions object, representing a set of strings - the string ""boolean"" - the string ""verbose"" - the string ""cv_object"" - the string ""missing_values"" - a HasMethods object, representing method(s) an object must have - a Hidden object, representing a constraint not meant to be exposed to the user params : dict A dictionary `param_name: param_value`. The parameters to validate against the constraints. caller_name : str The name of the estimator or function or method that called this function. ",195,160,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_parameter_constraints(parameter_constraints, params, caller_name): for param_name, param_val in params.items(): # We allow parameters to not have a constraint so that third party estimators # can inherit from sklearn estimators without having to necessarily use the # validation tools. if param_name not in parameter_constraints: continue constraints = parameter_constraints[param_name] if constraints == ""no_validation"": continue constraints = [make_constraint(constraint) for constraint in constraints] for constraint in constraints: if constraint.is_satisfied_by(param_val): # this constraint is satisfied, no need to check further. break else: # No constraint is satisfied, raise with an informative message. # Ignore constraints that we don't want to expose in the error message, # i.e. options that are for internal purpose or not officially supported. constraints = [ constraint for constraint in constraints if not constraint.hidden ] if len(constraints) == 1: constraints_str = f""{constraints[0]}"" else: constraints_str = ( f""{', '.join([str(c) for c in constraints[:-1]])} or"" f"" {constraints[-1]}"" ) raise ValueError( f""The {param_name!r} parameter of {caller_name} must be"" f"" {constraints_str}. Got {param_val!r} instead."" ) ``` ###Assistant : Validate types and values of given parameters. Parameters ---------- parameter_constraints : dict or {""no_validation""} If ""no_validation"", validation is skipped for this parameter. If a dict, it must be a dictionary `param_name: list of constraints`. A parameter is valid if it satisfies one of the constraints from the list. 
Constraints can be: - an Interval object, representing a continuous or discrete range of numbers - the string ""array-like"" - the string ""sparse matrix"" - the string ""random_state"" - callable - None, meaning that None is a valid value for the parameter - any type, meaning that any instance of this type is valid - an Options object, representing a set of elements of a given type - a StrOptions object, representing a set of strings - the string ""boolean"" - the string ""verbose"" - the string ""cv_object"" - the string ""missing_values"" - a HasMethods object, representing method(s) an object must have - a Hidden object, representing a constraint not meant to be exposed to the user params : dict A dictionary `param_name: param_value`. The parameters to validate against the constraints. caller_name : str The name of the estimator or function or method that called this function. " 406,"def _get_module_collection_mode(mode_dict, name): mode = 'pyc' # Default mode # No settings available - return default. if not mode_dict: return mode # Search the parent modules/packages in top-down fashion, and take the last given setting. This ensures that # a setting given for the top-level package is recursively propagated to all its subpackages and submodules, # but also allows individual sub-modules to override the setting again. name_parts = name.split('.') for i in range(len(name_parts)): modlevel = ""."".join(name_parts[:i + 1]) modlevel_mode = mode_dict.get(modlevel, None) if modlevel_mode is not None: mode = modlevel_mode return mode "," Determine the module/package collection mode for the given module name , based on the provided collection mode settings dictionary. ",19,92,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_module_collection_mode(mode_dict, name): mode = 'pyc' # Default mode # No settings available - return default. if not mode_dict: return mode # Search the parent modules/packages in top-down fashion, and take the last given setting. This ensures that # a setting given for the top-level package is recursively propagated to all its subpackages and submodules, # but also allows individual sub-modules to override the setting again. name_parts = name.split('.') for i in range(len(name_parts)): modlevel = ""."".join(name_parts[:i + 1]) modlevel_mode = mode_dict.get(modlevel, None) if modlevel_mode is not None: mode = modlevel_mode return mode ``` ###Assistant : Determine the module/package collection mode for the given module name , based on the provided collection mode settings dictionary. " 407,"def exact_gaussian_kernel(x, y, stddev): r x_aligned, y_aligned = _align_matrices(x, y) diff_squared_l2_norm = tf.reduce_sum( tf.math.squared_difference(x_aligned, y_aligned), 2 ) return tf.exp(-diff_squared_l2_norm / (2 * stddev * stddev)) ","Computes exact Gaussian kernel value(s) for tensors x and y and stddev. The Gaussian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v||^2 / (2* stddev^2)) where the norm is the l2-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim]. 
stddev: The width of the Gaussian kernel. Returns: A single value (scalar) with shape (1, 1) (if x, y are vectors) or a matrix of shape (m, n) with entries K(u, v) (where K is the Gaussian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible. ",196,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def exact_gaussian_kernel(x, y, stddev): r x_aligned, y_aligned = _align_matrices(x, y) diff_squared_l2_norm = tf.reduce_sum( tf.math.squared_difference(x_aligned, y_aligned), 2 ) return tf.exp(-diff_squared_l2_norm / (2 * stddev * stddev)) ``` ###Assistant : Computes exact Gaussian kernel value(s) for tensors x and y and stddev. The Gaussian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v||^2 / (2* stddev^2)) where the norm is the l2-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim]. stddev: The width of the Gaussian kernel. Returns: A single value (scalar) with shape (1, 1) (if x, y are vectors) or a matrix of shape (m, n) with entries K(u, v) (where K is the Gaussian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible. " 408,"def enqueue_job(cls, func, name, obj_type, user, schedule_at=None, *args, **kwargs): job_result: JobResult = cls.objects.create( name=name, obj_type=obj_type, user=user, job_id=uuid.uuid4() ) queue = django_rq.get_queue(""default"") if schedule_at: job_result.status = JobResultStatusChoices.STATUS_SCHEDULED job_result.scheduled_time = schedule_at job_result.save() queue.enqueue_at(schedule_at, func, job_id=str(job_result.job_id), job_result=job_result, **kwargs) else: queue.enqueue(func, job_id=str(job_result.job_id), job_result=job_result, **kwargs) return job_result "," Create a JobResult instance and enqueue a job using the given callable func: The callable object to be enqueued for execution name: Name for the JobResult instance obj_type: ContentType to link to the JobResult instance obj_type user: User object to link to the JobResult instance schedule_at: Schedule the job to be executed at the passed date and time args: additional args passed to the callable kwargs: additional kargs passed to the callable ",72,42,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def enqueue_job(cls, func, name, obj_type, user, schedule_at=None, *args, **kwargs): job_result: JobResult = cls.objects.create( name=name, obj_type=obj_type, user=user, job_id=uuid.uuid4() ) queue = django_rq.get_queue(""default"") if schedule_at: job_result.status = JobResultStatusChoices.STATUS_SCHEDULED job_result.scheduled_time = schedule_at job_result.save() queue.enqueue_at(schedule_at, func, job_id=str(job_result.job_id), job_result=job_result, **kwargs) else: queue.enqueue(func, job_id=str(job_result.job_id), job_result=job_result, **kwargs) return job_result ``` ###Assistant : Create a JobResult instance and enqueue a job using the given callable func: The callable object to be enqueued for execution name: Name for the JobResult instance obj_type: ContentType to link to the JobResult instance obj_type user: User object to link to the JobResult instance schedule_at: Schedule the job to be executed at the passed date and time args: additional args passed to the callable kwargs: additional kargs passed to the callable " 409,"def text_style(self) -> Style: # TODO: Feels like there may be opportunity for caching here. style = Style() for node in reversed(self.ancestors): style += node.styles.text_style return style ","Get the text style object. A widget's style is influenced by its parent. For instance if a widgets background has an alpha, then its parent's background color will show through. Additionally, widgets will inherit their parent's text style (i.e. bold, italic etc). Returns: Style: Rich Style object. ",47,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def text_style(self) -> Style: # TODO: Feels like there may be opportunity for caching here. style = Style() for node in reversed(self.ancestors): style += node.styles.text_style return style ``` ###Assistant : Get the text style object. A widget's style is influenced by its parent. For instance if a widgets background has an alpha, then its parent's background color will show through. Additionally, widgets will inherit their parent's text style (i.e. bold, italic etc). Returns: Style: Rich Style object. " 410,"def get_app_list(self, request): app_dict = self._build_app_dict(request) # Sort the apps alphabetically. app_list = sorted(app_dict.values(), key=lambda x: x[""name""].lower()) # Sort the models alphabetically within each app. for app in app_list: app[""models""].sort(key=lambda x: x[""name""]) return app_list "," Return a sorted list of all the installed apps that have been registered in this site. ",16,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_app_list(self, request): app_dict = self._build_app_dict(request) # Sort the apps alphabetically. app_list = sorted(app_dict.values(), key=lambda x: x[""name""].lower()) # Sort the models alphabetically within each app. for app in app_list: app[""models""].sort(key=lambda x: x[""name""]) return app_list ``` ###Assistant : Return a sorted list of all the installed apps that have been registered in this site. " 411,"def find_library_file (self, dirs, lib, debug=0): raise NotImplementedError # -- Filename generation methods ----------------------------------- # The default implementation of the filename generating methods are # prejudiced towards the Unix/DOS/Windows view of the world: # * object files are named by replacing the source file extension # (eg. 
.c/.cpp -> .o/.obj) # * library files (shared or static) are named by plugging the # library name and extension into a format string, eg. # ""lib%s.%s"" % (lib_name, "".a"") for Unix static libraries # * executables are named by appending an extension (possibly # empty) to the program name: eg. progname + "".exe"" for # Windows # # To reduce redundant code, these methods expect to find # several attributes in the current object (presumably defined # as class attributes): # * src_extensions - # list of C/C++ source file extensions, eg. ['.c', '.cpp'] # * obj_extension - # object file extension, eg. '.o' or '.obj' # * static_lib_extension - # extension for static library files, eg. '.a' or '.lib' # * shared_lib_extension - # extension for shared library/object files, eg. '.so', '.dll' # * static_lib_format - # format string for generating static library filenames, # eg. 'lib%s.%s' or '%s.%s' # * shared_lib_format # format string for generating shared library filenames # (probably same as static_lib_format, since the extension # is one of the intended parameters to the format string) # * exe_extension - # extension for executable files, eg. '' or '.exe' ","Search the specified list of directories for a static or shared library file 'lib' and return the full path to that file. If 'debug' true, look for a debugging version (if that makes sense on the current platform). Return None if 'lib' wasn't found in any of the specified directories. ",50,241,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_library_file (self, dirs, lib, debug=0): raise NotImplementedError # -- Filename generation methods ----------------------------------- # The default implementation of the filename generating methods are # prejudiced towards the Unix/DOS/Windows view of the world: # * object files are named by replacing the source file extension # (eg. .c/.cpp -> .o/.obj) # * library files (shared or static) are named by plugging the # library name and extension into a format string, eg. # ""lib%s.%s"" % (lib_name, "".a"") for Unix static libraries # * executables are named by appending an extension (possibly # empty) to the program name: eg. progname + "".exe"" for # Windows # # To reduce redundant code, these methods expect to find # several attributes in the current object (presumably defined # as class attributes): # * src_extensions - # list of C/C++ source file extensions, eg. ['.c', '.cpp'] # * obj_extension - # object file extension, eg. '.o' or '.obj' # * static_lib_extension - # extension for static library files, eg. '.a' or '.lib' # * shared_lib_extension - # extension for shared library/object files, eg. '.so', '.dll' # * static_lib_format - # format string for generating static library filenames, # eg. 'lib%s.%s' or '%s.%s' # * shared_lib_format # format string for generating shared library filenames # (probably same as static_lib_format, since the extension # is one of the intended parameters to the format string) # * exe_extension - # extension for executable files, eg. '' or '.exe' ``` ###Assistant : Search the specified list of directories for a static or shared library file 'lib' and return the full path to that file. If 'debug' true, look for a debugging version (if that makes sense on the current platform). Return None if 'lib' wasn't found in any of the specified directories. 
" 412,"def test_simple_lock(self): # First to acquire this lock, so it should complete lock = self.get_success(self.store.try_acquire_lock(""name"", ""key"")) assert lock is not None # Enter the context manager self.get_success(lock.__aenter__()) # Attempting to acquire the lock again fails. lock2 = self.get_success(self.store.try_acquire_lock(""name"", ""key"")) self.assertIsNone(lock2) # Calling `is_still_valid` reports true. self.assertTrue(self.get_success(lock.is_still_valid())) # Drop the lock self.get_success(lock.__aexit__(None, None, None)) # We can now acquire the lock again. lock3 = self.get_success(self.store.try_acquire_lock(""name"", ""key"")) assert lock3 is not None self.get_success(lock3.__aenter__()) self.get_success(lock3.__aexit__(None, None, None)) ","Test that we can take out a lock and that while we hold it nobody else can take it out. ",20,74,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_simple_lock(self): # First to acquire this lock, so it should complete lock = self.get_success(self.store.try_acquire_lock(""name"", ""key"")) assert lock is not None # Enter the context manager self.get_success(lock.__aenter__()) # Attempting to acquire the lock again fails. lock2 = self.get_success(self.store.try_acquire_lock(""name"", ""key"")) self.assertIsNone(lock2) # Calling `is_still_valid` reports true. self.assertTrue(self.get_success(lock.is_still_valid())) # Drop the lock self.get_success(lock.__aexit__(None, None, None)) # We can now acquire the lock again. lock3 = self.get_success(self.store.try_acquire_lock(""name"", ""key"")) assert lock3 is not None self.get_success(lock3.__aenter__()) self.get_success(lock3.__aexit__(None, None, None)) ``` ###Assistant : Test that we can take out a lock and that while we hold it nobody else can take it out. " 413,"def set_family(self, family): if family is None: family = mpl.rcParams['font.family'] if isinstance(family, str): family = [family] self._family = family "," Change the font family. May be either an alias (generic name is CSS parlance), such as: 'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace', a real font name or a list of real font names. Real font names are not supported when :rc:`text.usetex` is `True`. Default: :rc:`font.family` ",45,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_family(self, family): if family is None: family = mpl.rcParams['font.family'] if isinstance(family, str): family = [family] self._family = family ``` ###Assistant : Change the font family. May be either an alias (generic name is CSS parlance), such as: 'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace', a real font name or a list of real font names. Real font names are not supported when :rc:`text.usetex` is `True`. Default: :rc:`font.family` " 414,"def update_ema(biased_ema, value, decay): biased_ema = biased_ema * decay + (1 - decay) * value return biased_ema "," calculate biased stat and unbiased stat in each step using exponential moving average method Parameters ---------- biased_ema : float previous stat value value : float current stat value decay : float the weight of previous stat value, larger means smoother curve Returns ------- float, float ",45,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def update_ema(biased_ema, value, decay): biased_ema = biased_ema * decay + (1 - decay) * value return biased_ema ``` ###Assistant : calculate biased stat and unbiased stat in each step using exponential moving average method Parameters ---------- biased_ema : float previous stat value value : float current stat value decay : float the weight of previous stat value, larger means smoother curve Returns ------- float, float " 415,"def npartitions(n, verbose=False): n = int(n) if n < 0: return 0 if n <= 5: return [1, 1, 2, 3, 5, 7][n] if '_factor' not in globals(): _pre() # Estimate number of bits in p(n). This formula could be tidied pbits = int(( math.pi*(2*n/3.)**0.5 - math.log(4*n))/math.log(10) + 1) * \ math.log(10, 2) prec = p = int(pbits*1.1 + 100) s = fzero M = max(6, int(0.24*n**0.5 + 4)) if M > 10**5: raise ValueError(""Input too big"") # Corresponds to n > 1.7e11 sq23pi = mpf_mul(mpf_sqrt(from_rational(2, 3, p), p), mpf_pi(p), p) sqrt8 = mpf_sqrt(from_int(8), p) for q in range(1, M): a = _a(n, q, p) d = _d(n, q, p, sq23pi, sqrt8) s = mpf_add(s, mpf_mul(a, d), prec) if verbose: print(""step"", q, ""of"", M, to_str(a, 10), to_str(d, 10)) # On average, the terms decrease rapidly in magnitude. # Dynamically reducing the precision greatly improves # performance. p = bitcount(abs(to_int(d))) + 50 return int(to_int(mpf_add(s, fhalf, prec))) __all__ = ['npartitions'] "," Calculate the partition function P(n), i.e. the number of ways that n can be written as a sum of positive integers. P(n) is computed using the Hardy-Ramanujan-Rademacher formula [1]_. The correctness of this implementation has been tested through $10^10$. Examples ======== >>> from sympy.ntheory import npartitions >>> npartitions(25) 1958 References ========== .. [1] http://mathworld.wolfram.com/PartitionFunctionP.html ",54,158,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def npartitions(n, verbose=False): n = int(n) if n < 0: return 0 if n <= 5: return [1, 1, 2, 3, 5, 7][n] if '_factor' not in globals(): _pre() # Estimate number of bits in p(n). This formula could be tidied pbits = int(( math.pi*(2*n/3.)**0.5 - math.log(4*n))/math.log(10) + 1) * \ math.log(10, 2) prec = p = int(pbits*1.1 + 100) s = fzero M = max(6, int(0.24*n**0.5 + 4)) if M > 10**5: raise ValueError(""Input too big"") # Corresponds to n > 1.7e11 sq23pi = mpf_mul(mpf_sqrt(from_rational(2, 3, p), p), mpf_pi(p), p) sqrt8 = mpf_sqrt(from_int(8), p) for q in range(1, M): a = _a(n, q, p) d = _d(n, q, p, sq23pi, sqrt8) s = mpf_add(s, mpf_mul(a, d), prec) if verbose: print(""step"", q, ""of"", M, to_str(a, 10), to_str(d, 10)) # On average, the terms decrease rapidly in magnitude. # Dynamically reducing the precision greatly improves # performance. p = bitcount(abs(to_int(d))) + 50 return int(to_int(mpf_add(s, fhalf, prec))) __all__ = ['npartitions'] ``` ###Assistant : Calculate the partition function P(n), i.e. the number of ways that n can be written as a sum of positive integers. P(n) is computed using the Hardy-Ramanujan-Rademacher formula [1]_. The correctness of this implementation has been tested through $10^10$. Examples ======== >>> from sympy.ntheory import npartitions >>> npartitions(25) 1958 References ========== .. 
[1] http://mathworld.wolfram.com/PartitionFunctionP.html " 416,"def build_sleep_command(self) -> list[str]: docker_pull(self.args, self.config.image) inspect = docker_image_inspect(self.args, self.config.image) return ['sh', '-c', f'sleep 60; exec {shlex.join(inspect.cmd)}'] "," Build and return the command to put the container to sleep. The sleep duration below was selected to: - Allow enough time to perform necessary operations in the container before waking it. - Make the delay obvious if the wake command doesn't run or succeed. - Avoid hanging indefinitely or for an unreasonably long time. NOTE: The container must have a POSIX-compliant default shell ""sh"" with a non-builtin ""sleep"" command. ",70,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def build_sleep_command(self) -> list[str]: docker_pull(self.args, self.config.image) inspect = docker_image_inspect(self.args, self.config.image) return ['sh', '-c', f'sleep 60; exec {shlex.join(inspect.cmd)}'] ``` ###Assistant : Build and return the command to put the container to sleep. The sleep duration below was selected to: - Allow enough time to perform necessary operations in the container before waking it. - Make the delay obvious if the wake command doesn't run or succeed. - Avoid hanging indefinitely or for an unreasonably long time. NOTE: The container must have a POSIX-compliant default shell ""sh"" with a non-builtin ""sleep"" command. " 417,"def test_crash_only_one_worker_during_sampling_but_ignore(self): config = ( pg.PGConfig() .rollouts( num_rollout_workers=2, num_envs_per_worker=3, # Ignore worker failures (continue with worker #2). ignore_worker_failures=True, ) .environment( env=CartPoleCrashing, env_config={ # Crash prob=80%. ""p_crash"": 0.8, # Only crash on worker with index 1. ""crash_on_worker_indices"": [1], # Make sure nothing happens during pre-checks. ""skip_env_checking"": True, }, ) .debugging(worker_cls=ForwardHealthCheckToEnvWorker) ) # Pre-checking disabled, so building the Algorithm is safe. algo = config.build() # Expect some errors being logged here, but in general, should continue # as we ignore worker failures. algo.train() # One worker has been removed -> Only one left. self.assertEqual(algo.workers.num_healthy_remote_workers(), 1) algo.stop() ","Expect some sub-envs to fail (and not recover), but ignore.",10,94,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_crash_only_one_worker_during_sampling_but_ignore(self): config = ( pg.PGConfig() .rollouts( num_rollout_workers=2, num_envs_per_worker=3, # Ignore worker failures (continue with worker #2). ignore_worker_failures=True, ) .environment( env=CartPoleCrashing, env_config={ # Crash prob=80%. ""p_crash"": 0.8, # Only crash on worker with index 1. ""crash_on_worker_indices"": [1], # Make sure nothing happens during pre-checks. ""skip_env_checking"": True, }, ) .debugging(worker_cls=ForwardHealthCheckToEnvWorker) ) # Pre-checking disabled, so building the Algorithm is safe. algo = config.build() # Expect some errors being logged here, but in general, should continue # as we ignore worker failures. 
" 418,"def test_ohe_infrequent_three_levels_drop_infrequent_errors(drop): X_train = np.array([[""a""] * 5 + [""b""] * 20 + [""c""] * 10 + [""d""] * 3]).T ohe = OneHotEncoder( handle_unknown=""infrequent_if_exist"", sparse=False, max_categories=3, drop=drop ) msg = f""Unable to drop category {drop[0]!r} from feature 0 because it is infrequent"" with pytest.raises(ValueError, match=msg): ohe.fit(X_train) ",Test three levels and dropping the infrequent category.,8,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_ohe_infrequent_three_levels_drop_infrequent_errors(drop): X_train = np.array([[""a""] * 5 + [""b""] * 20 + [""c""] * 10 + [""d""] * 3]).T ohe = OneHotEncoder( handle_unknown=""infrequent_if_exist"", sparse=False, max_categories=3, drop=drop ) msg = f""Unable to drop category {drop[0]!r} from feature 0 because it is infrequent"" with pytest.raises(ValueError, match=msg): ohe.fit(X_train) ``` ###Assistant : Test three levels and dropping the infrequent category. " 419,"def is_a_private_model(model): if model in PRIVATE_MODELS: return True # Wrapper, Encoder and Decoder are all privates if model.endswith(""Wrapper""): return True if model.endswith(""Encoder""): return True if model.endswith(""Decoder""): return True return False ",Returns True if the model should not be in the main init.,12,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_a_private_model(model): if model in PRIVATE_MODELS: return True # Wrapper, Encoder and Decoder are all privates if model.endswith(""Wrapper""): return True if model.endswith(""Encoder""): return True if model.endswith(""Decoder""): return True return False ``` ###Assistant : Returns True if the model should not be in the main init. " 420,"async def test_midnight_turnover_before_midnight_outside_period(hass): config = { ""binary_sensor"": [ {""platform"": ""tod"", ""name"": ""Night"", ""after"": ""22:00"", ""before"": ""5:00""} ] } await async_setup_component(hass, ""binary_sensor"", config) await hass.async_block_till_done() state = hass.states.get(""binary_sensor.night"") assert state.state == STATE_OFF @freeze_time(""2019-01-10 10:00:00-08:00"")",Test midnight turnover setting before midnight outside period.,8,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_midnight_turnover_before_midnight_outside_period(hass): config = { ""binary_sensor"": [ {""platform"": ""tod"", ""name"": ""Night"", ""after"": ""22:00"", ""before"": ""5:00""} ] } await async_setup_component(hass, ""binary_sensor"", config) await hass.async_block_till_done() state = hass.states.get(""binary_sensor.night"") assert state.state == STATE_OFF @freeze_time(""2019-01-10 10:00:00-08:00"") ``` ###Assistant : Test midnight turnover setting before midnight outside period. " 421,"def _check_if_cleared(self) -> None: if self._blocks is None: raise ValueError( ""This Dataset's blocks have been moved, which means that you "" ""can no longer use this Dataset."" ) ",Raise an error if this BlockList has been previously cleared.,10,28,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _check_if_cleared(self) -> None: if self._blocks is None: raise ValueError( ""This Dataset's blocks have been moved, which means that you "" ""can no longer use this Dataset."" ) ``` ###Assistant : Raise an error if this BlockList has been previously cleared. " 422,"def test_join_leave(self) -> None: channel = self.make_request(""GET"", ""/sync"", access_token=self.tok) self.assertEqual(channel.code, 200, channel.result) self.assertNotIn(self.excluded_room_id, channel.json_body[""rooms""][""join""]) self.assertIn(self.included_room_id, channel.json_body[""rooms""][""join""]) self.helper.leave(self.excluded_room_id, self.user_id, tok=self.tok) self.helper.leave(self.included_room_id, self.user_id, tok=self.tok) channel = self.make_request( ""GET"", ""/sync?since="" + channel.json_body[""next_batch""], access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.result) self.assertNotIn(self.excluded_room_id, channel.json_body[""rooms""][""leave""]) self.assertIn(self.included_room_id, channel.json_body[""rooms""][""leave""]) ","Tests that rooms are correctly excluded from the 'join' and 'leave' sections of sync responses. ",15,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_join_leave(self) -> None: channel = self.make_request(""GET"", ""/sync"", access_token=self.tok) self.assertEqual(channel.code, 200, channel.result) self.assertNotIn(self.excluded_room_id, channel.json_body[""rooms""][""join""]) self.assertIn(self.included_room_id, channel.json_body[""rooms""][""join""]) self.helper.leave(self.excluded_room_id, self.user_id, tok=self.tok) self.helper.leave(self.included_room_id, self.user_id, tok=self.tok) channel = self.make_request( ""GET"", ""/sync?since="" + channel.json_body[""next_batch""], access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.result) self.assertNotIn(self.excluded_room_id, channel.json_body[""rooms""][""leave""]) self.assertIn(self.included_room_id, channel.json_body[""rooms""][""leave""]) ``` ###Assistant : Tests that rooms are correctly excluded from the 'join' and 'leave' sections of sync responses. " 423,"def test_short_description_is_used_as_field_label(self): response = self.client.get(""/admin/modeladmintest/author/inspect/1/"") self.assertContains(response, ""Birth information"") self.assertNotContains(response, ""author_birth_string"") "," A custom field has been added to the inspect view's `inspect_view_fields` and since this field has a `short_description` we expect it to be used as the field's label, and not use the name of the function. ",36,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_short_description_is_used_as_field_label(self): response = self.client.get(""/admin/modeladmintest/author/inspect/1/"") self.assertContains(response, ""Birth information"") self.assertNotContains(response, ""author_birth_string"") ``` ###Assistant : A custom field has been added to the inspect view's `inspect_view_fields` and since this field has a `short_description` we expect it to be used as the field's label, and not use the name of the function. 
" 424,"def check_stock_uom_with_bin(item, stock_uom): if stock_uom == frappe.db.get_value(""Item"", item, ""stock_uom""): return ref_uom = frappe.db.get_value(""Stock Ledger Entry"", {""item_code"": item}, ""stock_uom"") if ref_uom: if cstr(ref_uom) != cstr(stock_uom): frappe.throw( _( ""Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You will need to create a new Item to use a different Default UOM."" ).format(item) ) bin_list = frappe.db.sql( , (item, stock_uom), as_dict=1, ) if bin_list: frappe.throw( _( ""Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You need to either cancel the linked documents or create a new Item."" ).format(item) ) # No SLE or documents against item. Bin UOM can be changed safely. frappe.db.sql(, (stock_uom, item)) "," select * from tabBin where item_code = %s and (reserved_qty > 0 or ordered_qty > 0 or indented_qty > 0 or planned_qty > 0) and stock_uom != %s update tabBin set stock_uom=%s where item_code=%s",34,127,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_stock_uom_with_bin(item, stock_uom): if stock_uom == frappe.db.get_value(""Item"", item, ""stock_uom""): return ref_uom = frappe.db.get_value(""Stock Ledger Entry"", {""item_code"": item}, ""stock_uom"") if ref_uom: if cstr(ref_uom) != cstr(stock_uom): frappe.throw( _( ""Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You will need to create a new Item to use a different Default UOM."" ).format(item) ) bin_list = frappe.db.sql( , (item, stock_uom), as_dict=1, ) if bin_list: frappe.throw( _( ""Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You need to either cancel the linked documents or create a new Item."" ).format(item) ) # No SLE or documents against item. Bin UOM can be changed safely. frappe.db.sql(, (stock_uom, item)) ``` ###Assistant : select * from tabBin where item_code = %s and (reserved_qty > 0 or ordered_qty > 0 or indented_qty > 0 or planned_qty > 0) and stock_uom != %s update tabBin set stock_uom=%s where item_code=%s " 425,"def fix_group_counters(): from django.db import connection click.echo(""Correcting Group.num_comments counter"") cursor = connection.cursor() cursor.execute( , [ActivityType.NOTE.value], ) @click.command() @click.option( ""--with-docs/--without-docs"", default=False, help=""Synchronize and repair embedded documentation. This "" ""is disabled by default."", ) @configuration"," UPDATE sentry_groupedmessage SET num_comments = ( SELECT COUNT(*) from sentry_activity WHERE type = %s and group_id = sentry_groupedmessage.id ) ",19,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fix_group_counters(): from django.db import connection click.echo(""Correcting Group.num_comments counter"") cursor = connection.cursor() cursor.execute( , [ActivityType.NOTE.value], ) @click.command() @click.option( ""--with-docs/--without-docs"", default=False, help=""Synchronize and repair embedded documentation. 
This "" ""is disabled by default."", ) @configuration ``` ###Assistant : UPDATE sentry_groupedmessage SET num_comments = ( SELECT COUNT(*) from sentry_activity WHERE type = %s and group_id = sentry_groupedmessage.id ) " 426,"def get_filter(query=None, params=None, parser_config_overrides=None): # NOTE: this function assumes project permissions check already happened parsed_terms = [] if query is not None: try: parsed_terms = parse_search_query( query, params=params, config_overrides=parser_config_overrides ) except ParseError as e: raise InvalidSearchQuery(f""Parse error: {e.expr.name} (column {e.column():d})"") kwargs = { ""start"": None, ""end"": None, ""conditions"": [], ""having"": [], ""user_id"": None, ""organization_id"": None, ""team_id"": [], ""project_ids"": [], ""group_ids"": [], ""condition_aggregates"": [], ""aliases"": params.get(""aliases"", {}) if params is not None else {}, } projects_to_filter = [] if any( isinstance(term, ParenExpression) or SearchBoolean.is_operator(term) for term in parsed_terms ): ( condition, having, found_projects_to_filter, group_ids, ) = convert_search_boolean_to_snuba_query(parsed_terms, params) if condition: and_conditions = flatten_condition_tree(condition, SNUBA_AND) for func in and_conditions: kwargs[""conditions""].append(convert_function_to_condition(func)) if having: kwargs[""condition_aggregates""] = [ term.key.name for term in parsed_terms if isinstance(term, AggregateFilter) ] and_having = flatten_condition_tree(having, SNUBA_AND) for func in and_having: kwargs[""having""].append(convert_function_to_condition(func)) if found_projects_to_filter: projects_to_filter = list(set(found_projects_to_filter)) if group_ids is not None: kwargs[""group_ids""].extend(list(set(group_ids))) else: projects_to_filter = set() for term in parsed_terms: if isinstance(term, SearchFilter): conditions, found_projects_to_filter, group_ids = format_search_filter(term, params) if len(conditions) > 0: kwargs[""conditions""].extend(conditions) if found_projects_to_filter: projects_to_filter.update(found_projects_to_filter) if group_ids is not None: kwargs[""group_ids""].extend(group_ids) elif isinstance(term, AggregateFilter): converted_filter = convert_aggregate_filter_to_snuba_query(term, params) kwargs[""condition_aggregates""].append(term.key.name) if converted_filter: kwargs[""having""].append(converted_filter) projects_to_filter = list(projects_to_filter) # Keys included as url params take precedent if same key is included in search # They are also considered safe and to have had access rules applied unlike conditions # from the query string. 
if params: for key in (""start"", ""end""): kwargs[key] = params.get(key, None) if ""user_id"" in params: kwargs[""user_id""] = params[""user_id""] if ""organization_id"" in params: kwargs[""organization_id""] = params[""organization_id""] if ""team_id"" in params: kwargs[""team_id""] = params[""team_id""] # OrganizationEndpoint.get_filter() uses project_id, but eventstore.Filter uses project_ids if ""project_id"" in params: if projects_to_filter: kwargs[""project_ids""] = projects_to_filter else: kwargs[""project_ids""] = params[""project_id""] if ""environment"" in params: term = SearchFilter(SearchKey(""environment""), ""="", SearchValue(params[""environment""])) kwargs[""conditions""].append(convert_search_filter_to_snuba_query(term)) if ""group_ids"" in params: kwargs[""group_ids""] = to_list(params[""group_ids""]) # Deprecated alias, use `group_ids` instead if ISSUE_ID_ALIAS in params: kwargs[""group_ids""] = to_list(params[""issue.id""]) return eventstore.Filter(**kwargs) "," Returns an eventstore filter given the search text provided by the user and URL params ",15,307,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_filter(query=None, params=None, parser_config_overrides=None): # NOTE: this function assumes project permissions check already happened parsed_terms = [] if query is not None: try: parsed_terms = parse_search_query( query, params=params, config_overrides=parser_config_overrides ) except ParseError as e: raise InvalidSearchQuery(f""Parse error: {e.expr.name} (column {e.column():d})"") kwargs = { ""start"": None, ""end"": None, ""conditions"": [], ""having"": [], ""user_id"": None, ""organization_id"": None, ""team_id"": [], ""project_ids"": [], ""group_ids"": [], ""condition_aggregates"": [], ""aliases"": params.get(""aliases"", {}) if params is not None else {}, } projects_to_filter = [] if any( isinstance(term, ParenExpression) or SearchBoolean.is_operator(term) for term in parsed_terms ): ( condition, having, found_projects_to_filter, group_ids, ) = convert_search_boolean_to_snuba_query(parsed_terms, params) if condition: and_conditions = flatten_condition_tree(condition, SNUBA_AND) for func in and_conditions: kwargs[""conditions""].append(convert_function_to_condition(func)) if having: kwargs[""condition_aggregates""] = [ term.key.name for term in parsed_terms if isinstance(term, AggregateFilter) ] and_having = flatten_condition_tree(having, SNUBA_AND) for func in and_having: kwargs[""having""].append(convert_function_to_condition(func)) if found_projects_to_filter: projects_to_filter = list(set(found_projects_to_filter)) if group_ids is not None: kwargs[""group_ids""].extend(list(set(group_ids))) else: projects_to_filter = set() for term in parsed_terms: if isinstance(term, SearchFilter): conditions, found_projects_to_filter, group_ids = format_search_filter(term, params) if len(conditions) > 0: kwargs[""conditions""].extend(conditions) if found_projects_to_filter: projects_to_filter.update(found_projects_to_filter) if group_ids is not None: kwargs[""group_ids""].extend(group_ids) elif isinstance(term, AggregateFilter): converted_filter = convert_aggregate_filter_to_snuba_query(term, params) kwargs[""condition_aggregates""].append(term.key.name) if converted_filter: kwargs[""having""].append(converted_filter) projects_to_filter = list(projects_to_filter) # Keys included as url params take precedent if same key is included in search # They are also considered safe and to have had access 
rules applied unlike conditions # from the query string. if params: for key in (""start"", ""end""): kwargs[key] = params.get(key, None) if ""user_id"" in params: kwargs[""user_id""] = params[""user_id""] if ""organization_id"" in params: kwargs[""organization_id""] = params[""organization_id""] if ""team_id"" in params: kwargs[""team_id""] = params[""team_id""] # OrganizationEndpoint.get_filter() uses project_id, but eventstore.Filter uses project_ids if ""project_id"" in params: if projects_to_filter: kwargs[""project_ids""] = projects_to_filter else: kwargs[""project_ids""] = params[""project_id""] if ""environment"" in params: term = SearchFilter(SearchKey(""environment""), ""="", SearchValue(params[""environment""])) kwargs[""conditions""].append(convert_search_filter_to_snuba_query(term)) if ""group_ids"" in params: kwargs[""group_ids""] = to_list(params[""group_ids""]) # Deprecated alias, use `group_ids` instead if ISSUE_ID_ALIAS in params: kwargs[""group_ids""] = to_list(params[""issue.id""]) return eventstore.Filter(**kwargs) ``` ###Assistant : Returns an eventstore filter given the search text provided by the user and URL params " 427,"def reset(self): self.result = {""encoding"": None, ""confidence"": 0.0, ""language"": None} self.done = False self._got_data = False self._has_win_bytes = False self._input_state = InputState.PURE_ASCII self._last_char = b"""" if self._esc_charset_prober: self._esc_charset_prober.reset() if self._utf1632_prober: self._utf1632_prober.reset() for prober in self._charset_probers: prober.reset() "," Reset the UniversalDetector and all of its probers back to their initial states. This is called by ``__init__``, so you only need to call this directly in between analyses of different documents. ",32,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reset(self): self.result = {""encoding"": None, ""confidence"": 0.0, ""language"": None} self.done = False self._got_data = False self._has_win_bytes = False self._input_state = InputState.PURE_ASCII self._last_char = b"""" if self._esc_charset_prober: self._esc_charset_prober.reset() if self._utf1632_prober: self._utf1632_prober.reset() for prober in self._charset_probers: prober.reset() ``` ###Assistant : Reset the UniversalDetector and all of its probers back to their initial states. This is called by ``__init__``, so you only need to call this directly in between analyses of different documents. " 428,"def test_redis_socket_parsing(self): for input, expected in [ (None, (""redis://localhost:6379"", ""redis://localhost:6379"")), ( ""redis+socket:///run/redis/redis.sock"", ( ""redis+socket:///run/redis/redis.sock"", ""unix:///run/redis/redis.sock"", ), ), ( ""unix:///run/redis/redis.sock"", ( ""redis+socket:///run/redis/redis.sock"", ""unix:///run/redis/redis.sock"", ), ), ]: result = _parse_redis_url(input) self.assertTupleEqual(expected, result) "," GIVEN: - Various Redis connection URI formats WHEN: - The URI is parsed THEN: - Socket based URIs are translated - Non-socket URIs are unchanged - None provided uses default ",30,30,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_redis_socket_parsing(self): for input, expected in [ (None, (""redis://localhost:6379"", ""redis://localhost:6379"")), ( ""redis+socket:///run/redis/redis.sock"", ( ""redis+socket:///run/redis/redis.sock"", ""unix:///run/redis/redis.sock"", ), ), ( ""unix:///run/redis/redis.sock"", ( ""redis+socket:///run/redis/redis.sock"", ""unix:///run/redis/redis.sock"", ), ), ]: result = _parse_redis_url(input) self.assertTupleEqual(expected, result) ``` ###Assistant : GIVEN: - Various Redis connection URI formats WHEN: - The URI is parsed THEN: - Socket based URIs are translated - Non-socket URIs are unchanged - None provided uses default " 429,"def path_components(self) -> tuple[str, ...]: path = urllib.parse.urlparse(self.url).path # This needs to be a tuple so that it's immutable. # Otherwise, this would fail silently: # request.path_components.append(""foo"") return tuple(url.unquote(i) for i in path.split(""/"") if i) "," The URL's path components as a tuple of strings. Components are unquoted. ",12,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def path_components(self) -> tuple[str, ...]: path = urllib.parse.urlparse(self.url).path # This needs to be a tuple so that it's immutable. # Otherwise, this would fail silently: # request.path_components.append(""foo"") return tuple(url.unquote(i) for i in path.split(""/"") if i) ``` ###Assistant : The URL's path components as a tuple of strings. Components are unquoted. " 430,"def gen_batches(n, batch_size, *, min_batch_size=0): if not isinstance(batch_size, numbers.Integral): raise TypeError( ""gen_batches got batch_size=%s, must be an integer"" % batch_size ) if batch_size <= 0: raise ValueError(""gen_batches got batch_size=%s, must be positive"" % batch_size) start = 0 for _ in range(int(n // batch_size)): end = start + batch_size if end + min_batch_size > n: continue yield slice(start, end) start = end if start < n: yield slice(start, n) ","Generator to create slices containing `batch_size` elements from 0 to `n`. The last slice may contain less than `batch_size` elements, when `batch_size` does not divide `n`. Parameters ---------- n : int Size of the sequence. batch_size : int Number of elements in each batch. min_batch_size : int, default=0 Minimum number of elements in each batch. Yields ------ slice of `batch_size` elements See Also -------- gen_even_slices: Generator to create n_packs slices going up to n. Examples -------- >>> from sklearn.utils import gen_batches >>> list(gen_batches(7, 3)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(6, 3)) [slice(0, 3, None), slice(3, 6, None)] >>> list(gen_batches(2, 3)) [slice(0, 2, None)] >>> list(gen_batches(7, 3, min_batch_size=0)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(7, 3, min_batch_size=2)) [slice(0, 3, None), slice(3, 7, None)] ",131,68,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def gen_batches(n, batch_size, *, min_batch_size=0): if not isinstance(batch_size, numbers.Integral): raise TypeError( ""gen_batches got batch_size=%s, must be an integer"" % batch_size ) if batch_size <= 0: raise ValueError(""gen_batches got batch_size=%s, must be positive"" % batch_size) start = 0 for _ in range(int(n // batch_size)): end = start + batch_size if end + min_batch_size > n: continue yield slice(start, end) start = end if start < n: yield slice(start, n) ``` ###Assistant : Generator to create slices containing `batch_size` elements from 0 to `n`. The last slice may contain less than `batch_size` elements, when `batch_size` does not divide `n`. Parameters ---------- n : int Size of the sequence. batch_size : int Number of elements in each batch. min_batch_size : int, default=0 Minimum number of elements in each batch. Yields ------ slice of `batch_size` elements See Also -------- gen_even_slices: Generator to create n_packs slices going up to n. Examples -------- >>> from sklearn.utils import gen_batches >>> list(gen_batches(7, 3)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(6, 3)) [slice(0, 3, None), slice(3, 6, None)] >>> list(gen_batches(2, 3)) [slice(0, 2, None)] >>> list(gen_batches(7, 3, min_batch_size=0)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(7, 3, min_batch_size=2)) [slice(0, 3, None), slice(3, 7, None)] " 431,"def test_condinst_maskhead_loss(self): s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'pad_shape': (s, s, 3), 'scale_factor': 1, }] condinst_bboxhead = CondInstBboxHead( num_classes=4, in_channels=1, feat_channels=1, stacked_convs=1, norm_cfg=None) mask_feature_head = _fake_mask_feature_head() condinst_maskhead = CondInstMaskHead( mask_feature_head=mask_feature_head, loss_mask=dict( type='DiceLoss', use_sigmoid=True, activate=True, eps=5e-6, loss_weight=1.0)) # Fcos head expects a multiple levels of features per image feats = [] for i in range(len(condinst_bboxhead.strides)): feats.append( torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3)))) feats = tuple(feats) cls_scores, bbox_preds, centernesses, param_preds =\ condinst_bboxhead.forward(feats) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s) _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses, param_preds, [gt_instances], img_metas) # When truth is empty then all mask loss # should be zero for random inputs positive_infos = condinst_bboxhead.get_positive_infos() mask_outs = condinst_maskhead.forward(feats, positive_infos) empty_gt_mask_losses = condinst_maskhead.loss_by_feat( *mask_outs, [gt_instances], img_metas, positive_infos) loss_mask = empty_gt_mask_losses['loss_mask'] self.assertEqual(loss_mask, 0, 'mask loss should be zero') # When truth is non-empty then all cls, box loss and centerness loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s) _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses, param_preds, [gt_instances], img_metas) positive_infos = condinst_bboxhead.get_positive_infos() mask_outs = 
condinst_maskhead.forward(feats, positive_infos) one_gt_mask_losses = condinst_maskhead.loss_by_feat( *mask_outs, [gt_instances], img_metas, positive_infos) loss_mask = one_gt_mask_losses['loss_mask'] self.assertGreater(loss_mask, 0, 'mask loss should be nonzero') ",Tests condinst maskhead loss when truth is empty and non-empty.,10,228,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_condinst_maskhead_loss(self): s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'pad_shape': (s, s, 3), 'scale_factor': 1, }] condinst_bboxhead = CondInstBboxHead( num_classes=4, in_channels=1, feat_channels=1, stacked_convs=1, norm_cfg=None) mask_feature_head = _fake_mask_feature_head() condinst_maskhead = CondInstMaskHead( mask_feature_head=mask_feature_head, loss_mask=dict( type='DiceLoss', use_sigmoid=True, activate=True, eps=5e-6, loss_weight=1.0)) # Fcos head expects a multiple levels of features per image feats = [] for i in range(len(condinst_bboxhead.strides)): feats.append( torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3)))) feats = tuple(feats) cls_scores, bbox_preds, centernesses, param_preds =\ condinst_bboxhead.forward(feats) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s) _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses, param_preds, [gt_instances], img_metas) # When truth is empty then all mask loss # should be zero for random inputs positive_infos = condinst_bboxhead.get_positive_infos() mask_outs = condinst_maskhead.forward(feats, positive_infos) empty_gt_mask_losses = condinst_maskhead.loss_by_feat( *mask_outs, [gt_instances], img_metas, positive_infos) loss_mask = empty_gt_mask_losses['loss_mask'] self.assertEqual(loss_mask, 0, 'mask loss should be zero') # When truth is non-empty then all cls, box loss and centerness loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s) _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses, param_preds, [gt_instances], img_metas) positive_infos = condinst_bboxhead.get_positive_infos() mask_outs = condinst_maskhead.forward(feats, positive_infos) one_gt_mask_losses = condinst_maskhead.loss_by_feat( *mask_outs, [gt_instances], img_metas, positive_infos) loss_mask = one_gt_mask_losses['loss_mask'] self.assertGreater(loss_mask, 0, 'mask loss should be nonzero') ``` ###Assistant : Tests condinst maskhead loss when truth is empty and non-empty. " 432,"def test_ContinuousSelector_2(): cs = ContinuousSelector(threshold=5, svd_solver='randomized') X_transformed = cs.transform(iris_data[0:16, :]) assert_equal(X_transformed.shape[1],3) ",Assert that ContinuousSelector works as expected with threshold=5.,8,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_ContinuousSelector_2(): cs = ContinuousSelector(threshold=5, svd_solver='randomized') X_transformed = cs.transform(iris_data[0:16, :]) assert_equal(X_transformed.shape[1],3) ``` ###Assistant : Assert that ContinuousSelector works as expected with threshold=5. 
" 433,"def test_add_view(self): add_dict = { ""title"": ""Døm ikke"", ""content"": ""

great article

"", ""date_0"": ""2008-03-18"", ""date_1"": ""10:54:39"", ""section"": self.s1.pk, } # Change User should not have access to add articles self.client.force_login(self.changeuser) # make sure the view removes test cookie self.assertIs(self.client.session.test_cookie_worked(), False) response = self.client.get(reverse(""admin:admin_views_article_add"")) self.assertEqual(response.status_code, 403) # Try POST just to make sure post = self.client.post(reverse(""admin:admin_views_article_add""), add_dict) self.assertEqual(post.status_code, 403) self.assertEqual(Article.objects.count(), 3) self.client.get(reverse(""admin:logout"")) # View User should not have access to add articles self.client.force_login(self.viewuser) response = self.client.get(reverse(""admin:admin_views_article_add"")) self.assertEqual(response.status_code, 403) # Try POST just to make sure post = self.client.post(reverse(""admin:admin_views_article_add""), add_dict) self.assertEqual(post.status_code, 403) self.assertEqual(Article.objects.count(), 3) # Now give the user permission to add but not change. self.viewuser.user_permissions.add( get_perm(Article, get_permission_codename(""add"", Article._meta)) ) response = self.client.get(reverse(""admin:admin_views_article_add"")) self.assertEqual(response.context[""title""], ""Add article"") self.assertContains(response, ""Add article | Django site admin"") self.assertContains( response, '' ) post = self.client.post( reverse(""admin:admin_views_article_add""), add_dict, follow=False ) self.assertEqual(post.status_code, 302) self.assertEqual(Article.objects.count(), 4) article = Article.objects.latest(""pk"") response = self.client.get( reverse(""admin:admin_views_article_change"", args=(article.pk,)) ) self.assertContains( response, '
<li class=""success"">The article “Døm ikke” was added successfully.</li>', ) article.delete() self.client.get(reverse(""admin:logout"")) # Add user may login and POST to add view, then redirect to admin root self.client.force_login(self.adduser) addpage = self.client.get(reverse(""admin:admin_views_article_add"")) change_list_link = '&rsaquo; <a href=""%s"">
    Articles' % reverse( ""admin:admin_views_article_changelist"" ) self.assertNotContains( addpage, change_list_link, msg_prefix=""User restricted to add permission is given link to change list view in breadcrumbs."", ) post = self.client.post(reverse(""admin:admin_views_article_add""), add_dict) self.assertRedirects(post, self.index_url) self.assertEqual(Article.objects.count(), 4) self.assertEqual(len(mail.outbox), 2) self.assertEqual(mail.outbox[0].subject, ""Greetings from a created object"") self.client.get(reverse(""admin:logout"")) # The addition was logged correctly addition_log = LogEntry.objects.all()[0] new_article = Article.objects.last() article_ct = ContentType.objects.get_for_model(Article) self.assertEqual(addition_log.user_id, self.adduser.pk) self.assertEqual(addition_log.content_type_id, article_ct.pk) self.assertEqual(addition_log.object_id, str(new_article.pk)) self.assertEqual(addition_log.object_repr, ""Døm ikke"") self.assertEqual(addition_log.action_flag, ADDITION) self.assertEqual(addition_log.get_change_message(), ""Added."") # Super can add too, but is redirected to the change list view self.client.force_login(self.superuser) addpage = self.client.get(reverse(""admin:admin_views_article_add"")) self.assertContains( addpage, change_list_link, msg_prefix=""Unrestricted user is not given link to change list view in breadcrumbs."", ) post = self.client.post(reverse(""admin:admin_views_article_add""), add_dict) self.assertRedirects(post, reverse(""admin:admin_views_article_changelist"")) self.assertEqual(Article.objects.count(), 5) self.client.get(reverse(""admin:logout"")) # 8509 - if a normal user is already logged in, it is possible # to change user into the superuser without error self.client.force_login(self.joepublicuser) # Check and make sure that if user expires, data still persists self.client.force_login(self.superuser) # make sure the view removes test cookie self.assertIs(self.client.session.test_cookie_worked(), False) ",Test add view restricts access and actually adds items.,9,342,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_add_view(self): add_dict = { ""title"": ""Døm ikke"", ""content"": ""
<p>great article</p>
    "", ""date_0"": ""2008-03-18"", ""date_1"": ""10:54:39"", ""section"": self.s1.pk, } # Change User should not have access to add articles self.client.force_login(self.changeuser) # make sure the view removes test cookie self.assertIs(self.client.session.test_cookie_worked(), False) response = self.client.get(reverse(""admin:admin_views_article_add"")) self.assertEqual(response.status_code, 403) # Try POST just to make sure post = self.client.post(reverse(""admin:admin_views_article_add""), add_dict) self.assertEqual(post.status_code, 403) self.assertEqual(Article.objects.count(), 3) self.client.get(reverse(""admin:logout"")) # View User should not have access to add articles self.client.force_login(self.viewuser) response = self.client.get(reverse(""admin:admin_views_article_add"")) self.assertEqual(response.status_code, 403) # Try POST just to make sure post = self.client.post(reverse(""admin:admin_views_article_add""), add_dict) self.assertEqual(post.status_code, 403) self.assertEqual(Article.objects.count(), 3) # Now give the user permission to add but not change. self.viewuser.user_permissions.add( get_perm(Article, get_permission_codename(""add"", Article._meta)) ) response = self.client.get(reverse(""admin:admin_views_article_add"")) self.assertEqual(response.context[""title""], ""Add article"") self.assertContains(response, ""Add article | Django site admin"") self.assertContains( response, '' ) post = self.client.post( reverse(""admin:admin_views_article_add""), add_dict, follow=False ) self.assertEqual(post.status_code, 302) self.assertEqual(Article.objects.count(), 4) article = Article.objects.latest(""pk"") response = self.client.get( reverse(""admin:admin_views_article_change"", args=(article.pk,)) ) self.assertContains( response, '
<li class=""success"">The article “Døm ikke” was added successfully.</li>
  • ', ) article.delete() self.client.get(reverse(""admin:logout"")) # Add user may login and POST to add view, then redirect to admin root self.client.force_login(self.adduser) addpage = self.client.get(reverse(""admin:admin_views_article_add"")) change_list_link = '› Articles' % reverse( ""admin:admin_views_article_changelist"" ) self.assertNotContains( addpage, change_list_link, msg_prefix=""User restricted to add permission is given link to change list view in breadcrumbs."", ) post = self.client.post(reverse(""admin:admin_views_article_add""), add_dict) self.assertRedirects(post, self.index_url) self.assertEqual(Article.objects.count(), 4) self.assertEqual(len(mail.outbox), 2) self.assertEqual(mail.outbox[0].subject, ""Greetings from a created object"") self.client.get(reverse(""admin:logout"")) # The addition was logged correctly addition_log = LogEntry.objects.all()[0] new_article = Article.objects.last() article_ct = ContentType.objects.get_for_model(Article) self.assertEqual(addition_log.user_id, self.adduser.pk) self.assertEqual(addition_log.content_type_id, article_ct.pk) self.assertEqual(addition_log.object_id, str(new_article.pk)) self.assertEqual(addition_log.object_repr, ""Døm ikke"") self.assertEqual(addition_log.action_flag, ADDITION) self.assertEqual(addition_log.get_change_message(), ""Added."") # Super can add too, but is redirected to the change list view self.client.force_login(self.superuser) addpage = self.client.get(reverse(""admin:admin_views_article_add"")) self.assertContains( addpage, change_list_link, msg_prefix=""Unrestricted user is not given link to change list view in breadcrumbs."", ) post = self.client.post(reverse(""admin:admin_views_article_add""), add_dict) self.assertRedirects(post, reverse(""admin:admin_views_article_changelist"")) self.assertEqual(Article.objects.count(), 5) self.client.get(reverse(""admin:logout"")) # 8509 - if a normal user is already logged in, it is possible # to change user into the superuser without error self.client.force_login(self.joepublicuser) # Check and make sure that if user expires, data still persists self.client.force_login(self.superuser) # make sure the view removes test cookie self.assertIs(self.client.session.test_cookie_worked(), False) ``` ###Assistant : Test add view restricts access and actually adds items. " 434,"def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses? # type: (Candidate, str, ConcreteArtifactsManager) -> None b_artifact_path = ( artifacts_manager.get_artifact_path if collection.is_concrete_artifact else artifacts_manager.get_galaxy_artifact_path )(collection) collection_path = os.path.join(path, collection.namespace, collection.name) b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict') display.display( u""Installing '{coll!s}' to '{path!s}'"". format(coll=to_text(collection), path=collection_path), ) if os.path.exists(b_collection_path): shutil.rmtree(b_collection_path) if collection.is_dir: install_src(collection, b_artifact_path, b_collection_path, artifacts_manager) else: install_artifact( b_artifact_path, b_collection_path, artifacts_manager._b_working_directory, collection.signatures, artifacts_manager.keyring ) if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)): write_source_metadata( collection, b_collection_path, artifacts_manager ) display.display( '{coll!s} was installed successfully'. format(coll=to_text(collection)), ) ","Install a collection under a given path. 
:param collection: Collection to be installed. :param path: Collection dirs layout path. :param artifacts_manager: Artifacts manager. ",23,76,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses? # type: (Candidate, str, ConcreteArtifactsManager) -> None b_artifact_path = ( artifacts_manager.get_artifact_path if collection.is_concrete_artifact else artifacts_manager.get_galaxy_artifact_path )(collection) collection_path = os.path.join(path, collection.namespace, collection.name) b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict') display.display( u""Installing '{coll!s}' to '{path!s}'"". format(coll=to_text(collection), path=collection_path), ) if os.path.exists(b_collection_path): shutil.rmtree(b_collection_path) if collection.is_dir: install_src(collection, b_artifact_path, b_collection_path, artifacts_manager) else: install_artifact( b_artifact_path, b_collection_path, artifacts_manager._b_working_directory, collection.signatures, artifacts_manager.keyring ) if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)): write_source_metadata( collection, b_collection_path, artifacts_manager ) display.display( '{coll!s} was installed successfully'. format(coll=to_text(collection)), ) ``` ###Assistant : Install a collection under a given path. :param collection: Collection to be installed. :param path: Collection dirs layout path. :param artifacts_manager: Artifacts manager. " 435,"def edit(self, parameter_s='',last_call=['','']): opts,args = self.parse_options(parameter_s,'prxn:') try: filename, lineno, is_temp = self._find_edit_target(self.shell, args, opts, last_call) except MacroToEdit as e: self._edit_macro(args, e.args[0]) return except InteractivelyDefined as e: print(""Editing In[%i]"" % e.index) args = str(e.index) filename, lineno, is_temp = self._find_edit_target(self.shell, args, opts, last_call) if filename is None: # nothing was found, warnings have already been issued, # just give up. return if is_temp: self._knowntemps.add(filename) elif (filename in self._knowntemps): is_temp = True # do actual editing here print('Editing...', end=' ') sys.stdout.flush() filepath = Path(filename) try: # Quote filenames that may have spaces in them when opening # the editor quoted = filename = str(filepath.absolute()) if "" "" in quoted: quoted = ""'%s'"" % quoted self.shell.hooks.editor(quoted, lineno) except TryNext: warn('Could not open editor') return # XXX TODO: should this be generalized for all string vars? # For now, this is special-cased to blocks created by cpaste if args.strip() == ""pasted_block"": self.shell.user_ns[""pasted_block""] = filepath.read_text(encoding='utf-8') if 'x' in opts: # -x prevents actual execution print() else: print('done. Executing edited code...') with preserve_keys(self.shell.user_ns, '__file__'): if not is_temp: self.shell.user_ns['__file__'] = filename if 'r' in opts: # Untranslated IPython code source = filepath.read_text(encoding='utf-8') self.shell.run_cell(source, store_history=False) else: self.shell.safe_execfile(filename, self.shell.user_ns, self.shell.user_ns) if is_temp: try: return filepath.read_text(encoding='utf-8') except IOError as msg: if Path(msg.filename) == filepath: warn('File not found. Did you forget to save?') return else: self.shell.showtraceback() ","Bring up an editor and execute the resulting code. 
Usage: %edit [options] [args] %edit runs IPython's editor hook. The default version of this hook is set to call the editor specified by your $EDITOR environment variable. If this isn't found, it will default to vi under Linux/Unix and to notepad under Windows. See the end of this docstring for how to change the editor hook. You can also set the value of this editor via the ``TerminalInteractiveShell.editor`` option in your configuration file. This is useful if you wish to use a different editor from your typical default with IPython (and for Windows users who typically don't set environment variables). This command allows you to conveniently edit multi-line code right in your IPython session. If called without arguments, %edit opens up an empty editor with a temporary file and will execute the contents of this file when you close it (don't forget to save it!). Options: -n : open the editor at a specified line number. By default, the IPython editor hook uses the unix syntax 'editor +N filename', but you can configure this by providing your own modified hook if your favorite editor supports line-number specifications with a different syntax. -p: this will call the editor with the same data as the previous time it was used, regardless of how long ago (in your current session) it was. -r: use 'raw' input. This option only applies to input taken from the user's history. By default, the 'processed' history is used, so that magics are loaded in their transformed version to valid Python. If this option is given, the raw input as typed as the command line is used instead. When you exit the editor, it will be executed by IPython's own processor. -x: do not execute the edited code immediately upon exit. This is mainly useful if you are editing programs which need to be called with command line arguments, which you can then do using %run. Arguments: If arguments are given, the following possibilities exist: - If the argument is a filename, IPython will load that into the editor. It will execute its contents with execfile() when you exit, loading any code in the file into your interactive namespace. - The arguments are ranges of input history, e.g. ""7 ~1/4-6"". The syntax is the same as in the %history magic. - If the argument is a string variable, its contents are loaded into the editor. You can thus edit any string which contains python code (including the result of previous edits). - If the argument is the name of an object (other than a string), IPython will try to locate the file where it was defined and open the editor at the point where it is defined. You can use `%edit function` to load an editor exactly at the point where 'function' is defined, edit it and have the file be executed automatically. - If the object is a macro (see %macro for details), this opens up your specified editor with a temporary file containing the macro's data. Upon exit, the macro is reloaded with the contents of the file. Note: opening at an exact line is only supported under Unix, and some editors (like kedit and gedit up to Gnome 2.8) do not understand the '+NUMBER' parameter necessary for this feature. Good editors like (X)Emacs, vi, jed, pico and joe all do. After executing your code, %edit will return as output the code you typed in the editor (except when it was an existing file). This way you can reload the code in further invocations of %edit as a variable, via _ or Out[], where is the prompt number of the output. Note that %edit is also available through the alias %ed. 
This is an example of creating a simple function inside the editor and then modifying it. First, start up the editor:: In [1]: edit Editing... done. Executing edited code... Out[1]: 'def foo():\\n print ""foo() was defined in an editing session""\\n' We can then call the function foo():: In [2]: foo() foo() was defined in an editing session Now we edit foo. IPython automatically loads the editor with the (temporary) file where foo() was previously defined:: In [3]: edit foo Editing... done. Executing edited code... And if we call foo() again we get the modified version:: In [4]: foo() foo() has now been changed! Here is an example of how to edit a code snippet successive times. First we call the editor:: In [5]: edit Editing... done. Executing edited code... hello Out[5]: ""print 'hello'\\n"" Now we call it again with the previous output (stored in _):: In [6]: edit _ Editing... done. Executing edited code... hello world Out[6]: ""print 'hello world'\\n"" Now we call it with the output #8 (stored in _8, also as Out[8]):: In [7]: edit _8 Editing... done. Executing edited code... hello again Out[7]: ""print 'hello again'\\n"" Changing the default editor hook: If you wish to write your own editor hook, you can put it in a configuration file which you load at startup time. The default hook is defined in the IPython.core.hooks module, and you can use that as a starting example for further modifications. That file also has general instructions on how to set a new hook for use once you've defined it.",882,214,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def edit(self, parameter_s='',last_call=['','']): opts,args = self.parse_options(parameter_s,'prxn:') try: filename, lineno, is_temp = self._find_edit_target(self.shell, args, opts, last_call) except MacroToEdit as e: self._edit_macro(args, e.args[0]) return except InteractivelyDefined as e: print(""Editing In[%i]"" % e.index) args = str(e.index) filename, lineno, is_temp = self._find_edit_target(self.shell, args, opts, last_call) if filename is None: # nothing was found, warnings have already been issued, # just give up. return if is_temp: self._knowntemps.add(filename) elif (filename in self._knowntemps): is_temp = True # do actual editing here print('Editing...', end=' ') sys.stdout.flush() filepath = Path(filename) try: # Quote filenames that may have spaces in them when opening # the editor quoted = filename = str(filepath.absolute()) if "" "" in quoted: quoted = ""'%s'"" % quoted self.shell.hooks.editor(quoted, lineno) except TryNext: warn('Could not open editor') return # XXX TODO: should this be generalized for all string vars? # For now, this is special-cased to blocks created by cpaste if args.strip() == ""pasted_block"": self.shell.user_ns[""pasted_block""] = filepath.read_text(encoding='utf-8') if 'x' in opts: # -x prevents actual execution print() else: print('done. Executing edited code...') with preserve_keys(self.shell.user_ns, '__file__'): if not is_temp: self.shell.user_ns['__file__'] = filename if 'r' in opts: # Untranslated IPython code source = filepath.read_text(encoding='utf-8') self.shell.run_cell(source, store_history=False) else: self.shell.safe_execfile(filename, self.shell.user_ns, self.shell.user_ns) if is_temp: try: return filepath.read_text(encoding='utf-8') except IOError as msg: if Path(msg.filename) == filepath: warn('File not found. 
Did you forget to save?') return else: self.shell.showtraceback() ``` ###Assistant : Bring up an editor and execute the resulting code. Usage: %edit [options] [args] %edit runs IPython's editor hook. The default version of this hook is set to call the editor specified by your $EDITOR environment variable. If this isn't found, it will default to vi under Linux/Unix and to notepad under Windows. See the end of this docstring for how to change the editor hook. You can also set the value of this editor via the ``TerminalInteractiveShell.editor`` option in your configuration file. This is useful if you wish to use a different editor from your typical default with IPython (and for Windows users who typically don't set environment variables). This command allows you to conveniently edit multi-line code right in your IPython session. If called without arguments, %edit opens up an empty editor with a temporary file and will execute the contents of this file when you close it (don't forget to save it!). Options: -n : open the editor at a specified line number. By default, the IPython editor hook uses the unix syntax 'editor +N filename', but you can configure this by providing your own modified hook if your favorite editor supports line-number specifications with a different syntax. -p: this will call the editor with the same data as the previous time it was used, regardless of how long ago (in your current session) it was. -r: use 'raw' input. This option only applies to input taken from the user's history. By default, the 'processed' history is used, so that magics are loaded in their transformed version to valid Python. If this option is given, the raw input as typed as the command line is used instead. When you exit the editor, it will be executed by IPython's own processor. -x: do not execute the edited code immediately upon exit. This is mainly useful if you are editing programs which need to be called with command line arguments, which you can then do using %run. Arguments: If arguments are given, the following possibilities exist: - If the argument is a filename, IPython will load that into the editor. It will execute its contents with execfile() when you exit, loading any code in the file into your interactive namespace. - The arguments are ranges of input history, e.g. ""7 ~1/4-6"". The syntax is the same as in the %history magic. - If the argument is a string variable, its contents are loaded into the editor. You can thus edit any string which contains python code (including the result of previous edits). - If the argument is the name of an object (other than a string), IPython will try to locate the file where it was defined and open the editor at the point where it is defined. You can use `%edit function` to load an editor exactly at the point where 'function' is defined, edit it and have the file be executed automatically. - If the object is a macro (see %macro for details), this opens up your specified editor with a temporary file containing the macro's data. Upon exit, the macro is reloaded with the contents of the file. Note: opening at an exact line is only supported under Unix, and some editors (like kedit and gedit up to Gnome 2.8) do not understand the '+NUMBER' parameter necessary for this feature. Good editors like (X)Emacs, vi, jed, pico and joe all do. After executing your code, %edit will return as output the code you typed in the editor (except when it was an existing file). 
This way you can reload the code in further invocations of %edit as a variable, via _ or Out[], where is the prompt number of the output. Note that %edit is also available through the alias %ed. This is an example of creating a simple function inside the editor and then modifying it. First, start up the editor:: In [1]: edit Editing... done. Executing edited code... Out[1]: 'def foo():\\n print ""foo() was defined in an editing session""\\n' We can then call the function foo():: In [2]: foo() foo() was defined in an editing session Now we edit foo. IPython automatically loads the editor with the (temporary) file where foo() was previously defined:: In [3]: edit foo Editing... done. Executing edited code... And if we call foo() again we get the modified version:: In [4]: foo() foo() has now been changed! Here is an example of how to edit a code snippet successive times. First we call the editor:: In [5]: edit Editing... done. Executing edited code... hello Out[5]: ""print 'hello'\\n"" Now we call it again with the previous output (stored in _):: In [6]: edit _ Editing... done. Executing edited code... hello world Out[6]: ""print 'hello world'\\n"" Now we call it with the output #8 (stored in _8, also as Out[8]):: In [7]: edit _8 Editing... done. Executing edited code... hello again Out[7]: ""print 'hello again'\\n"" Changing the default editor hook: If you wish to write your own editor hook, you can put it in a configuration file which you load at startup time. The default hook is defined in the IPython.core.hooks module, and you can use that as a starting example for further modifications. That file also has general instructions on how to set a new hook for use once you've defined it. " 436,"def cell_length(self) -> int: # Done on demand and cached, as this is an O(n) operation if self._cell_length is None: self._cell_length = Segment.get_line_length(self._segments) return self._cell_length ",Get the number of cells required to render this object.,10,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cell_length(self) -> int: # Done on demand and cached, as this is an O(n) operation if self._cell_length is None: self._cell_length = Segment.get_line_length(self._segments) return self._cell_length ``` ###Assistant : Get the number of cells required to render this object. " 437,"def test_show_message_twice(view): view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test')) view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test')) assert len(view._messages) == 1 ",Show the same message twice -> only one should be shown.,11,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_show_message_twice(view): view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test')) view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test')) assert len(view._messages) == 1 ``` ###Assistant : Show the same message twice -> only one should be shown. " 438,"def create_default_plugins(request, placeholders, template, lang): from cms.api import add_plugin "," Create all default plugins for the given ``placeholders`` if they have a ""default_plugins"" configuration value in settings. return all plugins, children, grandchildren (etc.) created ",24,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def create_default_plugins(request, placeholders, template, lang): from cms.api import add_plugin ``` ###Assistant : Create all default plugins for the given ``placeholders`` if they have a ""default_plugins"" configuration value in settings. return all plugins, children, grandchildren (etc.) created " 439,"def _parse_jp2_header(fp): # Find the JP2 header box reader = BoxReader(fp) header = None mimetype = None while reader.has_next_box(): tbox = reader.next_box_type() if tbox == b""jp2h"": header = reader.read_boxes() break elif tbox == b""ftyp"": if reader.read_fields("">4s"")[0] == b""jpx "": mimetype = ""image/jpx"" size = None mode = None bpc = None nc = None dpi = None # 2-tuple of DPI info, or None while header.has_next_box(): tbox = header.next_box_type() if tbox == b""ihdr"": height, width, nc, bpc = header.read_fields("">IIHB"") size = (width, height) if nc == 1 and (bpc & 0x7F) > 8: mode = ""I;16"" elif nc == 1: mode = ""L"" elif nc == 2: mode = ""LA"" elif nc == 3: mode = ""RGB"" elif nc == 4: mode = ""RGBA"" elif tbox == b""res "": res = header.read_boxes() while res.has_next_box(): tres = res.next_box_type() if tres == b""resc"": vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields("">HHHHBB"") hres = _res_to_dpi(hrcn, hrcd, hrce) vres = _res_to_dpi(vrcn, vrcd, vrce) if hres is not None and vres is not None: dpi = (hres, vres) break if size is None or mode is None: raise SyntaxError(""Malformed JP2 header"") return size, mode, mimetype, dpi ## # Image plugin for JPEG2000 images. ","Parse the JP2 header box to extract size, component count, color space information, and optionally DPI information, returning a (size, mode, mimetype, dpi) tuple.",24,198,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _parse_jp2_header(fp): # Find the JP2 header box reader = BoxReader(fp) header = None mimetype = None while reader.has_next_box(): tbox = reader.next_box_type() if tbox == b""jp2h"": header = reader.read_boxes() break elif tbox == b""ftyp"": if reader.read_fields("">4s"")[0] == b""jpx "": mimetype = ""image/jpx"" size = None mode = None bpc = None nc = None dpi = None # 2-tuple of DPI info, or None while header.has_next_box(): tbox = header.next_box_type() if tbox == b""ihdr"": height, width, nc, bpc = header.read_fields("">IIHB"") size = (width, height) if nc == 1 and (bpc & 0x7F) > 8: mode = ""I;16"" elif nc == 1: mode = ""L"" elif nc == 2: mode = ""LA"" elif nc == 3: mode = ""RGB"" elif nc == 4: mode = ""RGBA"" elif tbox == b""res "": res = header.read_boxes() while res.has_next_box(): tres = res.next_box_type() if tres == b""resc"": vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields("">HHHHBB"") hres = _res_to_dpi(hrcn, hrcd, hrce) vres = _res_to_dpi(vrcn, vrcd, vrce) if hres is not None and vres is not None: dpi = (hres, vres) break if size is None or mode is None: raise SyntaxError(""Malformed JP2 header"") return size, mode, mimetype, dpi ## # Image plugin for JPEG2000 images. ``` ###Assistant : Parse the JP2 header box to extract size, component count, color space information, and optionally DPI information, returning a (size, mode, mimetype, dpi) tuple. 
" 440,"def _populate_events(self) -> None: self.get_success( self.store.db_pool.simple_upsert( ""rooms"", {""room_id"": self.room_id}, {""room_version"": RoomVersions.V4.identifier}, ) ) self.event_ids: List[str] = [] for idx in range(20): event_json = { ""type"": f""test {idx}"", ""room_id"": self.room_id, } event = make_event_from_dict(event_json, room_version=RoomVersions.V4) event_id = event.event_id self.get_success( self.store.db_pool.simple_upsert( ""events"", {""event_id"": event_id}, { ""event_id"": event_id, ""room_id"": self.room_id, ""topological_ordering"": idx, ""stream_ordering"": idx, ""type"": event.type, ""processed"": True, ""outlier"": False, }, ) ) self.get_success( self.store.db_pool.simple_upsert( ""event_json"", {""event_id"": event_id}, { ""room_id"": self.room_id, ""json"": json.dumps(event_json), ""internal_metadata"": ""{}"", ""format_version"": EventFormatVersions.V3, }, ) ) self.event_ids.append(event_id) ","Ensure that there are test events in the database. When testing with the in-memory SQLite database, all the events are lost during the simulated outage. To ensure consistency between `room_id`s and `event_id`s before and after the outage, rows are built and inserted manually. Upserts are used to handle the non-SQLite case where events are not lost. ",56,78,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _populate_events(self) -> None: self.get_success( self.store.db_pool.simple_upsert( ""rooms"", {""room_id"": self.room_id}, {""room_version"": RoomVersions.V4.identifier}, ) ) self.event_ids: List[str] = [] for idx in range(20): event_json = { ""type"": f""test {idx}"", ""room_id"": self.room_id, } event = make_event_from_dict(event_json, room_version=RoomVersions.V4) event_id = event.event_id self.get_success( self.store.db_pool.simple_upsert( ""events"", {""event_id"": event_id}, { ""event_id"": event_id, ""room_id"": self.room_id, ""topological_ordering"": idx, ""stream_ordering"": idx, ""type"": event.type, ""processed"": True, ""outlier"": False, }, ) ) self.get_success( self.store.db_pool.simple_upsert( ""event_json"", {""event_id"": event_id}, { ""room_id"": self.room_id, ""json"": json.dumps(event_json), ""internal_metadata"": ""{}"", ""format_version"": EventFormatVersions.V3, }, ) ) self.event_ids.append(event_id) ``` ###Assistant : Ensure that there are test events in the database. When testing with the in-memory SQLite database, all the events are lost during the simulated outage. To ensure consistency between `room_id`s and `event_id`s before and after the outage, rows are built and inserted manually. Upserts are used to handle the non-SQLite case where events are not lost. " 441,"def check_if_ctx_is_empty(ctx): return all(check_if_project_is_empty(project_ctx) for project_ctx in ctx.projects.values()) # The entry point. This task is scheduled to run every week. @instrumented_task( name=""sentry.tasks.weekly_reports.schedule_organizations"", queue=""reports.prepare"", max_retries=5, acks_late=True, )"," Check if the context is empty. If it is, we don't want to send an email. ",16,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_if_ctx_is_empty(ctx): return all(check_if_project_is_empty(project_ctx) for project_ctx in ctx.projects.values()) # The entry point. This task is scheduled to run every week. 
@instrumented_task( name=""sentry.tasks.weekly_reports.schedule_organizations"", queue=""reports.prepare"", max_retries=5, acks_late=True, ) ``` ###Assistant : Check if the context is empty. If it is, we don't want to send an email. " 442,"def __delitem__(self, name): name = name.lower() newheaders = [] for k, v in self._headers: if k.lower() != name: newheaders.append((k, v)) self._headers = newheaders ","Delete all occurrences of a header, if present. Does not raise an exception if the header is missing. ",18,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __delitem__(self, name): name = name.lower() newheaders = [] for k, v in self._headers: if k.lower() != name: newheaders.append((k, v)) self._headers = newheaders ``` ###Assistant : Delete all occurrences of a header, if present. Does not raise an exception if the header is missing. " 443,"def update_status(self): self.cleanup() ready = True while ready: # Use a loop as `ready` might return futures one by one ready, _ = ray.wait(list(self._staging_futures.keys()), timeout=0) for ready_fut in ready: self.handle_ready_future(ready_fut) ","Update placement group status. Moves ready placement groups from `self._staging` to `self._ready`. ",12,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_status(self): self.cleanup() ready = True while ready: # Use a loop as `ready` might return futures one by one ready, _ = ray.wait(list(self._staging_futures.keys()), timeout=0) for ready_fut in ready: self.handle_ready_future(ready_fut) ``` ###Assistant : Update placement group status. Moves ready placement groups from `self._staging` to `self._ready`. " 444,"def execute(): broken_sles = frappe.db.sql(, ( "" %"", # leading whitespace ""% "", # trailing whitespace ""%\n %"", # leading whitespace on newline ""% \n%"", # trailing whitespace on newline ), as_dict=True, ) frappe.db.MAX_WRITES_PER_TRANSACTION += len(broken_sles) if not broken_sles: return broken_serial_nos = set() for sle in broken_sles: serial_no_list = get_serial_nos(sle.serial_no) correct_sr_no = ""\n"".join(serial_no_list) if correct_sr_no == sle.serial_no: continue frappe.db.set_value(""Stock Ledger Entry"", sle.name, ""serial_no"", correct_sr_no, update_modified=False) broken_serial_nos.update(serial_no_list) if not broken_serial_nos: return broken_sr_no_records = [sr[0] for sr in frappe.db.sql(, (list(broken_serial_nos),) )] frappe.db.MAX_WRITES_PER_TRANSACTION += len(broken_sr_no_records) patch_savepoint = ""serial_no_patch"" for serial_no in broken_sr_no_records: try: frappe.db.savepoint(patch_savepoint) sn = frappe.get_doc(""Serial No"", serial_no) sn.update_serial_no_reference() sn.db_update() except Exception: frappe.db.rollback(save_point=patch_savepoint) "," select name, serial_no from `tabStock Ledger Entry` where is_cancelled = 0 and (serial_no like %s or serial_no like %s or serial_no like %s or serial_no like %s) select name from `tabSerial No` where status='Active' and coalesce(purchase_document_type, '') = '' and name in %s ",43,101,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def execute(): broken_sles = frappe.db.sql(, ( "" %"", # leading whitespace ""% "", # trailing whitespace ""%\n %"", # leading whitespace on newline ""% \n%"", # trailing whitespace on newline ), as_dict=True, ) frappe.db.MAX_WRITES_PER_TRANSACTION += len(broken_sles) if not broken_sles: return broken_serial_nos = set() for sle in broken_sles: serial_no_list = get_serial_nos(sle.serial_no) correct_sr_no = ""\n"".join(serial_no_list) if correct_sr_no == sle.serial_no: continue frappe.db.set_value(""Stock Ledger Entry"", sle.name, ""serial_no"", correct_sr_no, update_modified=False) broken_serial_nos.update(serial_no_list) if not broken_serial_nos: return broken_sr_no_records = [sr[0] for sr in frappe.db.sql(, (list(broken_serial_nos),) )] frappe.db.MAX_WRITES_PER_TRANSACTION += len(broken_sr_no_records) patch_savepoint = ""serial_no_patch"" for serial_no in broken_sr_no_records: try: frappe.db.savepoint(patch_savepoint) sn = frappe.get_doc(""Serial No"", serial_no) sn.update_serial_no_reference() sn.db_update() except Exception: frappe.db.rollback(save_point=patch_savepoint) ``` ###Assistant : select name, serial_no from `tabStock Ledger Entry` where is_cancelled = 0 and (serial_no like %s or serial_no like %s or serial_no like %s or serial_no like %s) select name from `tabSerial No` where status='Active' and coalesce(purchase_document_type, '') = '' and name in %s " 445,"def test_individual_caches_from_environ(self): config = {} self.config._environ = { ""SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER"": ""2"", ""SYNAPSE_NOT_CACHE"": ""BLAH"", } self.config.read_config(config, config_dir_path="""", data_dir_path="""") self.config.resize_all_caches() self.assertEqual(dict(self.config.cache_factors), {""something_or_other"": 2.0}) "," Individual cache factors will be loaded from the environment. ",9,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_individual_caches_from_environ(self): config = {} self.config._environ = { ""SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER"": ""2"", ""SYNAPSE_NOT_CACHE"": ""BLAH"", } self.config.read_config(config, config_dir_path="""", data_dir_path="""") self.config.resize_all_caches() self.assertEqual(dict(self.config.cache_factors), {""something_or_other"": 2.0}) ``` ###Assistant : Individual cache factors will be loaded from the environment. " 446,"def test_already_checked_on_success(self): pod_name = ""test-"" + str(random.randint(0, 1000000)) k = KubernetesPodOperator( namespace='default', image=""ubuntu:16.04"", cmds=[""bash"", ""-cx""], arguments=[""echo 10""], labels={""foo"": ""bar""}, name=pod_name, task_id=""task"" + self.get_current_task_name(), in_cluster=False, do_xcom_push=False, is_delete_operator_pod=False, ) context = create_context(k) k.execute(context) actual_pod = k.find_pod('default', context, exclude_checked=False) actual_pod = self.api_client.sanitize_for_serialization(actual_pod) assert actual_pod['metadata']['labels']['already_checked'] == 'True' "," When ``is_delete_operator_pod=False``, pod should have 'already_checked' label, whether pod is successful or not. ",13,43,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_already_checked_on_success(self): pod_name = ""test-"" + str(random.randint(0, 1000000)) k = KubernetesPodOperator( namespace='default', image=""ubuntu:16.04"", cmds=[""bash"", ""-cx""], arguments=[""echo 10""], labels={""foo"": ""bar""}, name=pod_name, task_id=""task"" + self.get_current_task_name(), in_cluster=False, do_xcom_push=False, is_delete_operator_pod=False, ) context = create_context(k) k.execute(context) actual_pod = k.find_pod('default', context, exclude_checked=False) actual_pod = self.api_client.sanitize_for_serialization(actual_pod) assert actual_pod['metadata']['labels']['already_checked'] == 'True' ``` ###Assistant : When ``is_delete_operator_pod=False``, pod should have 'already_checked' label, whether pod is successful or not. " 447,"def get_invoice_cc_wh_map(invoice_list): si_items = frappe.db.sql( % "", "".join([""%s""] * len(invoice_list)), tuple(inv.name for inv in invoice_list), as_dict=1, ) invoice_cc_wh_map = {} for d in si_items: if d.cost_center: invoice_cc_wh_map.setdefault(d.parent, frappe._dict()).setdefault(""cost_center"", []).append( d.cost_center ) if d.warehouse: invoice_cc_wh_map.setdefault(d.parent, frappe._dict()).setdefault(""warehouse"", []).append( d.warehouse ) return invoice_cc_wh_map ","select parent, cost_center, warehouse from `tabSales Invoice Item` where parent in (%s) and (ifnull(cost_center, '') != '' or ifnull(warehouse, '') != '')",22,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_invoice_cc_wh_map(invoice_list): si_items = frappe.db.sql( % "", "".join([""%s""] * len(invoice_list)), tuple(inv.name for inv in invoice_list), as_dict=1, ) invoice_cc_wh_map = {} for d in si_items: if d.cost_center: invoice_cc_wh_map.setdefault(d.parent, frappe._dict()).setdefault(""cost_center"", []).append( d.cost_center ) if d.warehouse: invoice_cc_wh_map.setdefault(d.parent, frappe._dict()).setdefault(""warehouse"", []).append( d.warehouse ) return invoice_cc_wh_map ``` ###Assistant : select parent, cost_center, warehouse from `tabSales Invoice Item` where parent in (%s) and (ifnull(cost_center, '') != '' or ifnull(warehouse, '') != '') " 448,"def parseSqliteTableSchema(value): retVal = False value = extractRegexResult(r""(?s)\((?P.+)\)"", value) if value: table = {} columns = OrderedDict() value = re.sub(r""\(.+?\)"", """", value).strip() for match in re.finditer(r""(?:\A|,)\s*(([\""'`]).+?\2|\w+)(?:\s+(INT|INTEGER|TINYINT|SMALLINT|MEDIUMINT|BIGINT|UNSIGNED BIG INT|INT2|INT8|INTEGER|CHARACTER|VARCHAR|VARYING CHARACTER|NCHAR|NATIVE CHARACTER|NVARCHAR|TEXT|CLOB|LONGTEXT|BLOB|NONE|REAL|DOUBLE|DOUBLE PRECISION|FLOAT|REAL|NUMERIC|DECIMAL|BOOLEAN|DATE|DATETIME|NUMERIC)\b)?"", decodeStringEscape(value), re.I): column = match.group(1).strip(match.group(2) or """") if re.search(r""(?i)\A(CONSTRAINT|PRIMARY|UNIQUE|CHECK|FOREIGN)\b"", column.strip()): continue retVal = True columns[column] = match.group(3) or ""TEXT"" table[safeSQLIdentificatorNaming(conf.tbl, True)] = columns kb.data.cachedColumns[conf.db] = table return retVal "," Parses table column names and types from specified SQLite table schema >>> kb.data.cachedColumns = {} >>> parseSqliteTableSchema(""CREATE TABLE users(\\n\\t\\tid INTEGER,\\n\\t\\tname TEXT\\n);"") True >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('id', 'INTEGER'), ('name', 'TEXT')) True >>> parseSqliteTableSchema(""CREATE TABLE dummy(`foo bar` BIGINT, \\""foo\\"" VARCHAR, 
'bar' TEXT)""); True >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('foo bar', 'BIGINT'), ('foo', 'VARCHAR'), ('bar', 'TEXT')) True >>> parseSqliteTableSchema(""CREATE TABLE suppliers(\\n\\tsupplier_id INTEGER PRIMARY KEY DESC,\\n\\tname TEXT NOT NULL\\n);""); True >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('supplier_id', 'INTEGER'), ('name', 'TEXT')) True >>> parseSqliteTableSchema(""CREATE TABLE country_languages (\\n\\tcountry_id INTEGER NOT NULL,\\n\\tlanguage_id INTEGER NOT NULL,\\n\\tPRIMARY KEY (country_id, language_id),\\n\\tFOREIGN KEY (country_id) REFERENCES countries (country_id) ON DELETE CASCADE ON UPDATE NO ACTION,\\tFOREIGN KEY (language_id) REFERENCES languages (language_id) ON DELETE CASCADE ON UPDATE NO ACTION);""); True >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('country_id', 'INTEGER'), ('language_id', 'INTEGER')) True ",119,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parseSqliteTableSchema(value): retVal = False value = extractRegexResult(r""(?s)\((?P.+)\)"", value) if value: table = {} columns = OrderedDict() value = re.sub(r""\(.+?\)"", """", value).strip() for match in re.finditer(r""(?:\A|,)\s*(([\""'`]).+?\2|\w+)(?:\s+(INT|INTEGER|TINYINT|SMALLINT|MEDIUMINT|BIGINT|UNSIGNED BIG INT|INT2|INT8|INTEGER|CHARACTER|VARCHAR|VARYING CHARACTER|NCHAR|NATIVE CHARACTER|NVARCHAR|TEXT|CLOB|LONGTEXT|BLOB|NONE|REAL|DOUBLE|DOUBLE PRECISION|FLOAT|REAL|NUMERIC|DECIMAL|BOOLEAN|DATE|DATETIME|NUMERIC)\b)?"", decodeStringEscape(value), re.I): column = match.group(1).strip(match.group(2) or """") if re.search(r""(?i)\A(CONSTRAINT|PRIMARY|UNIQUE|CHECK|FOREIGN)\b"", column.strip()): continue retVal = True columns[column] = match.group(3) or ""TEXT"" table[safeSQLIdentificatorNaming(conf.tbl, True)] = columns kb.data.cachedColumns[conf.db] = table return retVal ``` ###Assistant : Parses table column names and types from specified SQLite table schema >>> kb.data.cachedColumns = {} >>> parseSqliteTableSchema(""CREATE TABLE users(\\n\\t\\tid INTEGER,\\n\\t\\tname TEXT\\n);"") True >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('id', 'INTEGER'), ('name', 'TEXT')) True >>> parseSqliteTableSchema(""CREATE TABLE dummy(`foo bar` BIGINT, \\""foo\\"" VARCHAR, 'bar' TEXT)""); True >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('foo bar', 'BIGINT'), ('foo', 'VARCHAR'), ('bar', 'TEXT')) True >>> parseSqliteTableSchema(""CREATE TABLE suppliers(\\n\\tsupplier_id INTEGER PRIMARY KEY DESC,\\n\\tname TEXT NOT NULL\\n);""); True >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('supplier_id', 'INTEGER'), ('name', 'TEXT')) True >>> parseSqliteTableSchema(""CREATE TABLE country_languages (\\n\\tcountry_id INTEGER NOT NULL,\\n\\tlanguage_id INTEGER NOT NULL,\\n\\tPRIMARY KEY (country_id, language_id),\\n\\tFOREIGN KEY (country_id) REFERENCES countries (country_id) ON DELETE CASCADE ON UPDATE NO ACTION,\\tFOREIGN KEY (language_id) REFERENCES languages (language_id) ON DELETE CASCADE ON UPDATE NO ACTION);""); True >>> tuple(kb.data.cachedColumns[conf.db][conf.tbl].items()) == (('country_id', 'INTEGER'), ('language_id', 'INTEGER')) True " 449,"def get_phrase(value): phrase = Phrase() try: token, value = get_word(value) phrase.append(token) except errors.HeaderParseError: phrase.defects.append(errors.InvalidHeaderDefect( ""phrase does not start with word"")) while value and value[0] not in PHRASE_ENDS: if value[0]=='.': 
phrase.append(DOT) phrase.defects.append(errors.ObsoleteHeaderDefect( ""period in 'phrase'"")) value = value[1:] else: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] in CFWS_LEADER: token, value = get_cfws(value) phrase.defects.append(errors.ObsoleteHeaderDefect( ""comment found without atom"")) else: raise phrase.append(token) return phrase, value "," phrase = 1*word / obs-phrase obs-phrase = word *(word / ""."" / CFWS) This means a phrase can be a sequence of words, periods, and CFWS in any order as long as it starts with at least one word. If anything other than words is detected, an ObsoleteHeaderDefect is added to the token's defect list. We also accept a phrase that starts with CFWS followed by a dot; this is registered as an InvalidHeaderDefect, since it is not supported by even the obsolete grammar. ",84,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_phrase(value): phrase = Phrase() try: token, value = get_word(value) phrase.append(token) except errors.HeaderParseError: phrase.defects.append(errors.InvalidHeaderDefect( ""phrase does not start with word"")) while value and value[0] not in PHRASE_ENDS: if value[0]=='.': phrase.append(DOT) phrase.defects.append(errors.ObsoleteHeaderDefect( ""period in 'phrase'"")) value = value[1:] else: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] in CFWS_LEADER: token, value = get_cfws(value) phrase.defects.append(errors.ObsoleteHeaderDefect( ""comment found without atom"")) else: raise phrase.append(token) return phrase, value ``` ###Assistant : phrase = 1*word / obs-phrase obs-phrase = word *(word / ""."" / CFWS) This means a phrase can be a sequence of words, periods, and CFWS in any order as long as it starts with at least one word. If anything other than words is detected, an ObsoleteHeaderDefect is added to the token's defect list. We also accept a phrase that starts with CFWS followed by a dot; this is registered as an InvalidHeaderDefect, since it is not supported by even the obsolete grammar. 
" 450,"async def test_group_media_states(hass, mz_mock): entity_id = ""media_player.speaker"" reg = er.async_get(hass) info = get_fake_chromecast_info() chromecast, _ = await async_setup_media_player_cast(hass, info) _, conn_status_cb, media_status_cb, group_media_status_cb = get_status_callbacks( chromecast, mz_mock ) connection_status = MagicMock() connection_status.status = ""CONNECTED"" conn_status_cb(connection_status) await hass.async_block_till_done() state = hass.states.get(entity_id) assert state is not None assert state.name == ""Speaker"" assert state.state == ""off"" assert entity_id == reg.async_get_entity_id(""media_player"", ""cast"", str(info.uuid)) group_media_status = MagicMock(images=None) player_media_status = MagicMock(images=None) # Player has no state, group is buffering -> Should report 'buffering' group_media_status.player_state = ""BUFFERING"" group_media_status_cb(str(FakeGroupUUID), group_media_status) await hass.async_block_till_done() state = hass.states.get(entity_id) assert state.state == ""buffering"" # Player has no state, group is playing -> Should report 'playing' group_media_status.player_state = ""PLAYING"" group_media_status_cb(str(FakeGroupUUID), group_media_status) await hass.async_block_till_done() state = hass.states.get(entity_id) assert state.state == ""playing"" # Player is paused, group is playing -> Should report 'paused' player_media_status.player_state = None player_media_status.player_is_paused = True media_status_cb(player_media_status) await hass.async_block_till_done() await hass.async_block_till_done() state = hass.states.get(entity_id) assert state.state == ""paused"" # Player is in unknown state, group is playing -> Should report 'playing' player_media_status.player_state = ""UNKNOWN"" media_status_cb(player_media_status) await hass.async_block_till_done() state = hass.states.get(entity_id) assert state.state == ""playing"" ",Test media states are read from group if entity has no state.,12,172,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_group_media_states(hass, mz_mock): entity_id = ""media_player.speaker"" reg = er.async_get(hass) info = get_fake_chromecast_info() chromecast, _ = await async_setup_media_player_cast(hass, info) _, conn_status_cb, media_status_cb, group_media_status_cb = get_status_callbacks( chromecast, mz_mock ) connection_status = MagicMock() connection_status.status = ""CONNECTED"" conn_status_cb(connection_status) await hass.async_block_till_done() state = hass.states.get(entity_id) assert state is not None assert state.name == ""Speaker"" assert state.state == ""off"" assert entity_id == reg.async_get_entity_id(""media_player"", ""cast"", str(info.uuid)) group_media_status = MagicMock(images=None) player_media_status = MagicMock(images=None) # Player has no state, group is buffering -> Should report 'buffering' group_media_status.player_state = ""BUFFERING"" group_media_status_cb(str(FakeGroupUUID), group_media_status) await hass.async_block_till_done() state = hass.states.get(entity_id) assert state.state == ""buffering"" # Player has no state, group is playing -> Should report 'playing' group_media_status.player_state = ""PLAYING"" group_media_status_cb(str(FakeGroupUUID), group_media_status) await hass.async_block_till_done() state = hass.states.get(entity_id) assert state.state == ""playing"" # Player is paused, group is playing -> Should report 'paused' player_media_status.player_state = None player_media_status.player_is_paused = True media_status_cb(player_media_status) await hass.async_block_till_done() await hass.async_block_till_done() state = hass.states.get(entity_id) assert state.state == ""paused"" # Player is in unknown state, group is playing -> Should report 'playing' player_media_status.player_state = ""UNKNOWN"" media_status_cb(player_media_status) await hass.async_block_till_done() state = hass.states.get(entity_id) assert state.state == ""playing"" ``` ###Assistant : Test media states are read from group if entity has no state. " 451,"def test_deserialize(self): block = blocks.PageChooserBlock() christmas_page = Page.objects.get(slug='christmas') self.assertEqual(block.to_python(christmas_page.id), christmas_page) # None should deserialize to None self.assertIsNone(block.to_python(None)) ",The serialized value of a PageChooserBlock (an ID) should deserialize to a Page object,14,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_deserialize(self): block = blocks.PageChooserBlock() christmas_page = Page.objects.get(slug='christmas') self.assertEqual(block.to_python(christmas_page.id), christmas_page) # None should deserialize to None self.assertIsNone(block.to_python(None)) ``` ###Assistant : The serialized value of a PageChooserBlock (an ID) should deserialize to a Page object " 452,"def _is_current_explicit_device(device_type): device_type = device_type.upper() if device_type not in [""CPU"", ""GPU""]: raise ValueError('`device_type` should be either ""CPU"" or ""GPU"".') device = _get_current_tf_device() return device is not None and device.device_type == device_type.upper() ","Check if the current device is explicitly set on the device type specified. Args: device_type: A string containing `GPU` or `CPU` (case-insensitive). Returns: A boolean indicating if the current device scope is explicitly set on the device type. Raises: ValueError: If the `device_type` string indicates an unsupported device. 
",48,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _is_current_explicit_device(device_type): device_type = device_type.upper() if device_type not in [""CPU"", ""GPU""]: raise ValueError('`device_type` should be either ""CPU"" or ""GPU"".') device = _get_current_tf_device() return device is not None and device.device_type == device_type.upper() ``` ###Assistant : Check if the current device is explicitly set on the device type specified. Args: device_type: A string containing `GPU` or `CPU` (case-insensitive). Returns: A boolean indicating if the current device scope is explicitly set on the device type. Raises: ValueError: If the `device_type` string indicates an unsupported device. " 453,"def _return_datastructure_name(obj): if isinstance(obj, (text_type, binary_type)): if obj: yield to_native(obj, errors='surrogate_or_strict') return elif isinstance(obj, Mapping): for element in obj.items(): for subelement in _return_datastructure_name(element[1]): yield subelement elif is_iterable(obj): for element in obj: for subelement in _return_datastructure_name(element): yield subelement elif obj is None or isinstance(obj, bool): # This must come before int because bools are also ints return elif isinstance(obj, tuple(list(integer_types) + [float])): yield to_native(obj, nonstring='simplerepr') else: raise TypeError('Unknown parameter type: %s' % (type(obj))) "," Return native stringified values from datastructures. For use with removing sensitive values pre-jsonification.",13,72,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _return_datastructure_name(obj): if isinstance(obj, (text_type, binary_type)): if obj: yield to_native(obj, errors='surrogate_or_strict') return elif isinstance(obj, Mapping): for element in obj.items(): for subelement in _return_datastructure_name(element[1]): yield subelement elif is_iterable(obj): for element in obj: for subelement in _return_datastructure_name(element): yield subelement elif obj is None or isinstance(obj, bool): # This must come before int because bools are also ints return elif isinstance(obj, tuple(list(integer_types) + [float])): yield to_native(obj, nonstring='simplerepr') else: raise TypeError('Unknown parameter type: %s' % (type(obj))) ``` ###Assistant : Return native stringified values from datastructures. For use with removing sensitive values pre-jsonification. " 454,"async def __aiter__(self): waiter = self._waiter while True: # Shield the future from being cancelled by a task waiting on it message, ts, waiter = await asyncio.shield(waiter) yield message, ts "," Iterate over the messages in the message stream ",8,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def __aiter__(self): waiter = self._waiter while True: # Shield the future from being cancelled by a task waiting on it message, ts, waiter = await asyncio.shield(waiter) yield message, ts ``` ###Assistant : Iterate over the messages in the message stream " 455,"def get_gridspec(self): return self._subplotspec.get_gridspec() if self._subplotspec else None ","Return the `.GridSpec` associated with the subplot, or None.",9,8,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_gridspec(self): return self._subplotspec.get_gridspec() if self._subplotspec else None ``` ###Assistant : Return the `.GridSpec` associated with the subplot, or None. " 456,"def ESNet_x0_5(pretrained=False, use_ssld=False, **kwargs): model = ESNet(scale=0.5, stages_pattern=MODEL_STAGES_PATTERN[""ESNet""], **kwargs) return model "," ESNet_x0_5 Args: pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. If str, means the path of the pretrained model. use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. Returns: model: nn.Layer. Specific `ESNet_x0_5` model depends on args. ",40,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ESNet_x0_5(pretrained=False, use_ssld=False, **kwargs): model = ESNet(scale=0.5, stages_pattern=MODEL_STAGES_PATTERN[""ESNet""], **kwargs) return model ``` ###Assistant : ESNet_x0_5 Args: pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. If str, means the path of the pretrained model. use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. Returns: model: nn.Layer. Specific `ESNet_x0_5` model depends on args. " 457,"def require_rjieba(test_case): return unittest.skipUnless(is_rjieba_available(), ""test requires rjieba"")(test_case) "," Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed. ",15,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def require_rjieba(test_case): return unittest.skipUnless(is_rjieba_available(), ""test requires rjieba"")(test_case) ``` ###Assistant : Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed. " 458,"def get_yaxis(self): return self.yaxis get_xgridlines = _axis_method_wrapper(""xaxis"", ""get_gridlines"") get_xticklines = _axis_method_wrapper(""xaxis"", ""get_ticklines"") get_ygridlines = _axis_method_wrapper(""yaxis"", ""get_gridlines"") get_yticklines = _axis_method_wrapper(""yaxis"", ""get_ticklines"") # Adding and tracking artists "," [*Discouraged*] Return the YAxis instance. .. admonition:: Discouraged The use of this function is discouraged. You should instead directly access the attribute ``ax.yaxis``. ",23,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_yaxis(self): return self.yaxis get_xgridlines = _axis_method_wrapper(""xaxis"", ""get_gridlines"") get_xticklines = _axis_method_wrapper(""xaxis"", ""get_ticklines"") get_ygridlines = _axis_method_wrapper(""yaxis"", ""get_gridlines"") get_yticklines = _axis_method_wrapper(""yaxis"", ""get_ticklines"") # Adding and tracking artists ``` ###Assistant : [*Discouraged*] Return the YAxis instance. .. admonition:: Discouraged The use of this function is discouraged. You should instead directly access the attribute ``ax.yaxis``. " 459,"def disable_tf_random_generator(): global _USE_GENERATOR_FOR_RNG _USE_GENERATOR_FOR_RNG = False ","Disable the `tf.random.Generator` as the RNG for Keras. See `tf.keras.backend.experimental.is_tf_random_generator_enabled` for more details. ",13,7,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def disable_tf_random_generator(): global _USE_GENERATOR_FOR_RNG _USE_GENERATOR_FOR_RNG = False ``` ###Assistant : Disable the `tf.random.Generator` as the RNG for Keras. See `tf.keras.backend.experimental.is_tf_random_generator_enabled` for more details. " 460,"def test_parse_timezone(all_parsers): # see gh-22256 parser = all_parsers data = result = parser.read_csv(StringIO(data), parse_dates=[""dt""]) dti = DatetimeIndex( list( date_range( start=""2018-01-04 09:01:00"", end=""2018-01-04 09:05:00"", freq=""1min"", tz=timezone(timedelta(minutes=540)), ) ), freq=None, ) expected_data = {""dt"": dti, ""val"": [23350, 23400, 23400, 23400, 23400]} expected = DataFrame(expected_data) tm.assert_frame_equal(result, expected) @skip_pyarrow @pytest.mark.parametrize( ""date_string"", [""32/32/2019"", ""02/30/2019"", ""13/13/2019"", ""13/2019"", ""a3/11/2018"", ""10/11/2o17""], )","dt,val 2018-01-04 09:01:00+09:00,23350 2018-01-04 09:02:00+09:00,23400 2018-01-04 09:03:00+09:00,23400 2018-01-04 09:04:00+09:00,23400 2018-01-04 09:05:00+09:00,23400",11,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_parse_timezone(all_parsers): # see gh-22256 parser = all_parsers data = result = parser.read_csv(StringIO(data), parse_dates=[""dt""]) dti = DatetimeIndex( list( date_range( start=""2018-01-04 09:01:00"", end=""2018-01-04 09:05:00"", freq=""1min"", tz=timezone(timedelta(minutes=540)), ) ), freq=None, ) expected_data = {""dt"": dti, ""val"": [23350, 23400, 23400, 23400, 23400]} expected = DataFrame(expected_data) tm.assert_frame_equal(result, expected) @skip_pyarrow @pytest.mark.parametrize( ""date_string"", [""32/32/2019"", ""02/30/2019"", ""13/13/2019"", ""13/2019"", ""a3/11/2018"", ""10/11/2o17""], ) ``` ###Assistant : dt,val 2018-01-04 09:01:00+09:00,23350 2018-01-04 09:02:00+09:00,23400 2018-01-04 09:03:00+09:00,23400 2018-01-04 09:04:00+09:00,23400 2018-01-04 09:05:00+09:00,23400 " 461,"def alpn_proto_negotiated(self) -> Optional[bytes]: # pragma: no cover warnings.warn( ""Connection.alpn_proto_negotiated is deprecated, use Connection.alpn instead."", DeprecationWarning, ) return self.alpn ",*Deprecated:* An outdated alias for Connection.alpn.,6,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def alpn_proto_negotiated(self) -> Optional[bytes]: # pragma: no cover warnings.warn( ""Connection.alpn_proto_negotiated is deprecated, use Connection.alpn instead."", DeprecationWarning, ) return self.alpn ``` ###Assistant : *Deprecated:* An outdated alias for Connection.alpn. " 462,"def device_class(self) -> CoverDeviceClass: if isinstance(self.node, Awning): return CoverDeviceClass.AWNING if isinstance(self.node, Blind): return CoverDeviceClass.BLIND if isinstance(self.node, GarageDoor): return CoverDeviceClass.GARAGE if isinstance(self.node, Gate): return CoverDeviceClass.GATE if isinstance(self.node, RollerShutter): return CoverDeviceClass.SHUTTER if isinstance(self.node, Window): return CoverDeviceClass.WINDOW return CoverDeviceClass.WINDOW ","Define this cover as either awning, blind, garage, gate, shutter or window.",12,36,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def device_class(self) -> CoverDeviceClass: if isinstance(self.node, Awning): return CoverDeviceClass.AWNING if isinstance(self.node, Blind): return CoverDeviceClass.BLIND if isinstance(self.node, GarageDoor): return CoverDeviceClass.GARAGE if isinstance(self.node, Gate): return CoverDeviceClass.GATE if isinstance(self.node, RollerShutter): return CoverDeviceClass.SHUTTER if isinstance(self.node, Window): return CoverDeviceClass.WINDOW return CoverDeviceClass.WINDOW ``` ###Assistant : Define this cover as either awning, blind, garage, gate, shutter or window. " 463,"def get_yieldcurve(country) -> pd.DataFrame: data = investpy.bonds.get_bonds_overview(country) data.drop(columns=data.columns[0], axis=1, inplace=True) data.rename( columns={ ""name"": ""Tenor"", ""last"": ""Current"", ""last_close"": ""Previous"", ""high"": ""High"", ""low"": ""Low"", ""change"": ""Change"", ""change_percentage"": ""% Change"", }, inplace=True, ) return data ","Get country yield curve [Source: Investing.com] Returns ------- pd.DataFrame Country yield curve ",12,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_yieldcurve(country) -> pd.DataFrame: data = investpy.bonds.get_bonds_overview(country) data.drop(columns=data.columns[0], axis=1, inplace=True) data.rename( columns={ ""name"": ""Tenor"", ""last"": ""Current"", ""last_close"": ""Previous"", ""high"": ""High"", ""low"": ""Low"", ""change"": ""Change"", ""change_percentage"": ""% Change"", }, inplace=True, ) return data ``` ###Assistant : Get country yield curve [Source: Investing.com] Returns ------- pd.DataFrame Country yield curve " 464,"async def test_report_humidifier_humidity_state(hass): hass.states.async_set( ""humidifier.dry"", ""on"", { ""friendly_name"": ""Humidifier dry"", ""supported_features"": 0, ""humidity"": 25, ""min_humidity"": 20, ""max_humidity"": 90, }, ) hass.states.async_set( ""humidifier.wet"", ""on"", { ""friendly_name"": ""Humidifier wet"", ""supported_features"": 0, ""humidity"": 80, ""min_humidity"": 20, ""max_humidity"": 90, }, ) properties = await reported_properties(hass, ""humidifier.dry"") properties.assert_equal(""Alexa.RangeController"", ""rangeValue"", 25) properties = await reported_properties(hass, ""humidifier.wet"") properties.assert_equal(""Alexa.RangeController"", ""rangeValue"", 80) ","Test PercentageController, PowerLevelController reports humidifier humidity correctly.",7,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_report_humidifier_humidity_state(hass): hass.states.async_set( ""humidifier.dry"", ""on"", { ""friendly_name"": ""Humidifier dry"", ""supported_features"": 0, ""humidity"": 25, ""min_humidity"": 20, ""max_humidity"": 90, }, ) hass.states.async_set( ""humidifier.wet"", ""on"", { ""friendly_name"": ""Humidifier wet"", ""supported_features"": 0, ""humidity"": 80, ""min_humidity"": 20, ""max_humidity"": 90, }, ) properties = await reported_properties(hass, ""humidifier.dry"") properties.assert_equal(""Alexa.RangeController"", ""rangeValue"", 25) properties = await reported_properties(hass, ""humidifier.wet"") properties.assert_equal(""Alexa.RangeController"", ""rangeValue"", 80) ``` ###Assistant : Test PercentageController, PowerLevelController reports humidifier humidity correctly. 
" 465,"def serialize(self, value): if self.type == CustomFieldTypeChoices.TYPE_OBJECT and value is not None: return value.pk return value "," Prepare a value for storage as JSON data. ",8,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serialize(self, value): if self.type == CustomFieldTypeChoices.TYPE_OBJECT and value is not None: return value.pk return value ``` ###Assistant : Prepare a value for storage as JSON data. " 466,"def revert_to_saved(self, frame_index): if frame_index not in self._updated_frame_indices: logger.debug(""Alignments not amended. Returning"") return logger.verbose(""Reverting alignments for frame_index %s"", frame_index) print(frame_index) print(len(self._sorted_frame_names)) alignments = self._alignments.data[self._sorted_frame_names[frame_index]][""faces""] faces = self._frame_faces[frame_index] reset_grid = self._add_remove_faces(alignments, faces) for detected_face, face in zip(faces, alignments): detected_face.from_alignment(face, with_thumb=True) detected_face.load_aligned(None, force=True) _ = detected_face.aligned.average_distance # cache the distances self._updated_frame_indices.remove(frame_index) if not self._updated_frame_indices: self._tk_unsaved.set(False) if reset_grid: self._tk_face_count_changed.set(True) else: self._tk_edited.set(True) self._globals.tk_update.set(True) "," Revert the frame's alignments to their saved version for the given frame index. Parameters ---------- frame_index: int The frame that should have their faces reverted to their saved version ",29,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def revert_to_saved(self, frame_index): if frame_index not in self._updated_frame_indices: logger.debug(""Alignments not amended. Returning"") return logger.verbose(""Reverting alignments for frame_index %s"", frame_index) print(frame_index) print(len(self._sorted_frame_names)) alignments = self._alignments.data[self._sorted_frame_names[frame_index]][""faces""] faces = self._frame_faces[frame_index] reset_grid = self._add_remove_faces(alignments, faces) for detected_face, face in zip(faces, alignments): detected_face.from_alignment(face, with_thumb=True) detected_face.load_aligned(None, force=True) _ = detected_face.aligned.average_distance # cache the distances self._updated_frame_indices.remove(frame_index) if not self._updated_frame_indices: self._tk_unsaved.set(False) if reset_grid: self._tk_face_count_changed.set(True) else: self._tk_edited.set(True) self._globals.tk_update.set(True) ``` ###Assistant : Revert the frame's alignments to their saved version for the given frame index. Parameters ---------- frame_index: int The frame that should have their faces reverted to their saved version " 467,"def parse_args(args): # Use the file's docstring for the help text and don't let argparse reformat it. parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('--css', type=str, required=True, help='hostname of code signing server') group = parser.add_mutually_exclusive_group() # We use 'store_false' and a destination related to the other type of # artifact to cause the flag being set to disable publishing of the other # artifact. This makes using the parsed arguments later on a little simpler # and cleaner. 
group.add_argument('--snaps-only', action='store_false', dest='publish_windows', help='Skip publishing other artifacts and only publish the snaps') group.add_argument('--windows-only', action='store_false', dest='publish_snaps', help='Skip publishing other artifacts and only publish the Windows installer') return parser.parse_args(args) ","Parse command line arguments. :param args: command line arguments with the program name removed. This is usually taken from sys.argv[1:]. :type args: `list` of `str` :returns: parsed arguments :rtype: argparse.Namespace ",30,102,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse_args(args): # Use the file's docstring for the help text and don't let argparse reformat it. parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('--css', type=str, required=True, help='hostname of code signing server') group = parser.add_mutually_exclusive_group() # We use 'store_false' and a destination related to the other type of # artifact to cause the flag being set to disable publishing of the other # artifact. This makes using the parsed arguments later on a little simpler # and cleaner. group.add_argument('--snaps-only', action='store_false', dest='publish_windows', help='Skip publishing other artifacts and only publish the snaps') group.add_argument('--windows-only', action='store_false', dest='publish_snaps', help='Skip publishing other artifacts and only publish the Windows installer') return parser.parse_args(args) ``` ###Assistant : Parse command line arguments. :param args: command line arguments with the program name removed. This is usually taken from sys.argv[1:]. :type args: `list` of `str` :returns: parsed arguments :rtype: argparse.Namespace " 468,"def is_conservative(field): # Field is conservative irrespective of frame # Take the first frame in the result of the separate method of Vector if field == Vector(0): return True frame = list(field.separate())[0] return curl(field, frame).simplify() == Vector(0) "," Checks if a field is conservative. Parameters ========== field : Vector The field to check for conservative property Examples ======== >>> from sympy.physics.vector import ReferenceFrame >>> from sympy.physics.vector import is_conservative >>> R = ReferenceFrame('R') >>> is_conservative(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z) True >>> is_conservative(R[2] * R.y) False ",46,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_conservative(field): # Field is conservative irrespective of frame # Take the first frame in the result of the separate method of Vector if field == Vector(0): return True frame = list(field.separate())[0] return curl(field, frame).simplify() == Vector(0) ``` ###Assistant : Checks if a field is conservative. 
Parameters ========== field : Vector The field to check for conservative property Examples ======== >>> from sympy.physics.vector import ReferenceFrame >>> from sympy.physics.vector import is_conservative >>> R = ReferenceFrame('R') >>> is_conservative(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z) True >>> is_conservative(R[2] * R.y) False " 469,"def fixup_build_ext(cmd): if os.name == 'nt': cmd.debug = sys.executable.endswith('_d.exe') elif sysconfig.get_config_var('Py_ENABLE_SHARED'): # To further add to the shared builds fun on Unix, we can't just add # library_dirs to the Extension() instance because that doesn't get # plumbed through to the final compiler command. runshared = sysconfig.get_config_var('RUNSHARED') if runshared is None: cmd.library_dirs = ['.'] else: if sys.platform == 'darwin': cmd.library_dirs = [] else: name, equals, value = runshared.partition('=') cmd.library_dirs = [d for d in value.split(os.pathsep) if d] ","Function needed to make build_ext tests pass. When Python was built with --enable-shared on Unix, -L. is not enough to find libpython.so, because regrtest runs in a tempdir, not in the source directory where the .so lives. When Python was built with in debug mode on Windows, build_ext commands need their debug attribute set, and it is not done automatically for some reason. This function handles both of these things. Example use: cmd = build_ext(dist) support.fixup_build_ext(cmd) cmd.ensure_finalized() Unlike most other Unix platforms, Mac OS X embeds absolute paths to shared libraries into executables, so the fixup is not needed there. ",100,77,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fixup_build_ext(cmd): if os.name == 'nt': cmd.debug = sys.executable.endswith('_d.exe') elif sysconfig.get_config_var('Py_ENABLE_SHARED'): # To further add to the shared builds fun on Unix, we can't just add # library_dirs to the Extension() instance because that doesn't get # plumbed through to the final compiler command. runshared = sysconfig.get_config_var('RUNSHARED') if runshared is None: cmd.library_dirs = ['.'] else: if sys.platform == 'darwin': cmd.library_dirs = [] else: name, equals, value = runshared.partition('=') cmd.library_dirs = [d for d in value.split(os.pathsep) if d] ``` ###Assistant : Function needed to make build_ext tests pass. When Python was built with --enable-shared on Unix, -L. is not enough to find libpython.so, because regrtest runs in a tempdir, not in the source directory where the .so lives. When Python was built with in debug mode on Windows, build_ext commands need their debug attribute set, and it is not done automatically for some reason. This function handles both of these things. Example use: cmd = build_ext(dist) support.fixup_build_ext(cmd) cmd.ensure_finalized() Unlike most other Unix platforms, Mac OS X embeds absolute paths to shared libraries into executables, so the fixup is not needed there. " 470,"def batch_uses_naming_series(): use_naming_series = cint(frappe.db.get_single_value(""Stock Settings"", ""use_naming_series"")) return bool(use_naming_series) "," Verify if the Batch is to be named using a naming series :return: bool ",14,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def batch_uses_naming_series(): use_naming_series = cint(frappe.db.get_single_value(""Stock Settings"", ""use_naming_series"")) return bool(use_naming_series) ``` ###Assistant : Verify if the Batch is to be named using a naming series :return: bool " 471,"def mixin_http_gateway_parser(parser=None): gp = add_arg_group(parser, title='HTTP Gateway') gp.add_argument( '--title', type=str, help='The title of this HTTP server. It will be used in automatics docs such as Swagger UI.', ) gp.add_argument( '--description', type=str, help='The description of this HTTP server. It will be used in automatics docs such as Swagger UI.', ) gp.add_argument( '--cors', action='store_true', default=False, help=, ) gp.add_argument( '--no-debug-endpoints', action='store_true', default=False, help='If set, `/status` `/post` endpoints are removed from HTTP interface. ', ) gp.add_argument( '--no-crud-endpoints', action='store_true', default=False, help=, ) gp.add_argument( '--expose-endpoints', type=str, help=, ) gp.add_argument( '--uvicorn-kwargs', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help=, ) gp.add_argument( '--grpc-server-kwargs', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help=, ) gp.add_argument( '--ssl-certfile', type=str, help=, dest='ssl_certfile', ) gp.add_argument( '--ssl-keyfile', type=str, help=, dest='ssl_keyfile', ) ","Add the options to rest server :param parser: the parser If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access. If set, `/index`, `/search`, `/update`, `/delete` endpoints are removed from HTTP interface. Any executor that has `@requests(on=...)` bind with those values will receive data requests. A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints. Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/ Dictionary of kwargs arguments that will be passed to the grpc server when starting the server # todo update the path to the certificate file the path to the key file ",118,110,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mixin_http_gateway_parser(parser=None): gp = add_arg_group(parser, title='HTTP Gateway') gp.add_argument( '--title', type=str, help='The title of this HTTP server. It will be used in automatics docs such as Swagger UI.', ) gp.add_argument( '--description', type=str, help='The description of this HTTP server. It will be used in automatics docs such as Swagger UI.', ) gp.add_argument( '--cors', action='store_true', default=False, help=, ) gp.add_argument( '--no-debug-endpoints', action='store_true', default=False, help='If set, `/status` `/post` endpoints are removed from HTTP interface. 
', ) gp.add_argument( '--no-crud-endpoints', action='store_true', default=False, help=, ) gp.add_argument( '--expose-endpoints', type=str, help=, ) gp.add_argument( '--uvicorn-kwargs', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help=, ) gp.add_argument( '--grpc-server-kwargs', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help=, ) gp.add_argument( '--ssl-certfile', type=str, help=, dest='ssl_certfile', ) gp.add_argument( '--ssl-keyfile', type=str, help=, dest='ssl_keyfile', ) ``` ###Assistant : Add the options to rest server :param parser: the parser If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access. If set, `/index`, `/search`, `/update`, `/delete` endpoints are removed from HTTP interface. Any executor that has `@requests(on=...)` bind with those values will receive data requests. A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints. Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/ Dictionary of kwargs arguments that will be passed to the grpc server when starting the server # todo update the path to the certificate file the path to the key file " 472,"def test_image_crafter_index(encoder_doc_array, tmpdir): create_test_img(path=str(tmpdir), file_name='1.jpg') with Flow().add(uses=ImageCrafter) as f: res = f.index(inputs=encoder_doc_array) assert len(res) == 1 doc = res[0] assert doc.mime_type == 'image/jpeg' assert doc.tensor is not None ","In this test, we input one ``DocumentArray`` with one ``Document``, and the `craft` method in the ``ImageCrafter`` returns chunks. In the ``ImageCrafter``, we filtered out all the modalities and only kept `image/jpeg`. So the 2 chunks should left only 1 chunk. And the tensor value of the ``Document`` is not empty once we finished crafting since we converted image uri/datauri to tensor. ",62,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_image_crafter_index(encoder_doc_array, tmpdir): create_test_img(path=str(tmpdir), file_name='1.jpg') with Flow().add(uses=ImageCrafter) as f: res = f.index(inputs=encoder_doc_array) assert len(res) == 1 doc = res[0] assert doc.mime_type == 'image/jpeg' assert doc.tensor is not None ``` ###Assistant : In this test, we input one ``DocumentArray`` with one ``Document``, and the `craft` method in the ``ImageCrafter`` returns chunks. In the ``ImageCrafter``, we filtered out all the modalities and only kept `image/jpeg`. So the 2 chunks should left only 1 chunk. And the tensor value of the ``Document`` is not empty once we finished crafting since we converted image uri/datauri to tensor. " 473,"def find_file(path, saltenv=""base"", **kwargs): actual_saltenv = saltenv if ""env"" in kwargs: # ""env"" is not supported; Use ""saltenv"". kwargs.pop(""env"") path = os.path.normpath(path) fnd = {""path"": """", ""rel"": """"} if os.path.isabs(path): return fnd if saltenv not in __opts__[""file_roots""]: if ""__env__"" in __opts__[""file_roots""]: log.debug( ""salt environment '%s' maps to __env__ file_roots directory"", saltenv ) saltenv = ""__env__"" else: return fnd "," Search the environment for the relative path. ",7,58,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def find_file(path, saltenv=""base"", **kwargs): actual_saltenv = saltenv if ""env"" in kwargs: # ""env"" is not supported; Use ""saltenv"". kwargs.pop(""env"") path = os.path.normpath(path) fnd = {""path"": """", ""rel"": """"} if os.path.isabs(path): return fnd if saltenv not in __opts__[""file_roots""]: if ""__env__"" in __opts__[""file_roots""]: log.debug( ""salt environment '%s' maps to __env__ file_roots directory"", saltenv ) saltenv = ""__env__"" else: return fnd ``` ###Assistant : Search the environment for the relative path. " 474,"def get_prompt_templates(cls) -> List[PromptTemplate]: return list(cls.prompt_templates.values()) "," Returns the list of supported prompt templates. :return: List of supported prompt templates. ",13,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_prompt_templates(cls) -> List[PromptTemplate]: return list(cls.prompt_templates.values()) ``` ###Assistant : Returns the list of supported prompt templates. :return: List of supported prompt templates. " 475,"def test_views(self, postgres_db): query = for cid, char in [(CID_A, 'a'), (CID_B, 'b')]: self.sql_via_http( query.format(f'test_view_{char}', char), company_id=cid, expected_resp_type=RESPONSE_TYPE.OK ) tables = self.get_tables_in('mindsdb', cid) self.assert_list( tables, { 'models', 'models_versions', f'test_view_{char}' } ) for cid, char in [(CID_A, 'a'), (CID_B, 'b')]: response = self.sql_via_http( f""select * from mindsdb.test_view_{char}"", company_id=cid, expected_resp_type=RESPONSE_TYPE.TABLE ) assert len(response['data']) == 50 response = self.sql_via_http( f""DROP VIEW mindsdb.test_view_{char}"", company_id=cid, expected_resp_type=RESPONSE_TYPE.OK ) tables = self.get_tables_in('mindsdb', cid) self.assert_list( tables, { 'models', 'models_versions' } ) self.sql_via_http( f""select * from mindsdb.test_view_{char}"", company_id=cid, expected_resp_type=RESPONSE_TYPE.ERROR ) "," CREATE VIEW mindsdb.{} FROM test_integration_{} ( select * from rentals limit 50 ) ",13,81,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_views(self, postgres_db): query = for cid, char in [(CID_A, 'a'), (CID_B, 'b')]: self.sql_via_http( query.format(f'test_view_{char}', char), company_id=cid, expected_resp_type=RESPONSE_TYPE.OK ) tables = self.get_tables_in('mindsdb', cid) self.assert_list( tables, { 'models', 'models_versions', f'test_view_{char}' } ) for cid, char in [(CID_A, 'a'), (CID_B, 'b')]: response = self.sql_via_http( f""select * from mindsdb.test_view_{char}"", company_id=cid, expected_resp_type=RESPONSE_TYPE.TABLE ) assert len(response['data']) == 50 response = self.sql_via_http( f""DROP VIEW mindsdb.test_view_{char}"", company_id=cid, expected_resp_type=RESPONSE_TYPE.OK ) tables = self.get_tables_in('mindsdb', cid) self.assert_list( tables, { 'models', 'models_versions' } ) self.sql_via_http( f""select * from mindsdb.test_view_{char}"", company_id=cid, expected_resp_type=RESPONSE_TYPE.ERROR ) ``` ###Assistant : CREATE VIEW mindsdb.{} FROM test_integration_{} ( select * from rentals limit 50 ) " 476,"def convert(filename, cache): path = Path(filename) if not path.exists(): raise IOError(f""{path} does not exist"") if path.suffix[1:] not in converter: import pytest pytest.skip(f""Don't know how to convert {path.suffix} files to png"") newpath = path.parent / f""{path.stem}_{path.suffix[1:]}.png"" # Only convert the file if the destination doesn't already exist or # is out of date. if not newpath.exists() or newpath.stat().st_mtime < path.stat().st_mtime: cache_dir = _get_cache_path() if cache else None if cache_dir is not None: _register_conversion_cache_cleaner_once() hash_value = get_file_hash(path) cached_path = cache_dir / (hash_value + newpath.suffix) if cached_path.exists(): _log.debug(""For %s: reusing cached conversion."", filename) shutil.copyfile(cached_path, newpath) return str(newpath) _log.debug(""For %s: converting to png."", filename) convert = converter[path.suffix[1:]] if path.suffix == "".svg"": contents = path.read_text() if 'style=""font:' in contents: # for svg.fonttype = none, we explicitly patch the font search # path so that fonts shipped by Matplotlib are found. convert = _svg_with_matplotlib_fonts_converter convert(path, newpath) if cache_dir is not None: _log.debug(""For %s: caching conversion result."", filename) shutil.copyfile(newpath, cached_path) return str(newpath) "," Convert the named file to png; return the name of the created file. If *cache* is True, the result of the conversion is cached in `matplotlib.get_cachedir() + '/test_cache/'`. The caching is based on a hash of the exact contents of the input file. Old cache entries are automatically deleted as needed to keep the size of the cache capped to twice the size of all baseline images. ",67,155,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert(filename, cache): path = Path(filename) if not path.exists(): raise IOError(f""{path} does not exist"") if path.suffix[1:] not in converter: import pytest pytest.skip(f""Don't know how to convert {path.suffix} files to png"") newpath = path.parent / f""{path.stem}_{path.suffix[1:]}.png"" # Only convert the file if the destination doesn't already exist or # is out of date. 
if not newpath.exists() or newpath.stat().st_mtime < path.stat().st_mtime: cache_dir = _get_cache_path() if cache else None if cache_dir is not None: _register_conversion_cache_cleaner_once() hash_value = get_file_hash(path) cached_path = cache_dir / (hash_value + newpath.suffix) if cached_path.exists(): _log.debug(""For %s: reusing cached conversion."", filename) shutil.copyfile(cached_path, newpath) return str(newpath) _log.debug(""For %s: converting to png."", filename) convert = converter[path.suffix[1:]] if path.suffix == "".svg"": contents = path.read_text() if 'style=""font:' in contents: # for svg.fonttype = none, we explicitly patch the font search # path so that fonts shipped by Matplotlib are found. convert = _svg_with_matplotlib_fonts_converter convert(path, newpath) if cache_dir is not None: _log.debug(""For %s: caching conversion result."", filename) shutil.copyfile(newpath, cached_path) return str(newpath) ``` ###Assistant : Convert the named file to png; return the name of the created file. If *cache* is True, the result of the conversion is cached in `matplotlib.get_cachedir() + '/test_cache/'`. The caching is based on a hash of the exact contents of the input file. Old cache entries are automatically deleted as needed to keep the size of the cache capped to twice the size of all baseline images. " 477,"def resize(self, size, resample=None, box=None, reducing_gap=None): if resample is None: type_special = "";"" in self.mode resample = Resampling.NEAREST if type_special else Resampling.BICUBIC elif resample not in ( Resampling.NEAREST, Resampling.BILINEAR, Resampling.BICUBIC, Resampling.LANCZOS, Resampling.BOX, Resampling.HAMMING, ): message = f""Unknown resampling filter ({resample})."" filters = [ f""{filter[1]} ({filter[0]})"" for filter in ( (Resampling.NEAREST, ""Image.Resampling.NEAREST""), (Resampling.LANCZOS, ""Image.Resampling.LANCZOS""), (Resampling.BILINEAR, ""Image.Resampling.BILINEAR""), (Resampling.BICUBIC, ""Image.Resampling.BICUBIC""), (Resampling.BOX, ""Image.Resampling.BOX""), (Resampling.HAMMING, ""Image.Resampling.HAMMING""), ) ] raise ValueError( message + "" Use "" + "", "".join(filters[:-1]) + "" or "" + filters[-1] ) if reducing_gap is not None and reducing_gap < 1.0: raise ValueError(""reducing_gap must be 1.0 or greater"") size = tuple(size) if box is None: box = (0, 0) + self.size else: box = tuple(box) if self.size == size and box == (0, 0) + self.size: return self.copy() if self.mode in (""1"", ""P""): resample = Resampling.NEAREST if self.mode in [""LA"", ""RGBA""] and resample != Resampling.NEAREST: im = self.convert({""LA"": ""La"", ""RGBA"": ""RGBa""}[self.mode]) im = im.resize(size, resample, box) return im.convert(self.mode) self.load() if reducing_gap is not None and resample != Resampling.NEAREST: factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1 factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1 if factor_x > 1 or factor_y > 1: reduce_box = self._get_safe_box(size, resample, box) factor = (factor_x, factor_y) if callable(self.reduce): self = self.reduce(factor, box=reduce_box) else: self = Image.reduce(self, factor, box=reduce_box) box = ( (box[0] - reduce_box[0]) / factor_x, (box[1] - reduce_box[1]) / factor_y, (box[2] - reduce_box[0]) / factor_x, (box[3] - reduce_box[1]) / factor_y, ) return self._new(self.im.resize(size, resample, box)) "," Returns a resized copy of this image. :param size: The requested size in pixels, as a 2-tuple: (width, height). :param resample: An optional resampling filter. 
This can be one of :py:data:`PIL.Image.Resampling.NEAREST`, :py:data:`PIL.Image.Resampling.BOX`, :py:data:`PIL.Image.Resampling.BILINEAR`, :py:data:`PIL.Image.Resampling.HAMMING`, :py:data:`PIL.Image.Resampling.BICUBIC` or :py:data:`PIL.Image.Resampling.LANCZOS`. If the image has mode ""1"" or ""P"", it is always set to :py:data:`PIL.Image.Resampling.NEAREST`. If the image mode specifies a number of bits, such as ""I;16"", then the default filter is :py:data:`PIL.Image.Resampling.NEAREST`. Otherwise, the default filter is :py:data:`PIL.Image.Resampling.BICUBIC`. See: :ref:`concept-filters`. :param box: An optional 4-tuple of floats providing the source image region to be scaled. The values must be within (0, 0, width, height) rectangle. If omitted or None, the entire source is used. :param reducing_gap: Apply optimization by resizing the image in two steps. First, reducing the image by integer times using :py:meth:`~PIL.Image.Image.reduce`. Second, resizing using regular resampling. The last step changes size no less than by ``reducing_gap`` times. ``reducing_gap`` may be None (no first step is performed) or should be greater than 1.0. The bigger ``reducing_gap``, the closer the result to the fair resampling. The smaller ``reducing_gap``, the faster resizing. With ``reducing_gap`` greater or equal to 3.0, the result is indistinguishable from fair resampling in most cases. The default value is None (no optimization). :returns: An :py:class:`~PIL.Image.Image` object. ",207,245,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resize(self, size, resample=None, box=None, reducing_gap=None): if resample is None: type_special = "";"" in self.mode resample = Resampling.NEAREST if type_special else Resampling.BICUBIC elif resample not in ( Resampling.NEAREST, Resampling.BILINEAR, Resampling.BICUBIC, Resampling.LANCZOS, Resampling.BOX, Resampling.HAMMING, ): message = f""Unknown resampling filter ({resample})."" filters = [ f""{filter[1]} ({filter[0]})"" for filter in ( (Resampling.NEAREST, ""Image.Resampling.NEAREST""), (Resampling.LANCZOS, ""Image.Resampling.LANCZOS""), (Resampling.BILINEAR, ""Image.Resampling.BILINEAR""), (Resampling.BICUBIC, ""Image.Resampling.BICUBIC""), (Resampling.BOX, ""Image.Resampling.BOX""), (Resampling.HAMMING, ""Image.Resampling.HAMMING""), ) ] raise ValueError( message + "" Use "" + "", "".join(filters[:-1]) + "" or "" + filters[-1] ) if reducing_gap is not None and reducing_gap < 1.0: raise ValueError(""reducing_gap must be 1.0 or greater"") size = tuple(size) if box is None: box = (0, 0) + self.size else: box = tuple(box) if self.size == size and box == (0, 0) + self.size: return self.copy() if self.mode in (""1"", ""P""): resample = Resampling.NEAREST if self.mode in [""LA"", ""RGBA""] and resample != Resampling.NEAREST: im = self.convert({""LA"": ""La"", ""RGBA"": ""RGBa""}[self.mode]) im = im.resize(size, resample, box) return im.convert(self.mode) self.load() if reducing_gap is not None and resample != Resampling.NEAREST: factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1 factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1 if factor_x > 1 or factor_y > 1: reduce_box = self._get_safe_box(size, resample, box) factor = (factor_x, factor_y) if callable(self.reduce): self = self.reduce(factor, box=reduce_box) else: self = Image.reduce(self, factor, box=reduce_box) box = ( (box[0] - reduce_box[0]) / factor_x, (box[1] - reduce_box[1]) / factor_y, (box[2] - reduce_box[0]) / 
factor_x, (box[3] - reduce_box[1]) / factor_y, ) return self._new(self.im.resize(size, resample, box)) ``` ###Assistant : Returns a resized copy of this image. :param size: The requested size in pixels, as a 2-tuple: (width, height). :param resample: An optional resampling filter. This can be one of :py:data:`PIL.Image.Resampling.NEAREST`, :py:data:`PIL.Image.Resampling.BOX`, :py:data:`PIL.Image.Resampling.BILINEAR`, :py:data:`PIL.Image.Resampling.HAMMING`, :py:data:`PIL.Image.Resampling.BICUBIC` or :py:data:`PIL.Image.Resampling.LANCZOS`. If the image has mode ""1"" or ""P"", it is always set to :py:data:`PIL.Image.Resampling.NEAREST`. If the image mode specifies a number of bits, such as ""I;16"", then the default filter is :py:data:`PIL.Image.Resampling.NEAREST`. Otherwise, the default filter is :py:data:`PIL.Image.Resampling.BICUBIC`. See: :ref:`concept-filters`. :param box: An optional 4-tuple of floats providing the source image region to be scaled. The values must be within (0, 0, width, height) rectangle. If omitted or None, the entire source is used. :param reducing_gap: Apply optimization by resizing the image in two steps. First, reducing the image by integer times using :py:meth:`~PIL.Image.Image.reduce`. Second, resizing using regular resampling. The last step changes size no less than by ``reducing_gap`` times. ``reducing_gap`` may be None (no first step is performed) or should be greater than 1.0. The bigger ``reducing_gap``, the closer the result to the fair resampling. The smaller ``reducing_gap``, the faster resizing. With ``reducing_gap`` greater or equal to 3.0, the result is indistinguishable from fair resampling in most cases. The default value is None (no optimization). :returns: An :py:class:`~PIL.Image.Image` object. " 478,"def test_unrecognized_key(self) -> None: yaml_str = output_error = self.get_errors_from_gen_backend_stubs(yaml_str) self.assertExpectedInline(output_error, ) # noqa: B950 # if use_out_as_primary is provided, it must be a bool","\ backend: XLA cpp_namespace: torch_xla supported: - abs invalid_key: invalid_val contains unexpected keys: invalid_key. Only the following keys are supported: backend, cpp_namespace, extra_headers, supported, autograd, full_codegen",26,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_unrecognized_key(self) -> None: yaml_str = output_error = self.get_errors_from_gen_backend_stubs(yaml_str) self.assertExpectedInline(output_error, ) # noqa: B950 # if use_out_as_primary is provided, it must be a bool ``` ###Assistant : \ backend: XLA cpp_namespace: torch_xla supported: - abs invalid_key: invalid_val contains unexpected keys: invalid_key. Only the following keys are supported: backend, cpp_namespace, extra_headers, supported, autograd, full_codegen " 479,"def f2cexpr(expr): # TODO: support Fortran `len` function with optional kind parameter expr = re.sub(r'\blen\b', 'f2py_slen', expr) return expr ","Rewrite Fortran expression as f2py supported C expression. Due to the lack of a proper expression parser in f2py, this function uses a heuristic approach that assumes that Fortran arithmetic expressions are valid C arithmetic expressions when mapping Fortran function calls to the corresponding C function/CPP macros calls. ",48,19,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def f2cexpr(expr): # TODO: support Fortran `len` function with optional kind parameter expr = re.sub(r'\blen\b', 'f2py_slen', expr) return expr ``` ###Assistant : Rewrite Fortran expression as f2py supported C expression. Due to the lack of a proper expression parser in f2py, this function uses a heuristic approach that assumes that Fortran arithmetic expressions are valid C arithmetic expressions when mapping Fortran function calls to the corresponding C function/CPP macros calls. " 480,"def build_args(self, category, command=None, generate=False): logger.debug(""Build cli arguments: (category: %s, command: %s, generate: %s)"", category, command, generate) command = self.command if not command else command script = f""{category}.py"" pathexecscript = os.path.join(self.pathscript, script) args = [sys.executable] if generate else [sys.executable, ""-u""] args.extend([pathexecscript, command]) cli_opts = get_config().cli_opts for cliopt in cli_opts.gen_cli_arguments(command): args.extend(cliopt) if command == ""train"" and not generate: self._get_training_session_info(cliopt) if not generate: args.append(""-gui"") # Indicate to Faceswap that we are running the GUI if generate: # Delimit args with spaces args = [f'""{arg}""' if "" "" in arg and not arg.startswith((""["", ""("")) and not arg.endswith((""]"", "")"")) else arg for arg in args] logger.debug(""Built cli arguments: (%s)"", args) return args "," Build the faceswap command and arguments list. If training, pass the model folder and name to the training :class:`lib.gui.analysis.Session` for the GUI. ",22,108,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def build_args(self, category, command=None, generate=False): logger.debug(""Build cli arguments: (category: %s, command: %s, generate: %s)"", category, command, generate) command = self.command if not command else command script = f""{category}.py"" pathexecscript = os.path.join(self.pathscript, script) args = [sys.executable] if generate else [sys.executable, ""-u""] args.extend([pathexecscript, command]) cli_opts = get_config().cli_opts for cliopt in cli_opts.gen_cli_arguments(command): args.extend(cliopt) if command == ""train"" and not generate: self._get_training_session_info(cliopt) if not generate: args.append(""-gui"") # Indicate to Faceswap that we are running the GUI if generate: # Delimit args with spaces args = [f'""{arg}""' if "" "" in arg and not arg.startswith((""["", ""("")) and not arg.endswith((""]"", "")"")) else arg for arg in args] logger.debug(""Built cli arguments: (%s)"", args) return args ``` ###Assistant : Build the faceswap command and arguments list. If training, pass the model folder and name to the training :class:`lib.gui.analysis.Session` for the GUI. " 481,"def describe_v_switches(self, vpc_id=None): request = DescribeVSwitchesRequest() if vpc_id is not None: request.set_VpcId(vpc_id) response = self._send_request(request) if response is not None: return response.get(""VSwitches"").get(""VSwitch"") else: logging.error(""Describe VSwitches Failed."") return None ","Queries one or more VSwitches. :param vpc_id: The ID of the VPC to which the VSwitch belongs. :return: VSwitch list. ",20,28,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def describe_v_switches(self, vpc_id=None): request = DescribeVSwitchesRequest() if vpc_id is not None: request.set_VpcId(vpc_id) response = self._send_request(request) if response is not None: return response.get(""VSwitches"").get(""VSwitch"") else: logging.error(""Describe VSwitches Failed."") return None ``` ###Assistant : Queries one or more VSwitches. :param vpc_id: The ID of the VPC to which the VSwitch belongs. :return: VSwitch list. " 482,"def line(self, Y, X=None, win=None, env=None, opts=None, update=None, name=None): if update is not None: if update == ""remove"": return self.scatter( X=None, Y=None, opts=opts, win=win, env=env, update=update, name=name, ) else: assert X is not None, ""must specify x-values for line update"" assert Y.ndim == 1 or Y.ndim == 2, ""Y should have 1 or 2 dim"" assert Y.shape[-1] > 0, ""must plot one line at least"" if X is not None: assert X.ndim == 1 or X.ndim == 2, ""X should have 1 or 2 dim"" else: X = np.linspace(0, 1, Y.shape[0]) if Y.ndim == 2 and X.ndim == 1: X = np.tile(X, (Y.shape[1], 1)).transpose() assert X.shape == Y.shape, ""X and Y should be the same shape"" opts = {} if opts is None else opts opts[""markers""] = opts.get(""markers"", False) opts[""fillarea""] = opts.get(""fillarea"", False) opts[""mode""] = ""lines+markers"" if opts.get(""markers"") else ""lines"" _title2str(opts) _assert_opts(opts) if Y.ndim == 1: linedata = np.column_stack((X, Y)) else: linedata = np.column_stack((X.ravel(order=""F""), Y.ravel(order=""F""))) labels = None if Y.ndim == 2: labels = np.arange(1, Y.shape[1] + 1) labels = np.tile(labels, (Y.shape[0], 1)).ravel(order=""F"") return self.scatter( X=linedata, Y=labels, opts=opts, win=win, env=env, update=update, name=name ) "," This function draws a line plot. It takes in an `N` or `NxM` tensor `Y` that specifies the values of the `M` lines (that connect `N` points) to plot. It also takes an optional `X` tensor that specifies the corresponding x-axis values; `X` can be an `N` tensor (in which case all lines will share the same x-axis values) or have the same size as `Y`. `update` can be used to efficiently update the data of an existing line. Use 'append' to append data, 'replace' to use new data, and 'remove' to delete the trace that is specified in `name`. If updating a single trace, use `name` to specify the name of the trace to be updated. Update data that is all NaN is ignored (can be used for masking update). Using `update='append'` will create a plot if it doesn't exist and append to the existing plot otherwise. The following `opts` are supported: - `opts.fillarea` : fill area below line (`boolean`) - `opts.markers` : show markers (`boolean`; default = `false`) - `opts.markersymbol`: marker symbol (`string`; default = `'dot'`) - `opts.markersize` : marker size (`number`; default = `'10'`) - `opts.linecolor` : line colors (`np.array`; default = None) - `opts.dash` : line dash type (`np.array`; default = None) - `opts.legend` : `list` or `tuple` containing legend names If `update` is specified, the figure will be updated without creating a new plot -- this can be used for efficient updating. ",237,183,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def line(self, Y, X=None, win=None, env=None, opts=None, update=None, name=None): if update is not None: if update == ""remove"": return self.scatter( X=None, Y=None, opts=opts, win=win, env=env, update=update, name=name, ) else: assert X is not None, ""must specify x-values for line update"" assert Y.ndim == 1 or Y.ndim == 2, ""Y should have 1 or 2 dim"" assert Y.shape[-1] > 0, ""must plot one line at least"" if X is not None: assert X.ndim == 1 or X.ndim == 2, ""X should have 1 or 2 dim"" else: X = np.linspace(0, 1, Y.shape[0]) if Y.ndim == 2 and X.ndim == 1: X = np.tile(X, (Y.shape[1], 1)).transpose() assert X.shape == Y.shape, ""X and Y should be the same shape"" opts = {} if opts is None else opts opts[""markers""] = opts.get(""markers"", False) opts[""fillarea""] = opts.get(""fillarea"", False) opts[""mode""] = ""lines+markers"" if opts.get(""markers"") else ""lines"" _title2str(opts) _assert_opts(opts) if Y.ndim == 1: linedata = np.column_stack((X, Y)) else: linedata = np.column_stack((X.ravel(order=""F""), Y.ravel(order=""F""))) labels = None if Y.ndim == 2: labels = np.arange(1, Y.shape[1] + 1) labels = np.tile(labels, (Y.shape[0], 1)).ravel(order=""F"") return self.scatter( X=linedata, Y=labels, opts=opts, win=win, env=env, update=update, name=name ) ``` ###Assistant : This function draws a line plot. It takes in an `N` or `NxM` tensor `Y` that specifies the values of the `M` lines (that connect `N` points) to plot. It also takes an optional `X` tensor that specifies the corresponding x-axis values; `X` can be an `N` tensor (in which case all lines will share the same x-axis values) or have the same size as `Y`. `update` can be used to efficiently update the data of an existing line. Use 'append' to append data, 'replace' to use new data, and 'remove' to delete the trace that is specified in `name`. If updating a single trace, use `name` to specify the name of the trace to be updated. Update data that is all NaN is ignored (can be used for masking update). Using `update='append'` will create a plot if it doesn't exist and append to the existing plot otherwise. The following `opts` are supported: - `opts.fillarea` : fill area below line (`boolean`) - `opts.markers` : show markers (`boolean`; default = `false`) - `opts.markersymbol`: marker symbol (`string`; default = `'dot'`) - `opts.markersize` : marker size (`number`; default = `'10'`) - `opts.linecolor` : line colors (`np.array`; default = None) - `opts.dash` : line dash type (`np.array`; default = None) - `opts.legend` : `list` or `tuple` containing legend names If `update` is specified, the figure will be updated without creating a new plot -- this can be used for efficient updating. " 483,"def test_delete_with_id(self, db_mock_class): op = DatabricksReposDeleteOperator(task_id=TASK_ID, repo_id=""123"") db_mock = db_mock_class.return_value db_mock.delete_repo.return_value = None op.execute(None) db_mock_class.assert_called_once_with( DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay ) db_mock.delete_repo.assert_called_once_with('123') "," Test the execute function using Repo ID. ",7,20,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_delete_with_id(self, db_mock_class): op = DatabricksReposDeleteOperator(task_id=TASK_ID, repo_id=""123"") db_mock = db_mock_class.return_value db_mock.delete_repo.return_value = None op.execute(None) db_mock_class.assert_called_once_with( DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay ) db_mock.delete_repo.assert_called_once_with('123') ``` ###Assistant : Test the execute function using Repo ID. " 484,"def test_user_rate_reached(self): for i in range(5): group = self.store_event( data={ ""fingerprint"": [""group1""], ""timestamp"": iso_format(before_now(minutes=5 + i)), ""tags"": {""sentry:user"": i}, }, project_id=self.project.id, ).group snooze = GroupSnooze.objects.create(group=group, user_count=5, user_window=60) assert not snooze.is_valid(test_rates=True) ",Test that ignoring an error issue until it's hit by 10 users in an hour works.,16,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_user_rate_reached(self): for i in range(5): group = self.store_event( data={ ""fingerprint"": [""group1""], ""timestamp"": iso_format(before_now(minutes=5 + i)), ""tags"": {""sentry:user"": i}, }, project_id=self.project.id, ).group snooze = GroupSnooze.objects.create(group=group, user_count=5, user_window=60) assert not snooze.is_valid(test_rates=True) ``` ###Assistant : Test that ignoring an error issue until it's hit by 10 users in an hour works. " 485,"def make_zipfile(base_name, base_dir, verbose=0, dry_run=0): zip_filename = base_name + "".zip"" mkpath(os.path.dirname(zip_filename), dry_run=dry_run) # If zipfile module is not available, try spawning an external # 'zip' command. if zipfile is None: if verbose: zipoptions = ""-r"" else: zipoptions = ""-rq"" try: spawn([""zip"", zipoptions, zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between ""couldn't find # external 'zip' command"" and ""zip failed"". raise DistutilsExecError((""unable to create zip file '%s': "" ""could neither import the 'zipfile' module nor "" ""find a standalone zip utility"") % zip_filename) else: log.info(""creating '%s' and adding '%s' to it"", zip_filename, base_dir) if not dry_run: try: zip = zipfile.ZipFile(zip_filename, ""w"", compression=zipfile.ZIP_DEFLATED) except RuntimeError: zip = zipfile.ZipFile(zip_filename, ""w"", compression=zipfile.ZIP_STORED) with zip: if base_dir != os.curdir: path = os.path.normpath(os.path.join(base_dir, '')) zip.write(path, path) log.info(""adding '%s'"", path) for dirpath, dirnames, filenames in os.walk(base_dir): for name in dirnames: path = os.path.normpath(os.path.join(dirpath, name, '')) zip.write(path, path) log.info(""adding '%s'"", path) for name in filenames: path = os.path.normpath(os.path.join(dirpath, name)) if os.path.isfile(path): zip.write(path, path) log.info(""adding '%s'"", path) return zip_filename ARCHIVE_FORMATS = { 'gztar': (make_tarball, [('compress', 'gzip')], ""gzip'ed tar-file""), 'bztar': (make_tarball, [('compress', 'bzip2')], ""bzip2'ed tar-file""), 'xztar': (make_tarball, [('compress', 'xz')], ""xz'ed tar-file""), 'ztar': (make_tarball, [('compress', 'compress')], ""compressed tar file""), 'tar': (make_tarball, [('compress', None)], ""uncompressed tar file""), 'zip': (make_zipfile, [],""ZIP file"") } ","Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_name' + "".zip"". 
Uses either the ""zipfile"" Python module (if available) or the InfoZIP ""zip"" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. ",57,203,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_zipfile(base_name, base_dir, verbose=0, dry_run=0): zip_filename = base_name + "".zip"" mkpath(os.path.dirname(zip_filename), dry_run=dry_run) # If zipfile module is not available, try spawning an external # 'zip' command. if zipfile is None: if verbose: zipoptions = ""-r"" else: zipoptions = ""-rq"" try: spawn([""zip"", zipoptions, zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between ""couldn't find # external 'zip' command"" and ""zip failed"". raise DistutilsExecError((""unable to create zip file '%s': "" ""could neither import the 'zipfile' module nor "" ""find a standalone zip utility"") % zip_filename) else: log.info(""creating '%s' and adding '%s' to it"", zip_filename, base_dir) if not dry_run: try: zip = zipfile.ZipFile(zip_filename, ""w"", compression=zipfile.ZIP_DEFLATED) except RuntimeError: zip = zipfile.ZipFile(zip_filename, ""w"", compression=zipfile.ZIP_STORED) with zip: if base_dir != os.curdir: path = os.path.normpath(os.path.join(base_dir, '')) zip.write(path, path) log.info(""adding '%s'"", path) for dirpath, dirnames, filenames in os.walk(base_dir): for name in dirnames: path = os.path.normpath(os.path.join(dirpath, name, '')) zip.write(path, path) log.info(""adding '%s'"", path) for name in filenames: path = os.path.normpath(os.path.join(dirpath, name)) if os.path.isfile(path): zip.write(path, path) log.info(""adding '%s'"", path) return zip_filename ARCHIVE_FORMATS = { 'gztar': (make_tarball, [('compress', 'gzip')], ""gzip'ed tar-file""), 'bztar': (make_tarball, [('compress', 'bzip2')], ""bzip2'ed tar-file""), 'xztar': (make_tarball, [('compress', 'xz')], ""xz'ed tar-file""), 'ztar': (make_tarball, [('compress', 'compress')], ""compressed tar file""), 'tar': (make_tarball, [('compress', None)], ""uncompressed tar file""), 'zip': (make_zipfile, [],""ZIP file"") } ``` ###Assistant : Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_name' + "".zip"". Uses either the ""zipfile"" Python module (if available) or the InfoZIP ""zip"" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. " 486,"def test_power_levels_user_default(self): # The other user doesn't have the proper power level. channel = self._upgrade_room(self.other_token) self.assertEqual(403, channel.code, channel.result) # Increase the power levels so that this user can upgrade. power_levels = self.helper.get_state( self.room_id, ""m.room.power_levels"", tok=self.creator_token, ) power_levels[""users_default""] = 100 self.helper.send_state( self.room_id, ""m.room.power_levels"", body=power_levels, tok=self.creator_token, ) # The upgrade should succeed! channel = self._upgrade_room(self.other_token) self.assertEqual(200, channel.code, channel.result) "," Another user can upgrade the room if the default power level for users is increased. ",15,56,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_power_levels_user_default(self): # The other user doesn't have the proper power level. channel = self._upgrade_room(self.other_token) self.assertEqual(403, channel.code, channel.result) # Increase the power levels so that this user can upgrade. power_levels = self.helper.get_state( self.room_id, ""m.room.power_levels"", tok=self.creator_token, ) power_levels[""users_default""] = 100 self.helper.send_state( self.room_id, ""m.room.power_levels"", body=power_levels, tok=self.creator_token, ) # The upgrade should succeed! channel = self._upgrade_room(self.other_token) self.assertEqual(200, channel.code, channel.result) ``` ###Assistant : Another user can upgrade the room if the default power level for users is increased. " 487,"def get_exempt_total(filters): conditions = get_conditions(filters) try: return ( frappe.db.sql( .format( where_conditions=conditions ), filters, )[0][0] or 0 ) except (IndexError, TypeError): return 0 ","Returns the sum of each Sales Invoice Item Amount which is Vat Exempt. select sum(i.base_amount) as total from `tabSales Invoice Item` i inner join `tabSales Invoice` s on i.parent = s.name where s.docstatus = 1 and i.is_exempt = 1 {where_conditions} ; ",41,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_exempt_total(filters): conditions = get_conditions(filters) try: return ( frappe.db.sql( .format( where_conditions=conditions ), filters, )[0][0] or 0 ) except (IndexError, TypeError): return 0 ``` ###Assistant : Returns the sum of each Sales Invoice Item Amount which is Vat Exempt. select sum(i.base_amount) as total from `tabSales Invoice Item` i inner join `tabSales Invoice` s on i.parent = s.name where s.docstatus = 1 and i.is_exempt = 1 {where_conditions} ; " 488,"def encrypt(self, key, text, confounder=None, key_usage_number=None): if key_usage_number is None: key_usage_number = self.get_usage()[0] self.cipher = key.encrypt(key_usage_number, text, confounder=confounder) EncryptionKey = lambda **kwargs: ASN1F_SEQUENCE( Int32(""keytype"", 0, explicit_tag=0x0), ASN1F_STRING(""keyvalue"", """", explicit_tag=0x1), **kwargs ) KerberosFlags = ASN1F_FLAGS _PADATA_TYPES = { 1: ""PA-TGS-REQ"", 2: ""PA-ENC-TIMESTAMP"", 3: ""PA-PW-SALT"", 11: ""PA-ETYPE-INFO"", 14: ""PA-PK-AS-REQ-OLD"", 15: ""PA-PK-AS-REP-OLD"", 16: ""PA-PK-AS-REQ"", 17: ""PA-PK-AS-REP"", 19: ""PA-ETYPE-INFO2"", 20: ""PA-SVR-REFERRAL-INFO"", 128: ""PA-PAC-REQUEST"", 133: ""PA-FX-COOKIE"", 134: ""PA-AUTHENTICATION-SET"", 135: ""PA-AUTH-SET-SELECTED"", 136: ""PA-FX-FAST"", 137: ""PA-FX-ERROR"", 165: ""PA-SUPPORTED-ENCTYPES"", 167: ""PA-PAC-OPTIONS"", } _PADATA_CLASSES = { # Filled elsewhere in this file } # RFC4120 "," Encrypt text and set it into cipher. :param key: the key to use for encryption :param text: the bytes value to encode :param confounder: (optional) specify the confounder bytes. Random otherwise :param key_usage_number: (optional) specify the key usage number. Guessed otherwise ",41,86,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def encrypt(self, key, text, confounder=None, key_usage_number=None): if key_usage_number is None: key_usage_number = self.get_usage()[0] self.cipher = key.encrypt(key_usage_number, text, confounder=confounder) EncryptionKey = lambda **kwargs: ASN1F_SEQUENCE( Int32(""keytype"", 0, explicit_tag=0x0), ASN1F_STRING(""keyvalue"", """", explicit_tag=0x1), **kwargs ) KerberosFlags = ASN1F_FLAGS _PADATA_TYPES = { 1: ""PA-TGS-REQ"", 2: ""PA-ENC-TIMESTAMP"", 3: ""PA-PW-SALT"", 11: ""PA-ETYPE-INFO"", 14: ""PA-PK-AS-REQ-OLD"", 15: ""PA-PK-AS-REP-OLD"", 16: ""PA-PK-AS-REQ"", 17: ""PA-PK-AS-REP"", 19: ""PA-ETYPE-INFO2"", 20: ""PA-SVR-REFERRAL-INFO"", 128: ""PA-PAC-REQUEST"", 133: ""PA-FX-COOKIE"", 134: ""PA-AUTHENTICATION-SET"", 135: ""PA-AUTH-SET-SELECTED"", 136: ""PA-FX-FAST"", 137: ""PA-FX-ERROR"", 165: ""PA-SUPPORTED-ENCTYPES"", 167: ""PA-PAC-OPTIONS"", } _PADATA_CLASSES = { # Filled elsewhere in this file } # RFC4120 ``` ###Assistant : Encrypt text and set it into cipher. :param key: the key to use for encryption :param text: the bytes value to encode :param confounder: (optional) specify the confounder bytes. Random otherwise :param key_usage_number: (optional) specify the key usage number. Guessed otherwise " 489,"def create_v_switch(self, vpc_id, zone_id, cidr_block): request = CreateVSwitchRequest() request.set_ZoneId(zone_id) request.set_VpcId(vpc_id) request.set_CidrBlock(cidr_block) response = self._send_request(request) if response is not None: return response.get(""VSwitchId"") else: logging.error(""create_v_switch vpc_id %s failed."", vpc_id) return None ","Create vSwitches to divide the VPC into one or more subnets :param vpc_id: The ID of the VPC to which the VSwitch belongs. :param zone_id: The ID of the zone to which the target VSwitch belongs. :param cidr_block: The CIDR block of the VSwitch. :return: ",45,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_v_switch(self, vpc_id, zone_id, cidr_block): request = CreateVSwitchRequest() request.set_ZoneId(zone_id) request.set_VpcId(vpc_id) request.set_CidrBlock(cidr_block) response = self._send_request(request) if response is not None: return response.get(""VSwitchId"") else: logging.error(""create_v_switch vpc_id %s failed."", vpc_id) return None ``` ###Assistant : Create vSwitches to divide the VPC into one or more subnets :param vpc_id: The ID of the VPC to which the VSwitch belongs. :param zone_id: The ID of the zone to which the target VSwitch belongs. :param cidr_block: The CIDR block of the VSwitch. :return: " 490,"def transparent_background(self) -> bool: return self.bgcolor is None or self.bgcolor.is_default ",Check if the style specified a transparent background.,8,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transparent_background(self) -> bool: return self.bgcolor is None or self.bgcolor.is_default ``` ###Assistant : Check if the style specified a transparent background. 
" 491,"def isPerfectNumber(number): # precondition assert isinstance(number, int) and ( number > 1 ), ""'number' must been an int and >= 1"" divisors = getDivisors(number) # precondition assert ( isinstance(divisors, list) and (divisors[0] == 1) and (divisors[len(divisors) - 1] == number) ), ""Error in help-function getDivisiors(...)"" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1]) == number # ------------------------------------------------------------ "," input: positive integer 'number' > 1 returns true if 'number' is a perfect number otherwise false. ",16,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def isPerfectNumber(number): # precondition assert isinstance(number, int) and ( number > 1 ), ""'number' must been an int and >= 1"" divisors = getDivisors(number) # precondition assert ( isinstance(divisors, list) and (divisors[0] == 1) and (divisors[len(divisors) - 1] == number) ), ""Error in help-function getDivisiors(...)"" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1]) == number # ------------------------------------------------------------ ``` ###Assistant : input: positive integer 'number' > 1 returns true if 'number' is a perfect number otherwise false. " 492,"def _get_cmap(name=None, lut=None): if name is None: name = mpl.rcParams['image.cmap'] if isinstance(name, colors.Colormap): return name _api.check_in_list(sorted(_colormaps), name=name) if lut is None: return _colormaps[name] else: return _colormaps[name].resampled(lut) # do it in two steps like this so we can have an un-deprecated version in # pyplot. get_cmap = _api.deprecated( '3.6', pending=True, alternative=""``matplotlib.colormaps[name]``"" )(_get_cmap) @_api.deprecated( '3.6', pending=True, alternative=""``matplotlib.colormaps.unregister_cmap(name)``"" )"," Get a colormap instance, defaulting to rc values if *name* is None. Colormaps added with :func:`register_cmap` take precedence over built-in colormaps. Parameters ---------- name : `matplotlib.colors.Colormap` or str or None, default: None If a `.Colormap` instance, it will be returned. Otherwise, the name of a colormap known to Matplotlib, which will be resampled by *lut*. The default, None, means :rc:`image.cmap`. lut : int or None, default: None If *name* is not already a Colormap instance and *lut* is not None, the colormap will be resampled to have *lut* entries in the lookup table. Returns ------- Colormap ",96,56,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_cmap(name=None, lut=None): if name is None: name = mpl.rcParams['image.cmap'] if isinstance(name, colors.Colormap): return name _api.check_in_list(sorted(_colormaps), name=name) if lut is None: return _colormaps[name] else: return _colormaps[name].resampled(lut) # do it in two steps like this so we can have an un-deprecated version in # pyplot. get_cmap = _api.deprecated( '3.6', pending=True, alternative=""``matplotlib.colormaps[name]``"" )(_get_cmap) @_api.deprecated( '3.6', pending=True, alternative=""``matplotlib.colormaps.unregister_cmap(name)``"" ) ``` ###Assistant : Get a colormap instance, defaulting to rc values if *name* is None. Colormaps added with :func:`register_cmap` take precedence over built-in colormaps. Parameters ---------- name : `matplotlib.colors.Colormap` or str or None, default: None If a `.Colormap` instance, it will be returned. 
Otherwise, the name of a colormap known to Matplotlib, which will be resampled by *lut*. The default, None, means :rc:`image.cmap`. lut : int or None, default: None If *name* is not already a Colormap instance and *lut* is not None, the colormap will be resampled to have *lut* entries in the lookup table. Returns ------- Colormap " 493,"def cancel(self): with self._condition: if self._state in [RUNNING, FINISHED]: return False if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: return True self._state = CANCELLED self._condition.notify_all() self._invoke_callbacks() return True ","Cancel the future if possible. Returns True if the future was cancelled, False otherwise. A future cannot be cancelled if it is running or has already completed. ",27,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cancel(self): with self._condition: if self._state in [RUNNING, FINISHED]: return False if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: return True self._state = CANCELLED self._condition.notify_all() self._invoke_callbacks() return True ``` ###Assistant : Cancel the future if possible. Returns True if the future was cancelled, False otherwise. A future cannot be cancelled if it is running or has already completed. " 494,"def get_hub_metadata(url, token=None): if token is None: token = HfFolder.get_token() headers = {""user-agent"": http_user_agent()} headers[""authorization""] = f""Bearer {token}"" r = huggingface_hub.file_download._request_with_retry( method=""HEAD"", url=url, headers=headers, allow_redirects=False ) huggingface_hub.file_download._raise_for_status(r) commit_hash = r.headers.get(HUGGINGFACE_HEADER_X_REPO_COMMIT) etag = r.headers.get(HUGGINGFACE_HEADER_X_LINKED_ETAG) or r.headers.get(""ETag"") if etag is not None: etag = huggingface_hub.file_download._normalize_etag(etag) return etag, commit_hash "," Returns the commit hash and associated etag for a given url. ",11,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_hub_metadata(url, token=None): if token is None: token = HfFolder.get_token() headers = {""user-agent"": http_user_agent()} headers[""authorization""] = f""Bearer {token}"" r = huggingface_hub.file_download._request_with_retry( method=""HEAD"", url=url, headers=headers, allow_redirects=False ) huggingface_hub.file_download._raise_for_status(r) commit_hash = r.headers.get(HUGGINGFACE_HEADER_X_REPO_COMMIT) etag = r.headers.get(HUGGINGFACE_HEADER_X_LINKED_ETAG) or r.headers.get(""ETag"") if etag is not None: etag = huggingface_hub.file_download._normalize_etag(etag) return etag, commit_hash ``` ###Assistant : Returns the commit hash and associated etag for a given url. " 495,"def aug_test(self, aug_batch_feats, aug_batch_img_metas, rescale=False): return self.aug_test_bboxes( aug_batch_feats, aug_batch_img_metas, rescale=rescale) ","Test function with test time augmentation. Args: aug_batch_feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. aug_batch_img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. 
Returns: list[ndarray]: bbox results of each class ",75,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def aug_test(self, aug_batch_feats, aug_batch_img_metas, rescale=False): return self.aug_test_bboxes( aug_batch_feats, aug_batch_img_metas, rescale=rescale) ``` ###Assistant : Test function with test time augmentation. Args: aug_batch_feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. aug_batch_img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[ndarray]: bbox results of each class " 496,"def ensure_schema_for_first_block(self) -> Optional[Union[""pyarrow.Schema"", type]]: get_schema = cached_remote_fn(_get_schema) try: block = next(self.iter_blocks()) except (StopIteration, ValueError): # Dataset is empty (no blocks) or was manually cleared. return None schema = ray.get(get_schema.remote(block)) # Set the schema. self._metadata[0].schema = schema return schema ","Ensure that the schema is set for the first block. Returns None if the block list is empty. ",18,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ensure_schema_for_first_block(self) -> Optional[Union[""pyarrow.Schema"", type]]: get_schema = cached_remote_fn(_get_schema) try: block = next(self.iter_blocks()) except (StopIteration, ValueError): # Dataset is empty (no blocks) or was manually cleared. return None schema = ray.get(get_schema.remote(block)) # Set the schema. self._metadata[0].schema = schema return schema ``` ###Assistant : Ensure that the schema is set for the first block. Returns None if the block list is empty. " 497,"def get_local_am_pm(): am_local = time(1).strftime(""%p"") pm_local = time(13).strftime(""%p"") return am_local, pm_local @pytest.fixture(params=[""string"", ""pathlike"", ""buffer""])",Return the AM and PM strings returned by strftime in current locale.,12,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_local_am_pm(): am_local = time(1).strftime(""%p"") pm_local = time(13).strftime(""%p"") return am_local, pm_local @pytest.fixture(params=[""string"", ""pathlike"", ""buffer""]) ``` ###Assistant : Return the AM and PM strings returned by strftime in current locale. " 498,"def validate_pages_layout(module, page): try: getattr(page, ""layout"") except AttributeError: raise exceptions.NoLayoutException( f ) "," No layout found in {module + "".py""} A variable or a function named ""layout"" is required. ",16,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_pages_layout(module, page): try: getattr(page, ""layout"") except AttributeError: raise exceptions.NoLayoutException( f ) ``` ###Assistant : No layout found in {module + "".py""} A variable or a function named ""layout"" is required. 
" 499,"def test_upscale_downscale_delay(): upscale_delay_s = 30.0 downscale_delay_s = 600.0 config = AutoscalingConfig( min_replicas=1, max_replicas=2, target_num_ongoing_requests_per_replica=1, upscale_delay_s=30.0, downscale_delay_s=600.0, ) policy = BasicAutoscalingPolicy(config) upscale_wait_periods = int(upscale_delay_s / CONTROL_LOOP_PERIOD_S) downscale_wait_periods = int(downscale_delay_s / CONTROL_LOOP_PERIOD_S) overload_requests = [100] # We should scale up only after enough consecutive scale-up decisions. for i in range(upscale_wait_periods): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1 ) assert new_num_replicas == 1, i new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1 ) assert new_num_replicas == 2 no_requests = [0, 0] # We should scale down only after enough consecutive scale-down decisions. for i in range(downscale_wait_periods): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=no_requests, curr_target_num_replicas=2 ) assert new_num_replicas == 2, i new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=no_requests, curr_target_num_replicas=2 ) assert new_num_replicas == 1 # Get some scale-up decisions, but not enough to trigger a scale up. for i in range(int(upscale_wait_periods / 2)): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1 ) assert new_num_replicas == 1, i # Interrupt with a scale-down decision. policy.get_decision_num_replicas( current_num_ongoing_requests=[0], curr_target_num_replicas=1 ) # The counter should be reset, so it should require `upscale_wait_periods` # more periods before we actually scale up. for i in range(upscale_wait_periods): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1 ) assert new_num_replicas == 1, i new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1 ) assert new_num_replicas == 2 # Get some scale-down decisions, but not enough to trigger a scale down. for i in range(int(downscale_wait_periods / 2)): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=no_requests, curr_target_num_replicas=2 ) assert new_num_replicas == 2, i # Interrupt with a scale-up decision. policy.get_decision_num_replicas( current_num_ongoing_requests=[100, 100], curr_target_num_replicas=2 ) # The counter should be reset so it should require `downscale_wait_periods` # more periods before we actually scale down. for i in range(downscale_wait_periods): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=no_requests, curr_target_num_replicas=2 ) assert new_num_replicas == 2, i new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=no_requests, curr_target_num_replicas=2 ) assert new_num_replicas == 1 ",Unit test for upscale_delay_s and downscale_delay_s.,6,278,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_upscale_downscale_delay(): upscale_delay_s = 30.0 downscale_delay_s = 600.0 config = AutoscalingConfig( min_replicas=1, max_replicas=2, target_num_ongoing_requests_per_replica=1, upscale_delay_s=30.0, downscale_delay_s=600.0, ) policy = BasicAutoscalingPolicy(config) upscale_wait_periods = int(upscale_delay_s / CONTROL_LOOP_PERIOD_S) downscale_wait_periods = int(downscale_delay_s / CONTROL_LOOP_PERIOD_S) overload_requests = [100] # We should scale up only after enough consecutive scale-up decisions. for i in range(upscale_wait_periods): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1 ) assert new_num_replicas == 1, i new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1 ) assert new_num_replicas == 2 no_requests = [0, 0] # We should scale down only after enough consecutive scale-down decisions. for i in range(downscale_wait_periods): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=no_requests, curr_target_num_replicas=2 ) assert new_num_replicas == 2, i new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=no_requests, curr_target_num_replicas=2 ) assert new_num_replicas == 1 # Get some scale-up decisions, but not enough to trigger a scale up. for i in range(int(upscale_wait_periods / 2)): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1 ) assert new_num_replicas == 1, i # Interrupt with a scale-down decision. policy.get_decision_num_replicas( current_num_ongoing_requests=[0], curr_target_num_replicas=1 ) # The counter should be reset, so it should require `upscale_wait_periods` # more periods before we actually scale up. for i in range(upscale_wait_periods): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1 ) assert new_num_replicas == 1, i new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1 ) assert new_num_replicas == 2 # Get some scale-down decisions, but not enough to trigger a scale down. for i in range(int(downscale_wait_periods / 2)): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=no_requests, curr_target_num_replicas=2 ) assert new_num_replicas == 2, i # Interrupt with a scale-up decision. policy.get_decision_num_replicas( current_num_ongoing_requests=[100, 100], curr_target_num_replicas=2 ) # The counter should be reset so it should require `downscale_wait_periods` # more periods before we actually scale down. for i in range(downscale_wait_periods): new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=no_requests, curr_target_num_replicas=2 ) assert new_num_replicas == 2, i new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=no_requests, curr_target_num_replicas=2 ) assert new_num_replicas == 1 ``` ###Assistant : Unit test for upscale_delay_s and downscale_delay_s. 
" 500,"def set_aspect(self, aspect, adjustable=None, anchor=None, share=False): if cbook._str_equal(aspect, 'equal'): aspect = 1 if not cbook._str_equal(aspect, 'auto'): aspect = float(aspect) # raise ValueError if necessary if aspect<0: raise ValueError(""aspect must be positive"") if share: axes = {sibling for name in self._axis_names for sibling in self._shared_axes[name].get_siblings(self)} else: axes = [self] for ax in axes: ax._aspect = aspect if adjustable is None: adjustable = self._adjustable self.set_adjustable(adjustable, share=share) # Handle sharing. if anchor is not None: self.set_anchor(anchor, share=share) self.stale = True "," Set the aspect ratio of the axes scaling, i.e. y/x-scale. Parameters ---------- aspect : {'auto', 'equal'} or float Possible values: - 'auto': fill the position rectangle with data. - 'equal': same as ``aspect=1``, i.e. same scaling for x and y. - *float*: The displayed size of 1 unit in y-data coordinates will be *aspect* times the displayed size of 1 unit in x-data coordinates; e.g. for ``aspect=2`` a square in data coordinates will be rendered with a height of twice its width. adjustable : None or {'box', 'datalim'}, optional If not ``None``, this defines which parameter will be adjusted to meet the required aspect. See `.set_adjustable` for further details. anchor : None or str or (float, float), optional If not ``None``, this defines where the Axes will be drawn if there is extra space due to aspect constraints. The most common way to to specify the anchor are abbreviations of cardinal directions: ===== ===================== value description ===== ===================== 'C' centered 'SW' lower left corner 'S' middle of bottom edge 'SE' lower right corner etc. ===== ===================== See `~.Axes.set_anchor` for further details. share : bool, default: False If ``True``, apply the settings to all shared Axes. See Also -------- matplotlib.axes.Axes.set_adjustable Set how the Axes adjusts to achieve the required aspect ratio. matplotlib.axes.Axes.set_anchor Set the position in case of extra space. ",219,77,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_aspect(self, aspect, adjustable=None, anchor=None, share=False): if cbook._str_equal(aspect, 'equal'): aspect = 1 if not cbook._str_equal(aspect, 'auto'): aspect = float(aspect) # raise ValueError if necessary if aspect<0: raise ValueError(""aspect must be positive"") if share: axes = {sibling for name in self._axis_names for sibling in self._shared_axes[name].get_siblings(self)} else: axes = [self] for ax in axes: ax._aspect = aspect if adjustable is None: adjustable = self._adjustable self.set_adjustable(adjustable, share=share) # Handle sharing. if anchor is not None: self.set_anchor(anchor, share=share) self.stale = True ``` ###Assistant : Set the aspect ratio of the axes scaling, i.e. y/x-scale. Parameters ---------- aspect : {'auto', 'equal'} or float Possible values: - 'auto': fill the position rectangle with data. - 'equal': same as ``aspect=1``, i.e. same scaling for x and y. - *float*: The displayed size of 1 unit in y-data coordinates will be *aspect* times the displayed size of 1 unit in x-data coordinates; e.g. for ``aspect=2`` a square in data coordinates will be rendered with a height of twice its width. adjustable : None or {'box', 'datalim'}, optional If not ``None``, this defines which parameter will be adjusted to meet the required aspect. See `.set_adjustable` for further details. 
anchor : None or str or (float, float), optional If not ``None``, this defines where the Axes will be drawn if there is extra space due to aspect constraints. The most common way to to specify the anchor are abbreviations of cardinal directions: ===== ===================== value description ===== ===================== 'C' centered 'SW' lower left corner 'S' middle of bottom edge 'SE' lower right corner etc. ===== ===================== See `~.Axes.set_anchor` for further details. share : bool, default: False If ``True``, apply the settings to all shared Axes. See Also -------- matplotlib.axes.Axes.set_adjustable Set how the Axes adjusts to achieve the required aspect ratio. matplotlib.axes.Axes.set_anchor Set the position in case of extra space. " 501,"def test_pk_attributes(self): # pk can be used as a substitute for the primary key. # The primary key can be accessed via the pk property on the model. e = Employee.objects.get(pk=123) self.assertEqual(e.pk, 123) # Or we can use the real attribute name for the primary key: self.assertEqual(e.employee_code, 123) with self.assertRaisesMessage( AttributeError, ""'Employee' object has no attribute 'id'"" ): e.id "," pk and attribute name are available on the model No default id attribute is added ",15,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pk_attributes(self): # pk can be used as a substitute for the primary key. # The primary key can be accessed via the pk property on the model. e = Employee.objects.get(pk=123) self.assertEqual(e.pk, 123) # Or we can use the real attribute name for the primary key: self.assertEqual(e.employee_code, 123) with self.assertRaisesMessage( AttributeError, ""'Employee' object has no attribute 'id'"" ): e.id ``` ###Assistant : pk and attribute name are available on the model No default id attribute is added " 502,"async def __aenter__(self): if self._closed: # httpx.AsyncClient does not allow reuse so we will not either. raise RuntimeError( ""The client cannot be started again after closing. "" ""Retrieve a new client with `get_client()` instead."" ) if self._started: # httpx.AsyncClient does not allow reentrancy so we will not either. raise RuntimeError(""The client cannot be started more than once."") await self._exit_stack.__aenter__() # Enter a lifespan context if using an ephemeral application. # See https://github.com/encode/httpx/issues/350 if self._ephemeral_app and self.manage_lifespan: self._ephemeral_lifespan = await self._exit_stack.enter_async_context( app_lifespan_context(self._ephemeral_app) ) # Enter the httpx client's context await self._exit_stack.enter_async_context(self._client) self._started = True return self "," Start the client. If the client is already started, this will raise an exception. If the client is already closed, this will raise an exception. Use a new client instance instead. ",31,95,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def __aenter__(self): if self._closed: # httpx.AsyncClient does not allow reuse so we will not either. raise RuntimeError( ""The client cannot be started again after closing. "" ""Retrieve a new client with `get_client()` instead."" ) if self._started: # httpx.AsyncClient does not allow reentrancy so we will not either. raise RuntimeError(""The client cannot be started more than once."") await self._exit_stack.__aenter__() # Enter a lifespan context if using an ephemeral application. 
# See https://github.com/encode/httpx/issues/350 if self._ephemeral_app and self.manage_lifespan: self._ephemeral_lifespan = await self._exit_stack.enter_async_context( app_lifespan_context(self._ephemeral_app) ) # Enter the httpx client's context await self._exit_stack.enter_async_context(self._client) self._started = True return self ``` ###Assistant : Start the client. If the client is already started, this will raise an exception. If the client is already closed, this will raise an exception. Use a new client instance instead. " 503,"def training_iteration(self) -> ResultDict: # Sample n batches from n workers. new_sample_batches = synchronous_parallel_sample( worker_set=self.workers, concat=False ) for batch in new_sample_batches: # Update counters. self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps() self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps() # Store new samples in the replay buffer. self.local_replay_buffer.add(batch) # Sample n batches from replay buffer until the total number of timesteps # reaches `train_batch_size`. train_batch = sample_min_n_steps_from_buffer( replay_buffer=self.local_replay_buffer, min_steps=self.config[""train_batch_size""], count_by_agent_steps=self._by_agent_steps, ) if train_batch is None: return {} # Learn on the training batch. # Use simple optimizer (only for multi-agent or tf-eager; all other # cases should use the multi-GPU optimizer, even if only using 1 GPU) if self.config.get(""simple_optimizer"") is True: train_results = train_one_step(self, train_batch) else: train_results = multi_gpu_train_one_step(self, train_batch) # TODO: Move training steps counter update outside of `train_one_step()` method. # # Update train step counters. # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps() # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps() # Update target network every `target_network_update_freq` steps. cur_ts = self._counters[NUM_ENV_STEPS_SAMPLED] last_update = self._counters[LAST_TARGET_UPDATE_TS] if cur_ts - last_update >= self.config[""target_network_update_freq""]: to_update = self.workers.local_worker().get_policies_to_train() self.workers.local_worker().foreach_policy_to_train( lambda p, pid: pid in to_update and p.update_target() ) self._counters[NUM_TARGET_UPDATES] += 1 self._counters[LAST_TARGET_UPDATE_TS] = cur_ts # Update weights and global_vars - after learning on the local worker - on all # remote workers. global_vars = { ""timestep"": self._counters[NUM_ENV_STEPS_SAMPLED], } # Update remote workers' weights and global vars after learning on local worker. with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: self.workers.sync_weights(global_vars=global_vars) # Return all collected metrics for the iteration. return train_results ","QMIX training iteration function. - Sample n MultiAgentBatches from n workers synchronously. - Store new samples in the replay buffer. - Sample one training MultiAgentBatch from the replay buffer. - Learn on the training batch. - Update the target network every `target_network_update_freq` steps. - Return all collected training metrics for the iteration. Returns: The results dict from executing the training iteration. ",61,224,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def training_iteration(self) -> ResultDict: # Sample n batches from n workers. new_sample_batches = synchronous_parallel_sample( worker_set=self.workers, concat=False ) for batch in new_sample_batches: # Update counters. 
self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps() self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps() # Store new samples in the replay buffer. self.local_replay_buffer.add(batch) # Sample n batches from replay buffer until the total number of timesteps # reaches `train_batch_size`. train_batch = sample_min_n_steps_from_buffer( replay_buffer=self.local_replay_buffer, min_steps=self.config[""train_batch_size""], count_by_agent_steps=self._by_agent_steps, ) if train_batch is None: return {} # Learn on the training batch. # Use simple optimizer (only for multi-agent or tf-eager; all other # cases should use the multi-GPU optimizer, even if only using 1 GPU) if self.config.get(""simple_optimizer"") is True: train_results = train_one_step(self, train_batch) else: train_results = multi_gpu_train_one_step(self, train_batch) # TODO: Move training steps counter update outside of `train_one_step()` method. # # Update train step counters. # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps() # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps() # Update target network every `target_network_update_freq` steps. cur_ts = self._counters[NUM_ENV_STEPS_SAMPLED] last_update = self._counters[LAST_TARGET_UPDATE_TS] if cur_ts - last_update >= self.config[""target_network_update_freq""]: to_update = self.workers.local_worker().get_policies_to_train() self.workers.local_worker().foreach_policy_to_train( lambda p, pid: pid in to_update and p.update_target() ) self._counters[NUM_TARGET_UPDATES] += 1 self._counters[LAST_TARGET_UPDATE_TS] = cur_ts # Update weights and global_vars - after learning on the local worker - on all # remote workers. global_vars = { ""timestep"": self._counters[NUM_ENV_STEPS_SAMPLED], } # Update remote workers' weights and global vars after learning on local worker. with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: self.workers.sync_weights(global_vars=global_vars) # Return all collected metrics for the iteration. return train_results ``` ###Assistant : QMIX training iteration function. - Sample n MultiAgentBatches from n workers synchronously. - Store new samples in the replay buffer. - Sample one training MultiAgentBatch from the replay buffer. - Learn on the training batch. - Update the target network every `target_network_update_freq` steps. - Return all collected training metrics for the iteration. Returns: The results dict from executing the training iteration. " 504,"def _announce() -> None: current_version = get_package_version() tag_name = f""v{current_version}"" click.echo( f ) if ""rc"" in tag_name: click.echo( ) else: click.echo( ) @cli.command() @click.option(""--gh-token"", envvar=[""GH_TOKEN"", ""GITHUB_TOKEN""], required=True)","Generate markdown to announce the release. Hi everyone. Synapse {current_version} has just been released. 
[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) | \ [docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \ [debs](https://packages.matrix.org/debian/) | \ [pypi](https://pypi.org/project/matrix-synapse/{current_version}/) Announce the RC in - #homeowners:matrix.org (Synapse Announcements) - #synapse-dev:matrix.org Announce the release in - #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic - #synapse:matrix.org (Synapse Admins), bumping the version in the topic - #synapse-dev:matrix.org - #synapse-package-maintainers:matrix.org Ask the designated people to do the blog and tweets.",72,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _announce() -> None: current_version = get_package_version() tag_name = f""v{current_version}"" click.echo( f ) if ""rc"" in tag_name: click.echo( ) else: click.echo( ) @cli.command() @click.option(""--gh-token"", envvar=[""GH_TOKEN"", ""GITHUB_TOKEN""], required=True) ``` ###Assistant : Generate markdown to announce the release. Hi everyone. Synapse {current_version} has just been released. [notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) | \ [docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \ [debs](https://packages.matrix.org/debian/) | \ [pypi](https://pypi.org/project/matrix-synapse/{current_version}/) Announce the RC in - #homeowners:matrix.org (Synapse Announcements) - #synapse-dev:matrix.org Announce the release in - #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic - #synapse:matrix.org (Synapse Admins), bumping the version in the topic - #synapse-dev:matrix.org - #synapse-package-maintainers:matrix.org Ask the designated people to do the blog and tweets. " 505,"def update(self, value=None, visible=None): if not self._widget_was_created(): # if widget hasn't been created yet, then don't allow return if value is not None: self._TKOut.output.delete('1.0', tk.END) self._TKOut.output.insert(tk.END, value) if visible is False: self._pack_forget_save_settings(self._TKOut.frame) elif visible is True: self._pack_restore_settings(self._TKOut.frame) if visible is not None: self._visible = visible "," Changes some of the settings for the Output Element. Must call `Window.Read` or `Window.Finalize` prior Changes will not be visible in your window until you call window.read or window.refresh. If you change visibility, your element may MOVE. If you want it to remain stationary, use the ""layout helper"" function ""pin"" to ensure your element is ""pinned"" to that location in your layout so that it returns there when made visible. :param value: string that will replace current contents of the output area :type value: (str) :param visible: control visibility of element :type visible: (bool) ",94,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self, value=None, visible=None): if not self._widget_was_created(): # if widget hasn't been created yet, then don't allow return if value is not None: self._TKOut.output.delete('1.0', tk.END) self._TKOut.output.insert(tk.END, value) if visible is False: self._pack_forget_save_settings(self._TKOut.frame) elif visible is True: self._pack_restore_settings(self._TKOut.frame) if visible is not None: self._visible = visible ``` ###Assistant : Changes some of the settings for the Output Element. 
Must call `Window.Read` or `Window.Finalize` prior Changes will not be visible in your window until you call window.read or window.refresh. If you change visibility, your element may MOVE. If you want it to remain stationary, use the ""layout helper"" function ""pin"" to ensure your element is ""pinned"" to that location in your layout so that it returns there when made visible. :param value: string that will replace current contents of the output area :type value: (str) :param visible: control visibility of element :type visible: (bool) " 506,"def example(self): result = getattr(self, ""_example"", None) if result is None: # No example batch was found, so get one from the `.train` dataset result = next(iter(self.train)) # And cache it for next time self._example = result return result","Get and cache an example batch of `inputs, labels` for plotting.",11,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def example(self): result = getattr(self, ""_example"", None) if result is None: # No example batch was found, so get one from the `.train` dataset result = next(iter(self.train)) # And cache it for next time self._example = result return result ``` ###Assistant : Get and cache an example batch of `inputs, labels` for plotting. " 507,"def unit_attname(cls, unit_str): lower = unit_str.lower() if unit_str in cls.UNITS: return unit_str elif lower in cls.UNITS: return lower elif lower in cls.LALIAS: return cls.LALIAS[lower] else: raise Exception( 'Could not find a unit keyword associated with ""%s""' % unit_str ) "," Retrieve the unit attribute name for the given unit string. For example, if the given unit string is 'metre', return 'm'. Raise an exception if an attribute cannot be found. ",30,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unit_attname(cls, unit_str): lower = unit_str.lower() if unit_str in cls.UNITS: return unit_str elif lower in cls.UNITS: return lower elif lower in cls.LALIAS: return cls.LALIAS[lower] else: raise Exception( 'Could not find a unit keyword associated with ""%s""' % unit_str ) ``` ###Assistant : Retrieve the unit attribute name for the given unit string. For example, if the given unit string is 'metre', return 'm'. Raise an exception if an attribute cannot be found. " 508,"def test_dont_import_tf_error(): # Do not import tf for testing purposes. os.environ[""RLLIB_TEST_NO_TF_IMPORT""] = ""1"" config = ppo.PPOConfig().environment(""CartPole-v1"") for _ in framework_iterator(config, frameworks=(""tf"", ""tf2"")): with pytest.raises(ImportError, match=""However, no installation was found""): config.build() ","Check error being thrown, if tf not installed but configured.",10,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dont_import_tf_error(): # Do not import tf for testing purposes. os.environ[""RLLIB_TEST_NO_TF_IMPORT""] = ""1"" config = ppo.PPOConfig().environment(""CartPole-v1"") for _ in framework_iterator(config, frameworks=(""tf"", ""tf2"")): with pytest.raises(ImportError, match=""However, no installation was found""): config.build() ``` ###Assistant : Check error being thrown, if tf not installed but configured. " 509,"def _current(self): if self._hmac: return self._hmac else: h = self._outer.copy() h.update(self._inner.digest()) return h ","Return a hash object for the current state. 
To be used only internally with digest() and hexdigest(). ",17,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _current(self): if self._hmac: return self._hmac else: h = self._outer.copy() h.update(self._inner.digest()) return h ``` ###Assistant : Return a hash object for the current state. To be used only internally with digest() and hexdigest(). " 510,"def delete_links_from_desktop_icons(report): desktop_icons = frappe.db.get_values(""Desktop Icon"", {""_report"": report}, [""name""]) for desktop_icon in desktop_icons: frappe.delete_doc(""Desktop Icon"", desktop_icon[0])", Check for one or multiple Desktop Icons and delete ,9,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delete_links_from_desktop_icons(report): desktop_icons = frappe.db.get_values(""Desktop Icon"", {""_report"": report}, [""name""]) for desktop_icon in desktop_icons: frappe.delete_doc(""Desktop Icon"", desktop_icon[0]) ``` ###Assistant : Check for one or multiple Desktop Icons and delete " 511,"def get_bboxes_list(end2end_result, structure_master_result): # end2end end2end_xyxy_list = [] end2end_xywh_list = [] for end2end_item in end2end_result: src_bbox = end2end_item['bbox'] end2end_xyxy_list.append(src_bbox) xywh_bbox = xyxy2xywh(src_bbox) end2end_xywh_list.append(xywh_bbox) end2end_xyxy_bboxes = np.array(end2end_xyxy_list) end2end_xywh_bboxes = np.array(end2end_xywh_list) # structure master src_bboxes = structure_master_result['bbox'] src_bboxes = remove_empty_bboxes(src_bboxes) # structure_master_xywh_bboxes = src_bboxes # xyxy_bboxes = xywh2xyxy(src_bboxes) # structure_master_xyxy_bboxes = xyxy_bboxes structure_master_xyxy_bboxes = src_bboxes xywh_bbox = xyxy2xywh(src_bboxes) structure_master_xywh_bboxes = xywh_bbox return end2end_xyxy_bboxes, end2end_xywh_bboxes, structure_master_xywh_bboxes, structure_master_xyxy_bboxes "," This function is used to convert end2end results and structure master results to List of xyxy bbox format and List of xywh bbox format :param end2end_result: bbox's format is xyxy :param structure_master_result: bbox's format is xywh :return: 4 kind list of bbox () ",43,64,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_bboxes_list(end2end_result, structure_master_result): # end2end end2end_xyxy_list = [] end2end_xywh_list = [] for end2end_item in end2end_result: src_bbox = end2end_item['bbox'] end2end_xyxy_list.append(src_bbox) xywh_bbox = xyxy2xywh(src_bbox) end2end_xywh_list.append(xywh_bbox) end2end_xyxy_bboxes = np.array(end2end_xyxy_list) end2end_xywh_bboxes = np.array(end2end_xywh_list) # structure master src_bboxes = structure_master_result['bbox'] src_bboxes = remove_empty_bboxes(src_bboxes) # structure_master_xywh_bboxes = src_bboxes # xyxy_bboxes = xywh2xyxy(src_bboxes) # structure_master_xyxy_bboxes = xyxy_bboxes structure_master_xyxy_bboxes = src_bboxes xywh_bbox = xyxy2xywh(src_bboxes) structure_master_xywh_bboxes = xywh_bbox return end2end_xyxy_bboxes, end2end_xywh_bboxes, structure_master_xywh_bboxes, structure_master_xyxy_bboxes ``` ###Assistant : This function is used to convert end2end results and structure master results to List of xyxy bbox format and List of xywh bbox format :param end2end_result: bbox's format is xyxy :param structure_master_result: bbox's format is xywh :return: 4 kind list of bbox () " 512,"def autoname_elements() -> None: for name, var in sys._getframe().f_back.f_locals.items(): if isinstance(var, ParserElement) and not var.customName: var.set_name(name) dbl_quoted_string = Combine( Regex(r'""(?:[^""\n\r\\]|(?:"""")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '""' ).set_name(""string enclosed in double quotes"") sgl_quoted_string = Combine( Regex(r""'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*"") + ""'"" ).set_name(""string enclosed in single quotes"") quoted_string = Combine( Regex(r'""(?:[^""\n\r\\]|(?:"""")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '""' | Regex(r""'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*"") + ""'"" ).set_name(""quotedString using single or double quotes"") unicode_string = Combine(""u"" + quoted_string.copy()).set_name(""unicode string literal"") alphas8bit = srange(r""[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]"") punc8bit = srange(r""[\0xa1-\0xbf\0xd7\0xf7]"") # build list of built-in expressions, for future reference if a global default value # gets updated _builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] # backward compatibility names tokenMap = token_map conditionAsParseAction = condition_as_parse_action nullDebugAction = null_debug_action sglQuotedString = sgl_quoted_string dblQuotedString = dbl_quoted_string quotedString = quoted_string unicodeString = unicode_string lineStart = line_start lineEnd = line_end stringStart = string_start stringEnd = string_end traceParseAction = trace_parse_action "," Utility to simplify mass-naming of parser elements, for generating railroad diagram with named subdiagrams. ",14,134,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def autoname_elements() -> None: for name, var in sys._getframe().f_back.f_locals.items(): if isinstance(var, ParserElement) and not var.customName: var.set_name(name) dbl_quoted_string = Combine( Regex(r'""(?:[^""\n\r\\]|(?:"""")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '""' ).set_name(""string enclosed in double quotes"") sgl_quoted_string = Combine( Regex(r""'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*"") + ""'"" ).set_name(""string enclosed in single quotes"") quoted_string = Combine( Regex(r'""(?:[^""\n\r\\]|(?:"""")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '""' | Regex(r""'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*"") + ""'"" ).set_name(""quotedString using single or double quotes"") unicode_string = Combine(""u"" + quoted_string.copy()).set_name(""unicode string literal"") alphas8bit = srange(r""[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]"") punc8bit = srange(r""[\0xa1-\0xbf\0xd7\0xf7]"") # build list of built-in expressions, for future reference if a global default value # gets updated _builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] # backward compatibility names tokenMap = token_map conditionAsParseAction = condition_as_parse_action nullDebugAction = null_debug_action sglQuotedString = sgl_quoted_string dblQuotedString = dbl_quoted_string quotedString = quoted_string unicodeString = unicode_string lineStart = line_start lineEnd = line_end stringStart = string_start stringEnd = string_end traceParseAction = trace_parse_action ``` ###Assistant : Utility to simplify mass-naming of parser elements, for generating railroad diagram with named subdiagrams. " 513,"def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self) -> None: yaml_str = # External codegen on a yaml file with no operators is effectively a no-op, # so there's no reason to parse the backend self.assert_success_from_gen_backend_stubs(yaml_str) ","\ backend: BAD_XLA cpp_namespace: torch_xla supported:",6,30,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self) -> None: yaml_str = # External codegen on a yaml file with no operators is effectively a no-op, # so there's no reason to parse the backend self.assert_success_from_gen_backend_stubs(yaml_str) ``` ###Assistant : \ backend: BAD_XLA cpp_namespace: torch_xla supported: " 514,"async def test_pseudo_remote_peas_topologies(gateway, head, worker): worker_port = random_port() head_port = random_port() port_expose = random_port() graph_description = '{""start-gateway"": [""pod0""], ""pod0"": [""end-gateway""]}' if head == 'remote': pods_addresses = f'{{""pod0"": [""{HOST}:{head_port}""]}}' else: pods_addresses = f'{{""pod0"": [""0.0.0.0:{head_port}""]}}' # create a single head pea head_pea = _create_head_pea(head, head_port) # create a single worker pea worker_pea = _create_worker_pea(worker, worker_port) # create a single gateway pea gateway_pea = _create_gateway_pea( gateway, graph_description, pods_addresses, port_expose ) with gateway_pea, worker_pea, head_pea: await asyncio.sleep(1.0) # this would be done by the Pod, its adding the worker to the head activate_msg = ControlRequest(command='ACTIVATE') worker_host, worker_port = worker_pea.runtime_ctrl_address.split(':') if head == 'remote': worker_host = __docker_host__ activate_msg.add_related_entity('worker', worker_host, int(worker_port)) assert GrpcConnectionPool.send_request_sync( activate_msg, head_pea.runtime_ctrl_address ) # send requests to the gateway c = Client(host='127.0.0.1', port=port_expose, asyncio=True) responses = c.post( '/', inputs=async_inputs, request_size=1, return_results=True ) response_list = []"," g(l)-h(l)-w(l) - works g(l)-h(l)-w(r) - works - head connects to worker via localhost g(l)-h(r)-w(r) - works - head (inside docker) connects to worker via dockerhost g(l)-h(r)-w(l) - doesn't work remote head need remote worker g(r)-... - doesn't work, as distributed parser not enabled for gateway After any 1 failure, segfault ",50,132,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_pseudo_remote_peas_topologies(gateway, head, worker): worker_port = random_port() head_port = random_port() port_expose = random_port() graph_description = '{""start-gateway"": [""pod0""], ""pod0"": [""end-gateway""]}' if head == 'remote': pods_addresses = f'{{""pod0"": [""{HOST}:{head_port}""]}}' else: pods_addresses = f'{{""pod0"": [""0.0.0.0:{head_port}""]}}' # create a single head pea head_pea = _create_head_pea(head, head_port) # create a single worker pea worker_pea = _create_worker_pea(worker, worker_port) # create a single gateway pea gateway_pea = _create_gateway_pea( gateway, graph_description, pods_addresses, port_expose ) with gateway_pea, worker_pea, head_pea: await asyncio.sleep(1.0) # this would be done by the Pod, its adding the worker to the head activate_msg = ControlRequest(command='ACTIVATE') worker_host, worker_port = worker_pea.runtime_ctrl_address.split(':') if head == 'remote': worker_host = __docker_host__ activate_msg.add_related_entity('worker', worker_host, int(worker_port)) assert GrpcConnectionPool.send_request_sync( activate_msg, head_pea.runtime_ctrl_address ) # send requests to the gateway c = Client(host='127.0.0.1', port=port_expose, asyncio=True) responses = c.post( '/', inputs=async_inputs, request_size=1, return_results=True ) response_list = [] ``` ###Assistant : g(l)-h(l)-w(l) - works g(l)-h(l)-w(r) - works - head connects to worker via localhost g(l)-h(r)-w(r) - works - head (inside docker) connects to worker via dockerhost g(l)-h(r)-w(l) - doesn't work remote head need remote worker g(r)-... - doesn't work, as distributed parser not enabled for gateway After any 1 failure, segfault " 515,"async def async_wait_start_success(self): import asyncio _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 timeout_ns = 1e9 * _timeout if _timeout else None now = time.time_ns() while timeout_ns is None or time.time_ns() - now < timeout_ns: if self.ready_or_shutdown.event.is_set(): self._check_failed_to_start() self.logger.debug(__ready_msg__) return else: await asyncio.sleep(0.1) self._fail_start_timeout(_timeout) "," Wait for the `Pea` to start successfully in a non-blocking manner ",11,50,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_wait_start_success(self): import asyncio _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 timeout_ns = 1e9 * _timeout if _timeout else None now = time.time_ns() while timeout_ns is None or time.time_ns() - now < timeout_ns: if self.ready_or_shutdown.event.is_set(): self._check_failed_to_start() self.logger.debug(__ready_msg__) return else: await asyncio.sleep(0.1) self._fail_start_timeout(_timeout) ``` ###Assistant : Wait for the `Pea` to start successfully in a non-blocking manner " 516,"def set_fontsize(self, s=None): if s is None: s = mpl.rcParams[""legend.fontsize""] self.prop = FontProperties(size=s) self.stale = True "," Set the fontsize in points. If *s* is not given, reset to :rc:`legend.fontsize`. ",13,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_fontsize(self, s=None): if s is None: s = mpl.rcParams[""legend.fontsize""] self.prop = FontProperties(size=s) self.stale = True ``` ###Assistant : Set the fontsize in points. If *s* is not given, reset to :rc:`legend.fontsize`. 
" 517,"def get_next(self, timeout=None): if not self.has_next(): raise StopIteration(""No more results to get"") if self._next_return_index >= self._next_task_index: raise ValueError( ""It is not allowed to call get_next() after "" ""get_next_unordered()."" ) future = self._index_to_future[self._next_return_index] if timeout is not None: res, _ = ray.wait([future], timeout=timeout) if not res: raise TimeoutError(""Timed out waiting for result"") del self._index_to_future[self._next_return_index] self._next_return_index += 1 future_key = tuple(future) if isinstance(future, list) else future i, a = self._future_to_actor.pop(future_key) self._return_actor(a) return ray.get(future) ","Returns the next pending result in order. This returns the next result produced by submit(), blocking for up to the specified timeout until it is available. Returns: The next result. Raises: TimeoutError if the timeout is reached. Examples: >>> pool = ActorPool(...) >>> pool.submit(lambda a, v: a.double.remote(v), 1) >>> print(pool.get_next()) 2 ",51,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_next(self, timeout=None): if not self.has_next(): raise StopIteration(""No more results to get"") if self._next_return_index >= self._next_task_index: raise ValueError( ""It is not allowed to call get_next() after "" ""get_next_unordered()."" ) future = self._index_to_future[self._next_return_index] if timeout is not None: res, _ = ray.wait([future], timeout=timeout) if not res: raise TimeoutError(""Timed out waiting for result"") del self._index_to_future[self._next_return_index] self._next_return_index += 1 future_key = tuple(future) if isinstance(future, list) else future i, a = self._future_to_actor.pop(future_key) self._return_actor(a) return ray.get(future) ``` ###Assistant : Returns the next pending result in order. This returns the next result produced by submit(), blocking for up to the specified timeout until it is available. Returns: The next result. Raises: TimeoutError if the timeout is reached. Examples: >>> pool = ActorPool(...) >>> pool.submit(lambda a, v: a.double.remote(v), 1) >>> print(pool.get_next()) 2 " 518,"def expand_egg_links(self) -> None: prefixes = [ Path(prefix) for prefix in self.base_paths[""libdirs""].split(os.pathsep) if vistir.path.is_in_path(prefix, self.prefix.as_posix()) ] for loc in prefixes: if not loc.exists(): continue for pth in loc.iterdir(): if not pth.suffix == "".egg-link"": continue contents = [ vistir.path.normalize_path(line.strip()) for line in pth.read_text().splitlines() ] pth.write_text(""\n"".join(contents)) "," Expand paths specified in egg-link files to prevent pip errors during reinstall ",12,44,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def expand_egg_links(self) -> None: prefixes = [ Path(prefix) for prefix in self.base_paths[""libdirs""].split(os.pathsep) if vistir.path.is_in_path(prefix, self.prefix.as_posix()) ] for loc in prefixes: if not loc.exists(): continue for pth in loc.iterdir(): if not pth.suffix == "".egg-link"": continue contents = [ vistir.path.normalize_path(line.strip()) for line in pth.read_text().splitlines() ] pth.write_text(""\n"".join(contents)) ``` ###Assistant : Expand paths specified in egg-link files to prevent pip errors during reinstall " 519,"def test_page_with_og(self) -> None: html = b parser = OpenGraphParser(html, ""text/html; charset=UTF-8"") result = parser.extract_data() self.assertEqual(result.title, ""The Rock"") self.assertEqual(result.description, ""The Rock film"") "," ",27,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_page_with_og(self) -> None: html = b parser = OpenGraphParser(html, ""text/html; charset=UTF-8"") result = parser.extract_data() self.assertEqual(result.title, ""The Rock"") self.assertEqual(result.description, ""The Rock film"") ``` ###Assistant : " 520,"def is_rational_function(self, *syms): if self in _illegal: return False if syms: syms = set(map(sympify, syms)) else: syms = self.free_symbols if not syms: return True return self._eval_is_rational_function(syms) "," Test whether function is a ratio of two polynomials in the given symbols, syms. When syms is not given, all free symbols will be used. The rational function does not have to be in expanded or in any kind of canonical form. This function returns False for expressions that are ""rational functions"" with symbolic exponents. Thus, you should be able to call .as_numer_denom() and apply polynomial algorithms to the result for expressions for which this returns True. This is not part of the assumptions system. You cannot do Symbol('z', rational_function=True). Examples ======== >>> from sympy import Symbol, sin >>> from sympy.abc import x, y >>> (x/y).is_rational_function() True >>> (x**2).is_rational_function() True >>> (x/sin(y)).is_rational_function(y) False >>> n = Symbol('n', integer=True) >>> (x**n + 1).is_rational_function(x) False This function does not attempt any nontrivial simplifications that may result in an expression that does not appear to be a rational function to become one. >>> from sympy import sqrt, factor >>> y = Symbol('y', positive=True) >>> a = sqrt(y**2 + 2*y + 1)/y >>> a.is_rational_function(y) False >>> factor(a) (y + 1)/y >>> factor(a).is_rational_function(y) True See also is_algebraic_expr(). ",182,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_rational_function(self, *syms): if self in _illegal: return False if syms: syms = set(map(sympify, syms)) else: syms = self.free_symbols if not syms: return True return self._eval_is_rational_function(syms) ``` ###Assistant : Test whether function is a ratio of two polynomials in the given symbols, syms. When syms is not given, all free symbols will be used. The rational function does not have to be in expanded or in any kind of canonical form. This function returns False for expressions that are ""rational functions"" with symbolic exponents. Thus, you should be able to call .as_numer_denom() and apply polynomial algorithms to the result for expressions for which this returns True. 
This is not part of the assumptions system. You cannot do Symbol('z', rational_function=True). Examples ======== >>> from sympy import Symbol, sin >>> from sympy.abc import x, y >>> (x/y).is_rational_function() True >>> (x**2).is_rational_function() True >>> (x/sin(y)).is_rational_function(y) False >>> n = Symbol('n', integer=True) >>> (x**n + 1).is_rational_function(x) False This function does not attempt any nontrivial simplifications that may result in an expression that does not appear to be a rational function to become one. >>> from sympy import sqrt, factor >>> y = Symbol('y', positive=True) >>> a = sqrt(y**2 + 2*y + 1)/y >>> a.is_rational_function(y) False >>> factor(a) (y + 1)/y >>> factor(a).is_rational_function(y) True See also is_algebraic_expr(). " 521,"def reduce_alg_num(self, a): elt = self.ZK.parent.element_from_alg_num(a) red = self.reduce_element(elt) return a.field_element(list(reversed(red.QQ_col.flat()))) "," Reduce an :py:class:`~.AlgebraicNumber` to a ""small representative"" modulo this prime ideal. Parameters ========== elt : :py:class:`~.AlgebraicNumber` The element to be reduced. Returns ======= :py:class:`~.AlgebraicNumber` The reduced element. See Also ======== reduce_element reduce_ANP .Submodule.reduce_element ",33,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reduce_alg_num(self, a): elt = self.ZK.parent.element_from_alg_num(a) red = self.reduce_element(elt) return a.field_element(list(reversed(red.QQ_col.flat()))) ``` ###Assistant : Reduce an :py:class:`~.AlgebraicNumber` to a ""small representative"" modulo this prime ideal. Parameters ========== elt : :py:class:`~.AlgebraicNumber` The element to be reduced. Returns ======= :py:class:`~.AlgebraicNumber` The reduced element. See Also ======== reduce_element reduce_ANP .Submodule.reduce_element " 522,"def _auto_joiner(self, short_results, input_mapping, is_dict=False): concat_results = [] elem_type = {} if is_dict else [] for k, vs in input_mapping.items(): single_results = elem_type for v in vs: if len(single_results) == 0: single_results = short_results[v] elif isinstance(elem_type, list): single_results.extend(short_results[v]) elif isinstance(elem_type, dict): for sk in single_results.keys(): if isinstance(single_results[sk], str): single_results[sk] += short_results[v][sk] else: single_results[sk].extend(short_results[v][sk]) else: raise ValueError( ""Invalid element type, the type of results "" ""for each element should be list of dict, "" ""but {} received."".format(type(single_results))) concat_results.append(single_results) return concat_results "," Join the short results automatically and generate the final results to match with the user inputs. Args: short_results (List[dict] / List[List[str]] / List[str]): input raw texts. input_mapping (dict): cutting length. is_dict (bool): whether the element type is dict, default to False. return: short_input_texts (List[str]): the short input texts for model inference. ",51,79,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _auto_joiner(self, short_results, input_mapping, is_dict=False): concat_results = [] elem_type = {} if is_dict else [] for k, vs in input_mapping.items(): single_results = elem_type for v in vs: if len(single_results) == 0: single_results = short_results[v] elif isinstance(elem_type, list): single_results.extend(short_results[v]) elif isinstance(elem_type, dict): for sk in single_results.keys(): if isinstance(single_results[sk], str): single_results[sk] += short_results[v][sk] else: single_results[sk].extend(short_results[v][sk]) else: raise ValueError( ""Invalid element type, the type of results "" ""for each element should be list of dict, "" ""but {} received."".format(type(single_results))) concat_results.append(single_results) return concat_results ``` ###Assistant : Join the short results automatically and generate the final results to match with the user inputs. Args: short_results (List[dict] / List[List[str]] / List[str]): input raw texts. input_mapping (dict): cutting length. is_dict (bool): whether the element type is dict, default to False. return: short_input_texts (List[str]): the short input texts for model inference. " 523,"def get_variable_names_from_ckpt(path_ckpt, use_ema=True): v_all = tf.train.list_variables(path_ckpt) # keep name only v_name_all = [x[0] for x in v_all] if use_ema: v_name_all = [x for x in v_name_all if ""ExponentialMovingAverage"" in x] else: v_name_all = [ x for x in v_name_all if ""ExponentialMovingAverage"" not in x ] # remove util variables used for RMSprop v_name_all = [x for x in v_name_all if ""RMS"" not in x] return v_name_all ","Get list of tensor names from checkpoint. Args: path_ckpt: str, path to the ckpt files use_ema: Bool, whether to use ExponentialMovingAverage result or not. Returns: List of variable names from checkpoint. ",31,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_variable_names_from_ckpt(path_ckpt, use_ema=True): v_all = tf.train.list_variables(path_ckpt) # keep name only v_name_all = [x[0] for x in v_all] if use_ema: v_name_all = [x for x in v_name_all if ""ExponentialMovingAverage"" in x] else: v_name_all = [ x for x in v_name_all if ""ExponentialMovingAverage"" not in x ] # remove util variables used for RMSprop v_name_all = [x for x in v_name_all if ""RMS"" not in x] return v_name_all ``` ###Assistant : Get list of tensor names from checkpoint. Args: path_ckpt: str, path to the ckpt files use_ema: Bool, whether to use ExponentialMovingAverage result or not. Returns: List of variable names from checkpoint. " 524,"def ode_order(expr, func): a = Wild('a', exclude=[func]) if expr.match(a): return 0 if isinstance(expr, Derivative): if expr.args[0] == func: return len(expr.variables) else: return max(ode_order(arg, func) for arg in expr.args[0].args) + len(expr.variables) else: return max(ode_order(arg, func) for arg in expr.args) "," Returns the order of a given differential equation with respect to func. This function is implemented recursively. Examples ======== >>> from sympy import Function >>> from sympy.solvers.deutils import ode_order >>> from sympy.abc import x >>> f, g = map(Function, ['f', 'g']) >>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 + ... f(x).diff(x), f(x)) 2 >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x)) 2 >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x)) 3 ",67,38,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def ode_order(expr, func): a = Wild('a', exclude=[func]) if expr.match(a): return 0 if isinstance(expr, Derivative): if expr.args[0] == func: return len(expr.variables) else: return max(ode_order(arg, func) for arg in expr.args[0].args) + len(expr.variables) else: return max(ode_order(arg, func) for arg in expr.args) ``` ###Assistant : Returns the order of a given differential equation with respect to func. This function is implemented recursively. Examples ======== >>> from sympy import Function >>> from sympy.solvers.deutils import ode_order >>> from sympy.abc import x >>> f, g = map(Function, ['f', 'g']) >>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 + ... f(x).diff(x), f(x)) 2 >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x)) 2 >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x)) 3 " 525,"async def async_send_message(self, message, **kwargs): nextcord.VoiceClient.warn_nacl = False discord_bot = nextcord.Client() images = None embedding = None if ATTR_TARGET not in kwargs: _LOGGER.error(""No target specified"") return None data = kwargs.get(ATTR_DATA) or {} embeds: list[nextcord.Embed] = [] if ATTR_EMBED in data: embedding = data[ATTR_EMBED] fields = embedding.get(ATTR_EMBED_FIELDS) or [] if embedding: embed = nextcord.Embed(**embedding) for field in fields: embed.add_field(**field) if ATTR_EMBED_FOOTER in embedding: embed.set_footer(**embedding[ATTR_EMBED_FOOTER]) if ATTR_EMBED_AUTHOR in embedding: embed.set_author(**embedding[ATTR_EMBED_AUTHOR]) if ATTR_EMBED_THUMBNAIL in embedding: embed.set_thumbnail(**embedding[ATTR_EMBED_THUMBNAIL]) embeds.append(embed) if ATTR_IMAGES in data: images = [] for image in data.get(ATTR_IMAGES, []): image_exists = await self.hass.async_add_executor_job( self.file_exists, image ) if image_exists: images.append(image) else: _LOGGER.warning(""Image not found: %s"", image) await discord_bot.login(self.token) try: for channelid in kwargs[ATTR_TARGET]: channelid = int(channelid) try: channel = await discord_bot.fetch_channel(channelid) except nextcord.NotFound: try: channel = await discord_bot.fetch_user(channelid) except nextcord.NotFound: _LOGGER.warning(""Channel not found for ID: %s"", channelid) continue # Must create new instances of File for each channel. files = [nextcord.File(image) for image in images] if images else [] await channel.send(message, files=files, embeds=embeds) except (nextcord.HTTPException, nextcord.NotFound) as error: _LOGGER.warning(""Communication error: %s"", error) await discord_bot.close() ","Login to Discord, send message to channel(s) and log out.",10,170,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def async_send_message(self, message, **kwargs): nextcord.VoiceClient.warn_nacl = False discord_bot = nextcord.Client() images = None embedding = None if ATTR_TARGET not in kwargs: _LOGGER.error(""No target specified"") return None data = kwargs.get(ATTR_DATA) or {} embeds: list[nextcord.Embed] = [] if ATTR_EMBED in data: embedding = data[ATTR_EMBED] fields = embedding.get(ATTR_EMBED_FIELDS) or [] if embedding: embed = nextcord.Embed(**embedding) for field in fields: embed.add_field(**field) if ATTR_EMBED_FOOTER in embedding: embed.set_footer(**embedding[ATTR_EMBED_FOOTER]) if ATTR_EMBED_AUTHOR in embedding: embed.set_author(**embedding[ATTR_EMBED_AUTHOR]) if ATTR_EMBED_THUMBNAIL in embedding: embed.set_thumbnail(**embedding[ATTR_EMBED_THUMBNAIL]) embeds.append(embed) if ATTR_IMAGES in data: images = [] for image in data.get(ATTR_IMAGES, []): image_exists = await self.hass.async_add_executor_job( self.file_exists, image ) if image_exists: images.append(image) else: _LOGGER.warning(""Image not found: %s"", image) await discord_bot.login(self.token) try: for channelid in kwargs[ATTR_TARGET]: channelid = int(channelid) try: channel = await discord_bot.fetch_channel(channelid) except nextcord.NotFound: try: channel = await discord_bot.fetch_user(channelid) except nextcord.NotFound: _LOGGER.warning(""Channel not found for ID: %s"", channelid) continue # Must create new instances of File for each channel. files = [nextcord.File(image) for image in images] if images else [] await channel.send(message, files=files, embeds=embeds) except (nextcord.HTTPException, nextcord.NotFound) as error: _LOGGER.warning(""Communication error: %s"", error) await discord_bot.close() ``` ###Assistant : Login to Discord, send message to channel(s) and log out. " 526,"def rand_series_with_duplicate_datetimeindex() -> Series: dates = [ datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 5), ] return Series(np.random.randn(len(dates)), index=dates) # ---------------------------------------------------------------- # Scalars # ---------------------------------------------------------------- @pytest.fixture( params=[ ( Interval(left=0, right=5, inclusive=""right""), IntervalDtype(""int64"", inclusive=""right""), ), ( Interval(left=0.1, right=0.5, inclusive=""right""), IntervalDtype(""float64"", inclusive=""right""), ), (Period(""2012-01"", freq=""M""), ""period[M]""), (Period(""2012-02-01"", freq=""D""), ""period[D]""), ( Timestamp(""2011-01-01"", tz=""US/Eastern""), DatetimeTZDtype(tz=""US/Eastern""), ), (Timedelta(seconds=500), ""timedelta64[ns]""), ] )"," Fixture for Series with a DatetimeIndex that has duplicates. ",9,78,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def rand_series_with_duplicate_datetimeindex() -> Series: dates = [ datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 5), ] return Series(np.random.randn(len(dates)), index=dates) # ---------------------------------------------------------------- # Scalars # ---------------------------------------------------------------- @pytest.fixture( params=[ ( Interval(left=0, right=5, inclusive=""right""), IntervalDtype(""int64"", inclusive=""right""), ), ( Interval(left=0.1, right=0.5, inclusive=""right""), IntervalDtype(""float64"", inclusive=""right""), ), (Period(""2012-01"", freq=""M""), ""period[M]""), (Period(""2012-02-01"", freq=""D""), ""period[D]""), ( Timestamp(""2011-01-01"", tz=""US/Eastern""), DatetimeTZDtype(tz=""US/Eastern""), ), (Timedelta(seconds=500), ""timedelta64[ns]""), ] ) ``` ###Assistant : Fixture for Series with a DatetimeIndex that has duplicates. " 527,"def get_major_minor_version(): # type: () -> str return ""{}.{}"".format(*sys.version_info) "," Return the major-minor version of the current Python as a string, e.g. ""3.7"" or ""3.10"". ",15,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_major_minor_version(): # type: () -> str return ""{}.{}"".format(*sys.version_info) ``` ###Assistant : Return the major-minor version of the current Python as a string, e.g. ""3.7"" or ""3.10"". " 528,"def test_ssd_head_loss(self): s = 300 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, }] cfg = Config( dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), sampler=dict(type='PseudoSampler'), smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False)) ssd_head = SSDHead( num_classes=4, in_channels=(1, 1, 1, 1, 1, 1), stacked_convs=1, feat_channels=1, use_depthwise=True, anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=s, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32, 64, 100, 300], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), train_cfg=cfg) # SSD head expects a multiple levels of features per image feats = ( torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0])) for stride in ssd_head.prior_generator.strides) cls_scores, bbox_preds = ssd_head.forward(feats) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) # When there is no truth, cls_loss and box_loss should all be zero. 
empty_cls_loss = sum(empty_gt_losses['loss_cls']) empty_box_loss = sum(empty_gt_losses['loss_bbox']) self.assertEqual( empty_cls_loss.item(), 0, 'there should be no cls loss when there are no true boxes') self.assertEqual( empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes') # When truth is non-empty then both cls and box loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) one_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) onegt_cls_loss = sum(one_gt_losses['loss_cls']) onegt_box_loss = sum(one_gt_losses['loss_bbox']) self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero') self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero') ",Tests ssd head loss when truth is empty and non-empty.,10,232,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_ssd_head_loss(self): s = 300 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, }] cfg = Config( dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), sampler=dict(type='PseudoSampler'), smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False)) ssd_head = SSDHead( num_classes=4, in_channels=(1, 1, 1, 1, 1, 1), stacked_convs=1, feat_channels=1, use_depthwise=True, anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=s, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32, 64, 100, 300], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), train_cfg=cfg) # SSD head expects a multiple levels of features per image feats = ( torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0])) for stride in ssd_head.prior_generator.strides) cls_scores, bbox_preds = ssd_head.forward(feats) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) # When there is no truth, cls_loss and box_loss should all be zero. empty_cls_loss = sum(empty_gt_losses['loss_cls']) empty_box_loss = sum(empty_gt_losses['loss_bbox']) self.assertEqual( empty_cls_loss.item(), 0, 'there should be no cls loss when there are no true boxes') self.assertEqual( empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes') # When truth is non-empty then both cls and box loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) one_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) onegt_cls_loss = sum(one_gt_losses['loss_cls']) onegt_box_loss = sum(one_gt_losses['loss_bbox']) self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero') self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero') ``` ###Assistant : Tests ssd head loss when truth is empty and non-empty. 
" 529,"def _legacy_check(self) -> None: if self._min_size > 0 or self._arguments.extract_every_n != 1: logger.warning(""This alignments file was generated with the legacy extraction method."") logger.warning(""You should run this extraction job, but with 'min_size' set to 0 and "" ""'extract-every-n' set to 1 to update the alignments file."") logger.warning(""You can then re-run this extraction job with your chosen options."") sys.exit(0) maskers = [""components"", ""extended""] nn_masks = [mask for mask in list(self._alignments.mask_summary) if mask not in maskers] logtype = logger.warning if nn_masks else logger.info logtype(""This alignments file was created with the legacy extraction method and will be "" ""updated."") logtype(""Faces will be extracted using the new method and landmarks based masks will be "" ""regenerated."") if nn_masks: logtype(""However, the NN based masks '%s' will be cropped to the legacy extraction "" ""method, so you may want to run the mask tool to regenerate these "" ""masks."", ""', '"".join(nn_masks)) self._mask_pipeline = Extractor(None, None, maskers, multiprocess=True) self._mask_pipeline.launch() # Update alignments versioning self._alignments._version = _VERSION # pylint:disable=protected-access "," Check whether the alignments file was created with the legacy extraction method. If so, force user to re-extract all faces if any options have been specified, otherwise raise the appropriate warnings and set the legacy options. ",36,160,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _legacy_check(self) -> None: if self._min_size > 0 or self._arguments.extract_every_n != 1: logger.warning(""This alignments file was generated with the legacy extraction method."") logger.warning(""You should run this extraction job, but with 'min_size' set to 0 and "" ""'extract-every-n' set to 1 to update the alignments file."") logger.warning(""You can then re-run this extraction job with your chosen options."") sys.exit(0) maskers = [""components"", ""extended""] nn_masks = [mask for mask in list(self._alignments.mask_summary) if mask not in maskers] logtype = logger.warning if nn_masks else logger.info logtype(""This alignments file was created with the legacy extraction method and will be "" ""updated."") logtype(""Faces will be extracted using the new method and landmarks based masks will be "" ""regenerated."") if nn_masks: logtype(""However, the NN based masks '%s' will be cropped to the legacy extraction "" ""method, so you may want to run the mask tool to regenerate these "" ""masks."", ""', '"".join(nn_masks)) self._mask_pipeline = Extractor(None, None, maskers, multiprocess=True) self._mask_pipeline.launch() # Update alignments versioning self._alignments._version = _VERSION # pylint:disable=protected-access ``` ###Assistant : Check whether the alignments file was created with the legacy extraction method. If so, force user to re-extract all faces if any options have been specified, otherwise raise the appropriate warnings and set the legacy options. " 530,"def FisherZ(name, d1, d2): r return rv(name, FisherZDistribution, (d1, d2)) #------------------------------------------------------------------------------- # Frechet distribution --------------------------------------------------------- "," Create a Continuous Random Variable with an Fisher's Z distribution. Explanation =========== The density of the Fisher's Z distribution is given by .. 
math:: f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)} \frac{e^{d_1z}}{\left(d_1e^{2z}+d_2\right)^{\left(d_1+d_2\right)/2}} .. TODO - What is the difference between these degrees of freedom? Parameters ========== d1 : `d_1 > 0` Degree of freedom. d2 : `d_2 > 0` Degree of freedom. Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import FisherZ, density >>> from sympy import Symbol, pprint >>> d1 = Symbol(""d1"", positive=True) >>> d2 = Symbol(""d2"", positive=True) >>> z = Symbol(""z"") >>> X = FisherZ(""x"", d1, d2) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) d1 d2 d1 d2 - -- - -- -- -- 2 2 2 2 / 2*z \ d1*z 2*d1 *d2 *\d1*e + d2/ *e ----------------------------------------- /d1 d2\ B|--, --| \2 2 / References ========== .. [1] https://en.wikipedia.org/wiki/Fisher%27s_z-distribution .. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html ",145,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def FisherZ(name, d1, d2): r return rv(name, FisherZDistribution, (d1, d2)) #------------------------------------------------------------------------------- # Frechet distribution --------------------------------------------------------- ``` ###Assistant : Create a Continuous Random Variable with an Fisher's Z distribution. Explanation =========== The density of the Fisher's Z distribution is given by .. math:: f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)} \frac{e^{d_1z}}{\left(d_1e^{2z}+d_2\right)^{\left(d_1+d_2\right)/2}} .. TODO - What is the difference between these degrees of freedom? Parameters ========== d1 : `d_1 > 0` Degree of freedom. d2 : `d_2 > 0` Degree of freedom. Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import FisherZ, density >>> from sympy import Symbol, pprint >>> d1 = Symbol(""d1"", positive=True) >>> d2 = Symbol(""d2"", positive=True) >>> z = Symbol(""z"") >>> X = FisherZ(""x"", d1, d2) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) d1 d2 d1 d2 - -- - -- -- -- 2 2 2 2 / 2*z \ d1*z 2*d1 *d2 *\d1*e + d2/ *e ----------------------------------------- /d1 d2\ B|--, --| \2 2 / References ========== .. [1] https://en.wikipedia.org/wiki/Fisher%27s_z-distribution .. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html " 531,"def find_batch_size(tensors): if isinstance(tensors, (list, tuple)): for t in tensors: result = find_batch_size(t) if result is not None: return result elif isinstance(tensors, Mapping): for key, value in tensors.items(): result = find_batch_size(value) if result is not None: return result elif isinstance(tensors, torch.Tensor): return tensors.shape[0] if len(tensors.shape) >= 1 else None elif isinstance(tensors, np.ndarray): return tensors.shape[0] if len(tensors.shape) >= 1 else None "," Find the first dimension of a tensor in a nested list/tuple/dict of tensors. ",13,60,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def find_batch_size(tensors): if isinstance(tensors, (list, tuple)): for t in tensors: result = find_batch_size(t) if result is not None: return result elif isinstance(tensors, Mapping): for key, value in tensors.items(): result = find_batch_size(value) if result is not None: return result elif isinstance(tensors, torch.Tensor): return tensors.shape[0] if len(tensors.shape) >= 1 else None elif isinstance(tensors, np.ndarray): return tensors.shape[0] if len(tensors.shape) >= 1 else None ``` ###Assistant : Find the first dimension of a tensor in a nested list/tuple/dict of tensors. " 532,"def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = [] "," Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch. Parameters: outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. threshold (`float`, *optional*, defaults to 0.9): Threshold to use to filter out queries. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model. ",101,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = [] ``` ###Assistant : Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch. Parameters: outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. threshold (`float`, *optional*, defaults to 0.9): Threshold to use to filter out queries. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model. " 533,"def iterate_graycode(self, k): unranked_code = GrayCode.unrank(self.superset_size, (self.rank_gray + k) % self.cardinality) return Subset.subset_from_bitlist(self.superset, unranked_code) "," Helper function used for prev_gray and next_gray. It performs ``k`` step overs to get the respective Gray codes. Examples ======== >>> from sympy.combinatorics import Subset >>> a = Subset([1, 2, 3], [1, 2, 3, 4]) >>> a.iterate_graycode(3).subset [1, 4] >>> a.iterate_graycode(-2).subset [1, 2, 4] See Also ======== next_gray, prev_gray ",49,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def iterate_graycode(self, k): unranked_code = GrayCode.unrank(self.superset_size, (self.rank_gray + k) % self.cardinality) return Subset.subset_from_bitlist(self.superset, unranked_code) ``` ###Assistant : Helper function used for prev_gray and next_gray. 
It performs ``k`` step overs to get the respective Gray codes. Examples ======== >>> from sympy.combinatorics import Subset >>> a = Subset([1, 2, 3], [1, 2, 3, 4]) >>> a.iterate_graycode(3).subset [1, 4] >>> a.iterate_graycode(-2).subset [1, 2, 4] See Also ======== next_gray, prev_gray " 534,"def is_sequence_right_padded(mask): max_seq_length = tf.shape(mask)[1] count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1) right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length) return tf.reduce_all(tf.equal(mask, right_padded_mask)) ","Check the mask tensor and see if it right padded. For cuDNN kernel, it uses the sequence length param to skip the tailing timestep. If the data is left padded, or not a strict right padding (has masked value in the middle of the sequence), then cuDNN kernel won't be work properly in those cases. Left padded data: [[False, False, True, True, True]]. Right padded data: [[True, True, True, False, False]]. Mixture of mask/unmasked data: [[True, False, True, False, False]]. Note that for the mixed data example above, the actually data RNN should see are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not pollute the internal states. Args: mask: the Boolean tensor with shape [batch, timestep] Returns: boolean scalar tensor, whether the mask is strictly right padded. ",135,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_sequence_right_padded(mask): max_seq_length = tf.shape(mask)[1] count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1) right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length) return tf.reduce_all(tf.equal(mask, right_padded_mask)) ``` ###Assistant : Check the mask tensor and see if it right padded. For cuDNN kernel, it uses the sequence length param to skip the tailing timestep. If the data is left padded, or not a strict right padding (has masked value in the middle of the sequence), then cuDNN kernel won't be work properly in those cases. Left padded data: [[False, False, True, True, True]]. Right padded data: [[True, True, True, False, False]]. Mixture of mask/unmasked data: [[True, False, True, False, False]]. Note that for the mixed data example above, the actually data RNN should see are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not pollute the internal states. Args: mask: the Boolean tensor with shape [batch, timestep] Returns: boolean scalar tensor, whether the mask is strictly right padded. " 535,"def _send_event_over_federation(self) -> None: body = { ""pdus"": [ { ""sender"": self.user_id, ""type"": EventTypes.Message, ""state_key"": """", ""content"": {""body"": ""hello world"", ""msgtype"": ""m.text""}, ""room_id"": self.room_id, ""depth"": 0, ""origin_server_ts"": self.clock.time_msec(), ""prev_events"": [], ""auth_events"": [], ""signatures"": {}, ""unsigned"": {}, } ], } channel = self.make_signed_federation_request( method=""PUT"", path=""/_matrix/federation/v1/send/1"", content=body, ) self.assertEqual(channel.code, 200, channel.result) ",Send a dummy event over federation and check that the request succeeds.,12,49,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _send_event_over_federation(self) -> None: body = { ""pdus"": [ { ""sender"": self.user_id, ""type"": EventTypes.Message, ""state_key"": """", ""content"": {""body"": ""hello world"", ""msgtype"": ""m.text""}, ""room_id"": self.room_id, ""depth"": 0, ""origin_server_ts"": self.clock.time_msec(), ""prev_events"": [], ""auth_events"": [], ""signatures"": {}, ""unsigned"": {}, } ], } channel = self.make_signed_federation_request( method=""PUT"", path=""/_matrix/federation/v1/send/1"", content=body, ) self.assertEqual(channel.code, 200, channel.result) ``` ###Assistant : Send a dummy event over federation and check that the request succeeds. " 536,"def _print_loss(self, loss): output = "", "".join([f""Loss {side}: {side_loss:.5f}"" for side, side_loss in zip((""A"", ""B""), loss)]) timestamp = time.strftime(""%H:%M:%S"") output = f""[{timestamp}] [#{self._model.iterations:05d}] {output}"" print(f""\r{output}"", end="""") "," Outputs the loss for the current iteration to the console. Parameters ---------- loss: list The loss for each side. List should contain 2 ``floats`` side ""a"" in position 0 and side ""b"" in position `. ",35,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _print_loss(self, loss): output = "", "".join([f""Loss {side}: {side_loss:.5f}"" for side, side_loss in zip((""A"", ""B""), loss)]) timestamp = time.strftime(""%H:%M:%S"") output = f""[{timestamp}] [#{self._model.iterations:05d}] {output}"" print(f""\r{output}"", end="""") ``` ###Assistant : Outputs the loss for the current iteration to the console. Parameters ---------- loss: list The loss for each side. List should contain 2 ``floats`` side ""a"" in position 0 and side ""b"" in position `. " 537,"def itermonthdays2(self, year, month): for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): yield d, i % 7 "," Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. ",21,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def itermonthdays2(self, year, month): for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): yield d, i % 7 ``` ###Assistant : Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. " 538,"def test_distance_function_return_none_if_invalid_state(hass): hass.states.async_set(""test.object_2"", ""happy"", {""latitude"": 10}) tpl = template.Template(""{{ distance(states.test.object_2) | round }}"", hass) with pytest.raises(TemplateError): tpl.async_render() ",Test distance function return None if invalid state.,8,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_distance_function_return_none_if_invalid_state(hass): hass.states.async_set(""test.object_2"", ""happy"", {""latitude"": 10}) tpl = template.Template(""{{ distance(states.test.object_2) | round }}"", hass) with pytest.raises(TemplateError): tpl.async_render() ``` ###Assistant : Test distance function return None if invalid state. 
" 539,"def Concatenate(self, parameters): return _concatenate_getitem(self, parameters) # 3.7-8 elif sys.version_info[:2] >= (3, 7):","Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a higher order function which adds, removes or transforms parameters of a callable. For example:: Callable[Concatenate[int, P], int] See PEP 612 for detailed information. ",33,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def Concatenate(self, parameters): return _concatenate_getitem(self, parameters) # 3.7-8 elif sys.version_info[:2] >= (3, 7): ``` ###Assistant : Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a higher order function which adds, removes or transforms parameters of a callable. For example:: Callable[Concatenate[int, P], int] See PEP 612 for detailed information. " 540,"def compute_recall(result_neighbors, ground_truth_neighbors) -> float: assert len( result_neighbors.shape) == 2, ""shape = [num_queries, neighbors_per_query]"" assert len(ground_truth_neighbors.shape ) == 2, ""shape = [num_queries, ground_truth_neighbors_per_query]"" assert result_neighbors.shape[0] == ground_truth_neighbors.shape[0] gt_sets = [set(np.asarray(x)) for x in ground_truth_neighbors] hits = sum( len(list(x for x in nn_per_q if x.item() in gt_sets[q])) for q, nn_per_q in enumerate(result_neighbors)) return hits / ground_truth_neighbors.size ","Computes the recall of an approximate nearest neighbor search. Args: result_neighbors: int32 numpy array of the shape [num_queries, neighbors_per_query] where the values are the indices of the dataset. ground_truth_neighbors: int32 numpy array of with shape [num_queries, ground_truth_neighbors_per_query] where the values are the indices of the dataset. Returns: The recall. ",49,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def compute_recall(result_neighbors, ground_truth_neighbors) -> float: assert len( result_neighbors.shape) == 2, ""shape = [num_queries, neighbors_per_query]"" assert len(ground_truth_neighbors.shape ) == 2, ""shape = [num_queries, ground_truth_neighbors_per_query]"" assert result_neighbors.shape[0] == ground_truth_neighbors.shape[0] gt_sets = [set(np.asarray(x)) for x in ground_truth_neighbors] hits = sum( len(list(x for x in nn_per_q if x.item() in gt_sets[q])) for q, nn_per_q in enumerate(result_neighbors)) return hits / ground_truth_neighbors.size ``` ###Assistant : Computes the recall of an approximate nearest neighbor search. Args: result_neighbors: int32 numpy array of the shape [num_queries, neighbors_per_query] where the values are the indices of the dataset. ground_truth_neighbors: int32 numpy array of with shape [num_queries, ground_truth_neighbors_per_query] where the values are the indices of the dataset. Returns: The recall. " 541,"def isocalendar(self) -> DataFrame: from pandas import DataFrame values = self._local_timestamps() sarray = fields.build_isocalendar_sarray(values) iso_calendar_df = DataFrame( sarray, columns=[""year"", ""week"", ""day""], dtype=""UInt32"" ) if self._hasna: iso_calendar_df.iloc[self._isnan] = None return iso_calendar_df "," Returns a DataFrame with the year, week, and day calculated according to the ISO 8601 standard. .. 
versionadded:: 1.1.0 Returns ------- DataFrame with columns year, week and day See Also -------- Timestamp.isocalendar : Function return a 3-tuple containing ISO year, week number, and weekday for the given Timestamp object. datetime.date.isocalendar : Return a named tuple object with three components: year, week and weekday. Examples -------- >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4) >>> idx.isocalendar() year week day 2019-12-29 2019 52 7 2019-12-30 2020 1 1 2019-12-31 2020 1 2 2020-01-01 2020 1 3 >>> idx.isocalendar().week 2019-12-29 52 2019-12-30 1 2019-12-31 1 2020-01-01 1 Freq: D, Name: week, dtype: UInt32 ",108,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def isocalendar(self) -> DataFrame: from pandas import DataFrame values = self._local_timestamps() sarray = fields.build_isocalendar_sarray(values) iso_calendar_df = DataFrame( sarray, columns=[""year"", ""week"", ""day""], dtype=""UInt32"" ) if self._hasna: iso_calendar_df.iloc[self._isnan] = None return iso_calendar_df ``` ###Assistant : Returns a DataFrame with the year, week, and day calculated according to the ISO 8601 standard. .. versionadded:: 1.1.0 Returns ------- DataFrame with columns year, week and day See Also -------- Timestamp.isocalendar : Function return a 3-tuple containing ISO year, week number, and weekday for the given Timestamp object. datetime.date.isocalendar : Return a named tuple object with three components: year, week and weekday. Examples -------- >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4) >>> idx.isocalendar() year week day 2019-12-29 2019 52 7 2019-12-30 2020 1 1 2019-12-31 2020 1 2 2020-01-01 2020 1 3 >>> idx.isocalendar().week 2019-12-29 52 2019-12-30 1 2019-12-31 1 2020-01-01 1 Freq: D, Name: week, dtype: UInt32 " 542,"def putpixel(self, xy, value): if self.readonly: self._copy() self.load() if self.pyaccess: return self.pyaccess.putpixel(xy, value) if ( self.mode in (""P"", ""PA"") and isinstance(value, (list, tuple)) and len(value) in [3, 4] ): # RGB or RGBA value for a P or PA image if self.mode == ""PA"": alpha = value[3] if len(value) == 4 else 255 value = value[:3] value = self.palette.getcolor(value, self) if self.mode == ""PA"": value = (value, alpha) return self.im.putpixel(xy, value) "," Modifies the pixel at the given position. The color is given as a single numerical value for single-band images, and a tuple for multi-band images. In addition to this, RGB and RGBA tuples are accepted for P and PA images. Note that this method is relatively slow. For more extensive changes, use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw` module instead. See: * :py:meth:`~PIL.Image.Image.paste` * :py:meth:`~PIL.Image.Image.putdata` * :py:mod:`~PIL.ImageDraw` :param xy: The pixel coordinate, given as (x, y). See :ref:`coordinate-system`. :param value: The pixel value. ",81,71,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def putpixel(self, xy, value): if self.readonly: self._copy() self.load() if self.pyaccess: return self.pyaccess.putpixel(xy, value) if ( self.mode in (""P"", ""PA"") and isinstance(value, (list, tuple)) and len(value) in [3, 4] ): # RGB or RGBA value for a P or PA image if self.mode == ""PA"": alpha = value[3] if len(value) == 4 else 255 value = value[:3] value = self.palette.getcolor(value, self) if self.mode == ""PA"": value = (value, alpha) return self.im.putpixel(xy, value) ``` ###Assistant : Modifies the pixel at the given position. The color is given as a single numerical value for single-band images, and a tuple for multi-band images. In addition to this, RGB and RGBA tuples are accepted for P and PA images. Note that this method is relatively slow. For more extensive changes, use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw` module instead. See: * :py:meth:`~PIL.Image.Image.paste` * :py:meth:`~PIL.Image.Image.putdata` * :py:mod:`~PIL.ImageDraw` :param xy: The pixel coordinate, given as (x, y). See :ref:`coordinate-system`. :param value: The pixel value. " 543,"async def test_available_template_with_entities(hass): await setup.async_setup_component( hass, ""switch"", { ""switch"": { ""platform"": ""template"", ""switches"": { ""test_template_switch"": { **OPTIMISTIC_SWITCH_CONFIG, ""value_template"": ""{{ 1 == 1 }}"", ""availability_template"": ""{{ is_state('availability_state.state', 'on') }}"", } }, } }, ) await hass.async_block_till_done() await hass.async_start() await hass.async_block_till_done() hass.states.async_set(""availability_state.state"", STATE_ON) await hass.async_block_till_done() assert hass.states.get(""switch.test_template_switch"").state != STATE_UNAVAILABLE hass.states.async_set(""availability_state.state"", STATE_OFF) await hass.async_block_till_done() assert hass.states.get(""switch.test_template_switch"").state == STATE_UNAVAILABLE ",Test availability templates with values from other entities.,8,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_available_template_with_entities(hass): await setup.async_setup_component( hass, ""switch"", { ""switch"": { ""platform"": ""template"", ""switches"": { ""test_template_switch"": { **OPTIMISTIC_SWITCH_CONFIG, ""value_template"": ""{{ 1 == 1 }}"", ""availability_template"": ""{{ is_state('availability_state.state', 'on') }}"", } }, } }, ) await hass.async_block_till_done() await hass.async_start() await hass.async_block_till_done() hass.states.async_set(""availability_state.state"", STATE_ON) await hass.async_block_till_done() assert hass.states.get(""switch.test_template_switch"").state != STATE_UNAVAILABLE hass.states.async_set(""availability_state.state"", STATE_OFF) await hass.async_block_till_done() assert hass.states.get(""switch.test_template_switch"").state == STATE_UNAVAILABLE ``` ###Assistant : Test availability templates with values from other entities. 
" 544,"def etfs_disc_command(sort=""""): # Debug if cfg.DEBUG: logger.debug(""etfs"") df_etfs = wsj_model.etf_movers(sort, export=True) if df_etfs.empty: raise Exception(""No available data found"") df_etfs.set_index("" "", inplace=True) prfx = ""Top"" if sort == ""active"": prfx = ""Most"" title = f""ETF Movers ({prfx} {sort.capitalize()})"" dindex = len(df_etfs.index) if dindex > 15: embeds: list = [] # Output i, i2, end = 0, 0, 15 df_pg, embeds_img, images_list = [], [], [] while i < dindex: df_pg = df_etfs.iloc[i:end] df_pg.append(df_pg) fig = df2img.plot_dataframe( df_pg, fig_size=(1200, (40 + (40 * dindex))), col_width=[1, 9, 1.5, 1.5, 1.5, 1.5], tbl_header=cfg.PLT_TBL_HEADER, tbl_cells=cfg.PLT_TBL_CELLS, font=cfg.PLT_TBL_FONT, row_fill_color=cfg.PLT_TBL_ROW_COLORS, paper_bgcolor=""rgba(0, 0, 0, 0)"", ) fig.update_traces(cells=(dict(align=[""left""]))) imagefile = ""disc-etfs.png"" imagefile = helpers.save_image(imagefile, fig) if cfg.IMAGES_URL or cfg.IMGUR_CLIENT_ID != ""REPLACE_ME"": image_link = cfg.IMAGES_URL + imagefile images_list.append(imagefile) else: imagefile_save = cfg.IMG_DIR / imagefile uploaded_image = gst_imgur.upload_image( imagefile_save, title=""something"" ) image_link = uploaded_image.link os.remove(imagefile_save) embeds_img.append( f""{image_link}"", ) embeds.append( disnake.Embed( title=title, colour=cfg.COLOR, ), ) i2 += 1 i += 15 end += 15 # Author/Footer for i in range(0, i2): embeds[i].set_author( name=cfg.AUTHOR_NAME, url=cfg.AUTHOR_URL, icon_url=cfg.AUTHOR_ICON_URL, ) embeds[i].set_footer( text=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) i = 0 for i in range(0, i2): embeds[i].set_image(url=embeds_img[i]) i += 1 embeds[0].set_footer(text=f""Page 1 of {len(embeds)}"") choices = [ disnake.SelectOption(label=""Home"", value=""0"", emoji=""🟢""), ] output = { ""view"": Menu, ""title"": title, ""embed"": embeds, ""choices"": choices, ""embeds_img"": embeds_img, ""images_list"": images_list, } else: fig = df2img.plot_dataframe( df_etfs, fig_size=(1200, (40 + (40 * dindex))), col_width=[1, 9, 1.5, 1.5, 1.5, 1.5], tbl_header=cfg.PLT_TBL_HEADER, tbl_cells=cfg.PLT_TBL_CELLS, font=cfg.PLT_TBL_FONT, row_fill_color=cfg.PLT_TBL_ROW_COLORS, paper_bgcolor=""rgba(0, 0, 0, 0)"", ) fig.update_traces(cells=(dict(align=[""left""]))) imagefile = helpers.save_image(""disc-etfs.png"", fig) output = { ""title"": title, ""imagefile"": imagefile, } return output ","Displays ETF's Top Gainers/Decliners, Most Active [Wall Street Journal]",9,247,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def etfs_disc_command(sort=""""): # Debug if cfg.DEBUG: logger.debug(""etfs"") df_etfs = wsj_model.etf_movers(sort, export=True) if df_etfs.empty: raise Exception(""No available data found"") df_etfs.set_index("" "", inplace=True) prfx = ""Top"" if sort == ""active"": prfx = ""Most"" title = f""ETF Movers ({prfx} {sort.capitalize()})"" dindex = len(df_etfs.index) if dindex > 15: embeds: list = [] # Output i, i2, end = 0, 0, 15 df_pg, embeds_img, images_list = [], [], [] while i < dindex: df_pg = df_etfs.iloc[i:end] df_pg.append(df_pg) fig = df2img.plot_dataframe( df_pg, fig_size=(1200, (40 + (40 * dindex))), col_width=[1, 9, 1.5, 1.5, 1.5, 1.5], tbl_header=cfg.PLT_TBL_HEADER, tbl_cells=cfg.PLT_TBL_CELLS, font=cfg.PLT_TBL_FONT, row_fill_color=cfg.PLT_TBL_ROW_COLORS, paper_bgcolor=""rgba(0, 0, 0, 0)"", ) fig.update_traces(cells=(dict(align=[""left""]))) imagefile = ""disc-etfs.png"" imagefile = helpers.save_image(imagefile, fig) if cfg.IMAGES_URL or cfg.IMGUR_CLIENT_ID != ""REPLACE_ME"": image_link = cfg.IMAGES_URL + imagefile images_list.append(imagefile) else: imagefile_save = cfg.IMG_DIR / imagefile uploaded_image = gst_imgur.upload_image( imagefile_save, title=""something"" ) image_link = uploaded_image.link os.remove(imagefile_save) embeds_img.append( f""{image_link}"", ) embeds.append( disnake.Embed( title=title, colour=cfg.COLOR, ), ) i2 += 1 i += 15 end += 15 # Author/Footer for i in range(0, i2): embeds[i].set_author( name=cfg.AUTHOR_NAME, url=cfg.AUTHOR_URL, icon_url=cfg.AUTHOR_ICON_URL, ) embeds[i].set_footer( text=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) i = 0 for i in range(0, i2): embeds[i].set_image(url=embeds_img[i]) i += 1 embeds[0].set_footer(text=f""Page 1 of {len(embeds)}"") choices = [ disnake.SelectOption(label=""Home"", value=""0"", emoji=""🟢""), ] output = { ""view"": Menu, ""title"": title, ""embed"": embeds, ""choices"": choices, ""embeds_img"": embeds_img, ""images_list"": images_list, } else: fig = df2img.plot_dataframe( df_etfs, fig_size=(1200, (40 + (40 * dindex))), col_width=[1, 9, 1.5, 1.5, 1.5, 1.5], tbl_header=cfg.PLT_TBL_HEADER, tbl_cells=cfg.PLT_TBL_CELLS, font=cfg.PLT_TBL_FONT, row_fill_color=cfg.PLT_TBL_ROW_COLORS, paper_bgcolor=""rgba(0, 0, 0, 0)"", ) fig.update_traces(cells=(dict(align=[""left""]))) imagefile = helpers.save_image(""disc-etfs.png"", fig) output = { ""title"": title, ""imagefile"": imagefile, } return output ``` ###Assistant : Displays ETF's Top Gainers/Decliners, Most Active [Wall Street Journal] " 545,"def get(self): logger = get_logger() logger.debug(f""ENTER::Partition.get::{self._identity}"") if len(self.call_queue): self.drain_call_queue() result = ray.get(self.oid) logger.debug(f""EXIT::Partition.get::{self._identity}"") return result "," Get the object wrapped by this partition out of the Plasma store. Returns ------- pandas.DataFrame The object from the Plasma store. ",21,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get(self): logger = get_logger() logger.debug(f""ENTER::Partition.get::{self._identity}"") if len(self.call_queue): self.drain_call_queue() result = ray.get(self.oid) logger.debug(f""EXIT::Partition.get::{self._identity}"") return result ``` ###Assistant : Get the object wrapped by this partition out of the Plasma store. Returns ------- pandas.DataFrame The object from the Plasma store. 
" 546,"def dag_bag_ext(): clear_db_runs() dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False) dag_0 = DAG(""dag_0"", start_date=DEFAULT_DATE, schedule_interval=None) task_a_0 = EmptyOperator(task_id=""task_a_0"", dag=dag_0) task_b_0 = ExternalTaskMarker( task_id=""task_b_0"", external_dag_id=""dag_1"", external_task_id=""task_a_1"", recursion_depth=3, dag=dag_0 ) task_a_0 >> task_b_0 dag_1 = DAG(""dag_1"", start_date=DEFAULT_DATE, schedule_interval=None) task_a_1 = ExternalTaskSensor( task_id=""task_a_1"", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1 ) task_b_1 = ExternalTaskMarker( task_id=""task_b_1"", external_dag_id=""dag_2"", external_task_id=""task_a_2"", recursion_depth=2, dag=dag_1 ) task_a_1 >> task_b_1 dag_2 = DAG(""dag_2"", start_date=DEFAULT_DATE, schedule_interval=None) task_a_2 = ExternalTaskSensor( task_id=""task_a_2"", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2 ) task_b_2 = ExternalTaskMarker( task_id=""task_b_2"", external_dag_id=""dag_3"", external_task_id=""task_a_3"", recursion_depth=1, dag=dag_2 ) task_a_2 >> task_b_2 dag_3 = DAG(""dag_3"", start_date=DEFAULT_DATE, schedule_interval=None) task_a_3 = ExternalTaskSensor( task_id=""task_a_3"", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3 ) task_b_3 = EmptyOperator(task_id=""task_b_3"", dag=dag_3) task_a_3 >> task_b_3 for dag in [dag_0, dag_1, dag_2, dag_3]: dag_bag.bag_dag(dag=dag, root_dag=dag) yield dag_bag clear_db_runs() @pytest.fixture"," Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies set up using ExternalTaskMarker and ExternalTaskSensor. dag_0: task_a_0 >> task_b_0 | | dag_1: ---> task_a_1 >> task_b_1 | | dag_2: ---> task_a_2 >> task_b_2 | | dag_3: ---> task_a_3 >> task_b_3 ",45,111,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dag_bag_ext(): clear_db_runs() dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False) dag_0 = DAG(""dag_0"", start_date=DEFAULT_DATE, schedule_interval=None) task_a_0 = EmptyOperator(task_id=""task_a_0"", dag=dag_0) task_b_0 = ExternalTaskMarker( task_id=""task_b_0"", external_dag_id=""dag_1"", external_task_id=""task_a_1"", recursion_depth=3, dag=dag_0 ) task_a_0 >> task_b_0 dag_1 = DAG(""dag_1"", start_date=DEFAULT_DATE, schedule_interval=None) task_a_1 = ExternalTaskSensor( task_id=""task_a_1"", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1 ) task_b_1 = ExternalTaskMarker( task_id=""task_b_1"", external_dag_id=""dag_2"", external_task_id=""task_a_2"", recursion_depth=2, dag=dag_1 ) task_a_1 >> task_b_1 dag_2 = DAG(""dag_2"", start_date=DEFAULT_DATE, schedule_interval=None) task_a_2 = ExternalTaskSensor( task_id=""task_a_2"", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2 ) task_b_2 = ExternalTaskMarker( task_id=""task_b_2"", external_dag_id=""dag_3"", external_task_id=""task_a_3"", recursion_depth=1, dag=dag_2 ) task_a_2 >> task_b_2 dag_3 = DAG(""dag_3"", start_date=DEFAULT_DATE, schedule_interval=None) task_a_3 = ExternalTaskSensor( task_id=""task_a_3"", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3 ) task_b_3 = EmptyOperator(task_id=""task_b_3"", dag=dag_3) task_a_3 >> task_b_3 for dag in [dag_0, dag_1, dag_2, dag_3]: dag_bag.bag_dag(dag=dag, root_dag=dag) yield dag_bag clear_db_runs() @pytest.fixture ``` ###Assistant : Create a DagBag with DAGs looking like this. 
The dotted lines represent external dependencies set up using ExternalTaskMarker and ExternalTaskSensor. dag_0: task_a_0 >> task_b_0 | | dag_1: ---> task_a_1 >> task_b_1 | | dag_2: ---> task_a_2 >> task_b_2 | | dag_3: ---> task_a_3 >> task_b_3 " 547,"def test_constrained_layout3(): fig, axs = plt.subplots(2, 2, layout=""constrained"") for nn, ax in enumerate(axs.flat): pcm = example_pcolor(ax, fontsize=24) if nn == 3: pad = 0.08 else: pad = 0.02 # default fig.colorbar(pcm, ax=ax, pad=pad) @image_comparison(['constrained_layout4.png'])",Test constrained_layout for colorbars with subplots,6,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_constrained_layout3(): fig, axs = plt.subplots(2, 2, layout=""constrained"") for nn, ax in enumerate(axs.flat): pcm = example_pcolor(ax, fontsize=24) if nn == 3: pad = 0.08 else: pad = 0.02 # default fig.colorbar(pcm, ax=ax, pad=pad) @image_comparison(['constrained_layout4.png']) ``` ###Assistant : Test constrained_layout for colorbars with subplots " 548,"def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: return self.grouper.indices "," Dict {group name -> group indices}. ",6,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: return self.grouper.indices ``` ###Assistant : Dict {group name -> group indices}. " 549,"def get_hash(self, data, hasher=None): if hasher is None: hasher = self.hasher if hasher is None: hasher = hashlib.md5 prefix = '' else: hasher = getattr(hashlib, hasher) prefix = '%s=' % self.hasher digest = hasher(data).digest() digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') return '%s%s' % (prefix, digest) "," Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. :rtype: str ",104,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_hash(self, data, hasher=None): if hasher is None: hasher = self.hasher if hasher is None: hasher = hashlib.md5 prefix = '' else: hasher = getattr(hashlib, hasher) prefix = '%s=' % self.hasher digest = hasher(data).digest() digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') return '%s%s' % (prefix, digest) ``` ###Assistant : Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. 
If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. :rtype: str " 550,"def test_async_call_same_actor_multiple_times(self): actors = [Actor.remote(i, maybe_crash=False) for i in range(4)] manager = FaultTolerantActorManager(actors=actors) # 2 asynchronous call to actor 0. num_of_calls = manager.foreach_actor_async( lambda w: w.call(), healthy_only=False, remote_actor_indices=[0, 0], ) self.assertEqual(num_of_calls, 2) # Now, let's actually fetch the results. results = manager.fetch_ready_async_reqs(timeout_seconds=None) # Returns 1 and 2, representing the first and second calls to actor 0. self.assertEqual([r.get() for r in results.ignore_errors()], [1, 2]) ",Test multiple asynchronous remote calls to the same actor.,9,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_async_call_same_actor_multiple_times(self): actors = [Actor.remote(i, maybe_crash=False) for i in range(4)] manager = FaultTolerantActorManager(actors=actors) # 2 asynchronous call to actor 0. num_of_calls = manager.foreach_actor_async( lambda w: w.call(), healthy_only=False, remote_actor_indices=[0, 0], ) self.assertEqual(num_of_calls, 2) # Now, let's actually fetch the results. results = manager.fetch_ready_async_reqs(timeout_seconds=None) # Returns 1 and 2, representing the first and second calls to actor 0. self.assertEqual([r.get() for r in results.ignore_errors()], [1, 2]) ``` ###Assistant : Test multiple asynchronous remote calls to the same actor. " 551,"def _load(self): data = {} if not self._is_extract: if not self.have_alignments_file: return data data = super()._load() return data skip_existing = hasattr(self._args, 'skip_existing') and self._args.skip_existing skip_faces = hasattr(self._args, 'skip_faces') and self._args.skip_faces if not skip_existing and not skip_faces: logger.debug(""No skipping selected. Returning empty dictionary"") return data if not self.have_alignments_file and (skip_existing or skip_faces): logger.warning(""Skip Existing/Skip Faces selected, but no alignments file found!"") return data data = super()._load() if skip_faces: # Remove items from alignments that have no faces so they will # be re-detected del_keys = [key for key, val in data.items() if not val[""faces""]] logger.debug(""Frames with no faces selected for redetection: %s"", len(del_keys)) for key in del_keys: if key in data: logger.trace(""Selected for redetection: '%s'"", key) del data[key] return data "," Override the parent :func:`~lib.align.Alignments._load` to handle skip existing frames and faces on extract. If skip existing has been selected, existing alignments are loaded and returned to the calling script. Returns ------- dict Any alignments that have already been extracted if skip existing has been selected otherwise an empty dictionary ",49,119,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _load(self): data = {} if not self._is_extract: if not self.have_alignments_file: return data data = super()._load() return data skip_existing = hasattr(self._args, 'skip_existing') and self._args.skip_existing skip_faces = hasattr(self._args, 'skip_faces') and self._args.skip_faces if not skip_existing and not skip_faces: logger.debug(""No skipping selected. 
Returning empty dictionary"") return data if not self.have_alignments_file and (skip_existing or skip_faces): logger.warning(""Skip Existing/Skip Faces selected, but no alignments file found!"") return data data = super()._load() if skip_faces: # Remove items from alignments that have no faces so they will # be re-detected del_keys = [key for key, val in data.items() if not val[""faces""]] logger.debug(""Frames with no faces selected for redetection: %s"", len(del_keys)) for key in del_keys: if key in data: logger.trace(""Selected for redetection: '%s'"", key) del data[key] return data ``` ###Assistant : Override the parent :func:`~lib.align.Alignments._load` to handle skip existing frames and faces on extract. If skip existing has been selected, existing alignments are loaded and returned to the calling script. Returns ------- dict Any alignments that have already been extracted if skip existing has been selected otherwise an empty dictionary " 552,"def get_gtech() -> pd.DataFrame: return get_df( ""https://finance.yahoo.com/screener/predefined/growth_technology_stocks"" ) @log_start_end(log=logger)","Get technology stocks with revenue and earnings growth in excess of 25%. [Source: Yahoo Finance] Returns ------- pd.DataFrame Growth technology stocks ",21,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_gtech() -> pd.DataFrame: return get_df( ""https://finance.yahoo.com/screener/predefined/growth_technology_stocks"" ) @log_start_end(log=logger) ``` ###Assistant : Get technology stocks with revenue and earnings growth in excess of 25%. [Source: Yahoo Finance] Returns ------- pd.DataFrame Growth technology stocks " 553,"def test_bert2gpt2_summarization(self): model = EncoderDecoderModel.from_pretrained(""patrickvonplaten/bert2gpt2-cnn_dailymail-fp16"") model.to(torch_device) tokenizer_in = AutoTokenizer.from_pretrained(""bert-base-cased"") tokenizer_out = AutoTokenizer.from_pretrained(""../gpt2"") ARTICLE_STUDENTS = EXPECTED_SUMMARY_STUDENTS = input_dict = tokenizer_in(ARTICLE_STUDENTS, return_tensors=""pt"") output_ids = model.generate(input_dict[""input_ids""].to(torch_device)) summary = tokenizer_out.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) @require_torch","(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which ""369 went to war for the Confederate States and seven for the Union Army,"" the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 ""colonies"" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for ""health and safety incidents"" since 2010. 
At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. ""The media has labeled us as the 'nation's deadliest fraternity,' "" Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. ""As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world,"" Cohen said. Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.SAS Alpha Epsilon suspended the students, but university president says it's permanent.\nThe fraternity has had to deal with a string of student deaths since 2010.\nSAS has more than 200,000 members, many of whom are students.\nA student died while being forced into excessive alcohol consumption.",403,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_bert2gpt2_summarization(self): model = EncoderDecoderModel.from_pretrained(""patrickvonplaten/bert2gpt2-cnn_dailymail-fp16"") model.to(torch_device) tokenizer_in = AutoTokenizer.from_pretrained(""bert-base-cased"") tokenizer_out = AutoTokenizer.from_pretrained(""../gpt2"") ARTICLE_STUDENTS = EXPECTED_SUMMARY_STUDENTS = input_dict = tokenizer_in(ARTICLE_STUDENTS, return_tensors=""pt"") output_ids = model.generate(input_dict[""input_ids""].to(torch_device)) summary = tokenizer_out.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) @require_torch ``` ###Assistant : (CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which ""369 went to war for the Confederate States and seven for the Union Army,"" the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 ""colonies"" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for ""health and safety incidents"" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. 
Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. ""The media has labeled us as the 'nation's deadliest fraternity,' "" Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. ""As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world,"" Cohen said. Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.SAS Alpha Epsilon suspended the students, but university president says it's permanent.\nThe fraternity has had to deal with a string of student deaths since 2010.\nSAS has more than 200,000 members, many of whom are students.\nA student died while being forced into excessive alcohol consumption. " 554,"def _get_builtin_permissions(opts): # noqa: D205, D212 perms = [] for action in opts.default_permissions: perms.append( ( get_permission_codename(action, opts), ""Can %s %s"" % (action, opts.verbose_name_raw), ) ) return perms "," Return (codename, name) for all autogenerated permissions. By default, this is ('add', 'change', 'delete', 'view') ",15,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_builtin_permissions(opts): # noqa: D205, D212 perms = [] for action in opts.default_permissions: perms.append( ( get_permission_codename(action, opts), ""Can %s %s"" % (action, opts.verbose_name_raw), ) ) return perms ``` ###Assistant : Return (codename, name) for all autogenerated permissions. By default, this is ('add', 'change', 'delete', 'view') " 555,"def accumulate(self, model): self._do_sync() if self.sync_gradients: context = contextlib.nullcontext else: context = self.no_sync with context(model): yield "," A context manager that will lightly wrap around and perform gradient accumulation automatically Args: model (`torch.nn.Module`): PyTorch Module that was prepared with `Accelerator.prepare` ",23,16,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def accumulate(self, model): self._do_sync() if self.sync_gradients: context = contextlib.nullcontext else: context = self.no_sync with context(model): yield ``` ###Assistant : A context manager that will lightly wrap around and perform gradient accumulation automatically Args: model (`torch.nn.Module`): PyTorch Module that was prepared with `Accelerator.prepare` " 556,"def get_attribute(value): attribute = Attribute() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) if value and value[0] in ATTRIBUTE_ENDS: raise errors.HeaderParseError( ""expected token but found '{}'"".format(value)) token, value = get_attrtext(value) attribute.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) return attribute, value "," [CFWS] 1*attrtext [CFWS] This version of the BNF makes the CFWS explicit, and as usual we use a value terminal for the actual run of characters. The RFC equivalent of attrtext is the token characters, with the subtraction of '*', ""'"", and '%'. We include tab in the excluded set just as we do for token. ",56,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_attribute(value): attribute = Attribute() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) if value and value[0] in ATTRIBUTE_ENDS: raise errors.HeaderParseError( ""expected token but found '{}'"".format(value)) token, value = get_attrtext(value) attribute.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) return attribute, value ``` ###Assistant : [CFWS] 1*attrtext [CFWS] This version of the BNF makes the CFWS explicit, and as usual we use a value terminal for the actual run of characters. The RFC equivalent of attrtext is the token characters, with the subtraction of '*', ""'"", and '%'. We include tab in the excluded set just as we do for token. " 557,"def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs): with ensure_clean_dir() as dirname: unique_filename_modin = get_unique_filename( extension=extension, data_dir=dirname ) unique_filename_pandas = get_unique_filename( extension=extension, data_dir=dirname ) # parameter `max_retries=0` is set for `to_csv` function on Ray engine, # in order to increase the stability of tests, we repeat the call of # the entire function manually last_exception = None for _ in range(3): try: getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs) except EXCEPTIONS as exc: last_exception = exc continue break else: raise last_exception getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs) assert assert_files_eq(unique_filename_modin, unique_filename_pandas) @pytest.fixture","Helper function to test `to_` methods. Args: modin_obj: Modin DataFrame or Series to test `to_` method. pandas_obj: Pandas DataFrame or Series to test `to_` method. fn: name of the method, that should be tested. extension: Extension of the test file. ",40,82,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs): with ensure_clean_dir() as dirname: unique_filename_modin = get_unique_filename( extension=extension, data_dir=dirname ) unique_filename_pandas = get_unique_filename( extension=extension, data_dir=dirname ) # parameter `max_retries=0` is set for `to_csv` function on Ray engine, # in order to increase the stability of tests, we repeat the call of # the entire function manually last_exception = None for _ in range(3): try: getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs) except EXCEPTIONS as exc: last_exception = exc continue break else: raise last_exception getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs) assert assert_files_eq(unique_filename_modin, unique_filename_pandas) @pytest.fixture ``` ###Assistant : Helper function to test `to_` methods. Args: modin_obj: Modin DataFrame or Series to test `to_` method. pandas_obj: Pandas DataFrame or Series to test `to_` method. fn: name of the method, that should be tested. extension: Extension of the test file. " 558,"def _is_matching_generic_foreign_key(self, field): return ( isinstance(field, GenericForeignKey) and field.ct_field == self.content_type_field_name and field.fk_field == self.object_id_field_name ) "," Return True if field is a GenericForeignKey whose content type and object id fields correspond to the equivalent attributes on this GenericRelation. ",22,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _is_matching_generic_foreign_key(self, field): return ( isinstance(field, GenericForeignKey) and field.ct_field == self.content_type_field_name and field.fk_field == self.object_id_field_name ) ``` ###Assistant : Return True if field is a GenericForeignKey whose content type and object id fields correspond to the equivalent attributes on this GenericRelation. " 559,"def hashes_to_frame(self): if not self._hashes_to_frame: logger.debug(""Generating hashes to frame"") for frame_name, val in self._data.items(): for idx, face in enumerate(val[""faces""]): self._hashes_to_frame.setdefault(face[""hash""], {})[frame_name] = idx return self._hashes_to_frame "," dict: The SHA1 hash of the face mapped to the frame(s) and face index within the frame that the hash corresponds to. The structure of the dictionary is: {**SHA1_hash** (`str`): {**filename** (`str`): **face_index** (`int`)}}. Notes ----- This method is depractated and exists purely for updating legacy hash based alignments to new png header storage in :class:`lib.align.update_legacy_png_header`. The first time this property is referenced, the dictionary will be created and cached. Subsequent references will be made to this cached dictionary. ",79,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def hashes_to_frame(self): if not self._hashes_to_frame: logger.debug(""Generating hashes to frame"") for frame_name, val in self._data.items(): for idx, face in enumerate(val[""faces""]): self._hashes_to_frame.setdefault(face[""hash""], {})[frame_name] = idx return self._hashes_to_frame ``` ###Assistant : dict: The SHA1 hash of the face mapped to the frame(s) and face index within the frame that the hash corresponds to. The structure of the dictionary is: {**SHA1_hash** (`str`): {**filename** (`str`): **face_index** (`int`)}}. 
Notes ----- This method is depractated and exists purely for updating legacy hash based alignments to new png header storage in :class:`lib.align.update_legacy_png_header`. The first time this property is referenced, the dictionary will be created and cached. Subsequent references will be made to this cached dictionary. " 560,"def _get_textdoc(self, index): assert self._opt is not None # FIXME we probably should do eliding here. See # qcommonstyle.cpp:viewItemDrawText # https://github.com/qutebrowser/qutebrowser/issues/118 text_option = QTextOption() if self._opt.features & QStyleOptionViewItem.WrapText: text_option.setWrapMode(QTextOption.WordWrap) else: text_option.setWrapMode(QTextOption.ManualWrap) text_option.setTextDirection(self._opt.direction) text_option.setAlignment(QStyle.visualAlignment( self._opt.direction, self._opt.displayAlignment)) if self._doc is not None: self._doc.deleteLater() self._doc = QTextDocument(self) self._doc.setDefaultFont(self._opt.font) self._doc.setDefaultTextOption(text_option) self._doc.setDocumentMargin(2) if index.parent().isValid(): view = self.parent() assert isinstance(view, completionwidget.CompletionView), view pattern = view.pattern columns_to_filter = index.model().columns_to_filter(index) if index.column() in columns_to_filter and pattern: if self._opt.state & QStyle.State_Selected: color = config.val.colors.completion.item.selected.match.fg else: color = config.val.colors.completion.match.fg _Highlighter(self._doc, pattern, color) self._doc.setPlainText(self._opt.text) else: self._doc.setHtml( '{}'.format( html.escape(config.val.fonts.completion.category), html.escape(self._opt.text))) ","Create the QTextDocument of an item. Args: index: The QModelIndex of the item to draw. ",15,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_textdoc(self, index): assert self._opt is not None # FIXME we probably should do eliding here. See # qcommonstyle.cpp:viewItemDrawText # https://github.com/qutebrowser/qutebrowser/issues/118 text_option = QTextOption() if self._opt.features & QStyleOptionViewItem.WrapText: text_option.setWrapMode(QTextOption.WordWrap) else: text_option.setWrapMode(QTextOption.ManualWrap) text_option.setTextDirection(self._opt.direction) text_option.setAlignment(QStyle.visualAlignment( self._opt.direction, self._opt.displayAlignment)) if self._doc is not None: self._doc.deleteLater() self._doc = QTextDocument(self) self._doc.setDefaultFont(self._opt.font) self._doc.setDefaultTextOption(text_option) self._doc.setDocumentMargin(2) if index.parent().isValid(): view = self.parent() assert isinstance(view, completionwidget.CompletionView), view pattern = view.pattern columns_to_filter = index.model().columns_to_filter(index) if index.column() in columns_to_filter and pattern: if self._opt.state & QStyle.State_Selected: color = config.val.colors.completion.item.selected.match.fg else: color = config.val.colors.completion.match.fg _Highlighter(self._doc, pattern, color) self._doc.setPlainText(self._opt.text) else: self._doc.setHtml( '{}'.format( html.escape(config.val.fonts.completion.category), html.escape(self._opt.text))) ``` ###Assistant : Create the QTextDocument of an item. Args: index: The QModelIndex of the item to draw. 
" 561,"def feed(self, *args): assert len(args) != 0 self.terminals = [] for fed_layer in args: if isinstance(fed_layer, str): try: fed_layer = self.layers[fed_layer] except KeyError: raise KeyError('Unknown layer name fed: %s' % fed_layer) self.terminals.append(fed_layer) return self ","Set the input(s) for the next operation by replacing the terminal nodes. The arguments can be either layer names or the actual layers. ",23,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def feed(self, *args): assert len(args) != 0 self.terminals = [] for fed_layer in args: if isinstance(fed_layer, str): try: fed_layer = self.layers[fed_layer] except KeyError: raise KeyError('Unknown layer name fed: %s' % fed_layer) self.terminals.append(fed_layer) return self ``` ###Assistant : Set the input(s) for the next operation by replacing the terminal nodes. The arguments can be either layer names or the actual layers. " 562,"def unpack_iterator_input(iterator): try: next_element = iterator.get_next() except tf.errors.OutOfRangeError: raise RuntimeError( ""Your dataset iterator ran out of data; "" ""Make sure that your dataset can generate "" ""required number of samples."" ) if isinstance(next_element, (list, tuple)): if len(next_element) not in [2, 3]: raise ValueError( ""Please provide model inputs as a list or tuple of 2 or 3 "" ""elements: (input, target) or (input, target, sample_weights) "" ""Received %s"" % next_element ) if len(next_element) == 2: x, y = next_element weights = None else: x, y, weights = next_element else: x = next_element y = None weights = None return x, y, weights ","Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`. Args: iterator: Instance of a dataset iterator. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None. ",33,101,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unpack_iterator_input(iterator): try: next_element = iterator.get_next() except tf.errors.OutOfRangeError: raise RuntimeError( ""Your dataset iterator ran out of data; "" ""Make sure that your dataset can generate "" ""required number of samples."" ) if isinstance(next_element, (list, tuple)): if len(next_element) not in [2, 3]: raise ValueError( ""Please provide model inputs as a list or tuple of 2 or 3 "" ""elements: (input, target) or (input, target, sample_weights) "" ""Received %s"" % next_element ) if len(next_element) == 2: x, y = next_element weights = None else: x, y, weights = next_element else: x = next_element y = None weights = None return x, y, weights ``` ###Assistant : Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`. Args: iterator: Instance of a dataset iterator. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None. " 563,"def mock_2x2x4_devices(one_device_per_chip): return mock_devices(2, 2, 4, 'TPU v4', one_device_per_chip) ",Hard-coded reproduction of jax.devices() output on 2x2x4.,7,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mock_2x2x4_devices(one_device_per_chip): return mock_devices(2, 2, 4, 'TPU v4', one_device_per_chip) ``` ###Assistant : Hard-coded reproduction of jax.devices() output on 2x2x4. 
" 564,"def export_triton(model_path, output_path=""model_repository"", model_name=""ludwig_model"", model_version=1, **kwargs): logger.info(f""Model path: {model_path}"") logger.info(f""Output path: {output_path}"") logger.info(f""Model name: {model_name}"") logger.info(f""Model version: {model_version}"") logger.info(""\n"") model = LudwigModel.load(model_path) os.makedirs(output_path, exist_ok=True) utils_export_triton(model, output_path, model_name, model_version) logger.info(f""Saved to: {output_path}"") ","Exports a model in torchscript format with config for Triton serving. # Inputs :param model_path: (str) filepath to pre-trained model. :param output_path: (str, default: `'model_repository'`) directory to store the triton models. :param model_name: (str, default: `'ludwig_model'`) save triton under this name. :param model_name: (int, default: `1`) save neuropod under this verison. # Return :returns: (`None`) ",55,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def export_triton(model_path, output_path=""model_repository"", model_name=""ludwig_model"", model_version=1, **kwargs): logger.info(f""Model path: {model_path}"") logger.info(f""Output path: {output_path}"") logger.info(f""Model name: {model_name}"") logger.info(f""Model version: {model_version}"") logger.info(""\n"") model = LudwigModel.load(model_path) os.makedirs(output_path, exist_ok=True) utils_export_triton(model, output_path, model_name, model_version) logger.info(f""Saved to: {output_path}"") ``` ###Assistant : Exports a model in torchscript format with config for Triton serving. # Inputs :param model_path: (str) filepath to pre-trained model. :param output_path: (str, default: `'model_repository'`) directory to store the triton models. :param model_name: (str, default: `'ludwig_model'`) save triton under this name. :param model_name: (int, default: `1`) save neuropod under this verison. # Return :returns: (`None`) " 565,"def tags(self) -> Sequence[Tuple[str, str]]: tags_key_column = self._get_column_name(Columns.TAGS_KEY) tags_value_column = self._get_column_name(Columns.TAGS_VALUE) if tags_key_column in self._snuba_data and tags_value_column in self._snuba_data: keys = self._snuba_data[tags_key_column] values = self._snuba_data[tags_value_column] if keys and values and len(keys) == len(values): return sorted(zip(keys, values)) else: return [] # Nodestore implementation try: rv = sorted( (t, v) for t, v in get_path(self.data, ""tags"", filter=True) or () if t is not None and v is not None ) return rv except ValueError: # at one point Sentry allowed invalid tag sets such as (foo, bar) # vs ((tag, foo), (tag, bar)) return [] "," Tags property uses tags from snuba if loaded otherwise falls back to nodestore. ",13,93,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def tags(self) -> Sequence[Tuple[str, str]]: tags_key_column = self._get_column_name(Columns.TAGS_KEY) tags_value_column = self._get_column_name(Columns.TAGS_VALUE) if tags_key_column in self._snuba_data and tags_value_column in self._snuba_data: keys = self._snuba_data[tags_key_column] values = self._snuba_data[tags_value_column] if keys and values and len(keys) == len(values): return sorted(zip(keys, values)) else: return [] # Nodestore implementation try: rv = sorted( (t, v) for t, v in get_path(self.data, ""tags"", filter=True) or () if t is not None and v is not None ) return rv except ValueError: # at one point Sentry allowed invalid tag sets such as (foo, bar) # vs ((tag, foo), (tag, bar)) return [] ``` ###Assistant : Tags property uses tags from snuba if loaded otherwise falls back to nodestore. " 566,"def test_cable_validates_compatible_types(self): # An interface cannot be connected to a power port cable = Cable(a_terminations=[self.interface1, self.interface2], b_terminations=[self.interface3]) with self.assertRaises(ValidationError): cable.clean() # TODO: Remove this? # def test_cable_front_port_cannot_connect_to_corresponding_rear_port(self): # # cable = Cable(a_terminations=[self.front_port1], b_terminations=[self.rear_port1]) # with self.assertRaises(ValidationError): # cable.clean() "," The clean method should have a check to ensure only compatible port types can be connected by a cable # A cable cannot connect a front port to its corresponding rear port # ",33,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_cable_validates_compatible_types(self): # An interface cannot be connected to a power port cable = Cable(a_terminations=[self.interface1, self.interface2], b_terminations=[self.interface3]) with self.assertRaises(ValidationError): cable.clean() # TODO: Remove this? # def test_cable_front_port_cannot_connect_to_corresponding_rear_port(self): # # cable = Cable(a_terminations=[self.front_port1], b_terminations=[self.rear_port1]) # with self.assertRaises(ValidationError): # cable.clean() ``` ###Assistant : The clean method should have a check to ensure only compatible port types can be connected by a cable # A cable cannot connect a front port to its corresponding rear port # " 567,"def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): if h5py is None: raise ImportError( ""`save_model()` using h5 format requires h5py. Could not "" ""import h5py."" ) # TODO(psv) Add warning when we save models that contain non-serializable # entities like metrics added using `add_metric` and losses added using # `add_loss.` if len(model.weights) != len(model._undeduplicated_weights): logging.warning( ""Found duplicated `Variable`s in Model's `weights`. "" ""This is usually caused by `Variable`s being shared by "" ""Layers in the Model. These `Variable`s will be treated "" ""as separate `Variable`s when the Model is restored. To "" 'avoid this, please save with `save_format=""tf""`.' ) if not isinstance(filepath, h5py.File): # If file exists and should not be overwritten. 
if not overwrite and os.path.isfile(filepath): proceed = ask_to_proceed_with_overwrite(filepath) if not proceed: return # Try creating dir if not exist dirpath = os.path.dirname(filepath) if not os.path.exists(dirpath): tf.io.gfile.makedirs(dirpath) f = h5py.File(filepath, mode=""w"") opened_new_file = True else: f = filepath opened_new_file = False try: model_metadata = saving_utils.model_metadata(model, include_optimizer) for k, v in model_metadata.items(): if isinstance(v, (dict, list, tuple)): f.attrs[k] = json.dumps( v, default=json_utils.get_json_type ).encode(""utf8"") else: f.attrs[k] = v model_weights_group = f.create_group(""model_weights"") save_weights_to_hdf5_group(model_weights_group, model) # TODO(b/128683857): Add integration tests between tf.keras and external # Keras, to avoid breaking TF.js users. if isinstance(model.optimizer, optimizer_experimental.Optimizer): logging.warning( ""HDF5 format does not save weights of"" "" `optimizer_experimental.Optimizer`, your optimizer will"" "" be recompiled at loading time."" ) elif ( include_optimizer and model.optimizer and not isinstance(model.optimizer, optimizer_v1.TFOptimizer) ): save_optimizer_weights_to_hdf5_group(f, model.optimizer) f.flush() finally: if opened_new_file: f.close() ","Saves a model to a HDF5 file. The saved model contains: - the model's configuration (topology) - the model's weights - the model's optimizer's state (if any) Thus the saved model can be reinstantiated in the exact same state, without any of the code used for model definition or training. Args: model: Keras model instance to be saved. filepath: One of the following: - String, path where to save the model - `h5py.File` object where to save the model overwrite: Whether we should overwrite any existing model at the target location, or instead ask the user with a manual prompt. include_optimizer: If True, save optimizer's state together. Raises: ImportError: if h5py is not available. ",114,235,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): if h5py is None: raise ImportError( ""`save_model()` using h5 format requires h5py. Could not "" ""import h5py."" ) # TODO(psv) Add warning when we save models that contain non-serializable # entities like metrics added using `add_metric` and losses added using # `add_loss.` if len(model.weights) != len(model._undeduplicated_weights): logging.warning( ""Found duplicated `Variable`s in Model's `weights`. "" ""This is usually caused by `Variable`s being shared by "" ""Layers in the Model. These `Variable`s will be treated "" ""as separate `Variable`s when the Model is restored. To "" 'avoid this, please save with `save_format=""tf""`.' ) if not isinstance(filepath, h5py.File): # If file exists and should not be overwritten. 
if not overwrite and os.path.isfile(filepath): proceed = ask_to_proceed_with_overwrite(filepath) if not proceed: return # Try creating dir if not exist dirpath = os.path.dirname(filepath) if not os.path.exists(dirpath): tf.io.gfile.makedirs(dirpath) f = h5py.File(filepath, mode=""w"") opened_new_file = True else: f = filepath opened_new_file = False try: model_metadata = saving_utils.model_metadata(model, include_optimizer) for k, v in model_metadata.items(): if isinstance(v, (dict, list, tuple)): f.attrs[k] = json.dumps( v, default=json_utils.get_json_type ).encode(""utf8"") else: f.attrs[k] = v model_weights_group = f.create_group(""model_weights"") save_weights_to_hdf5_group(model_weights_group, model) # TODO(b/128683857): Add integration tests between tf.keras and external # Keras, to avoid breaking TF.js users. if isinstance(model.optimizer, optimizer_experimental.Optimizer): logging.warning( ""HDF5 format does not save weights of"" "" `optimizer_experimental.Optimizer`, your optimizer will"" "" be recompiled at loading time."" ) elif ( include_optimizer and model.optimizer and not isinstance(model.optimizer, optimizer_v1.TFOptimizer) ): save_optimizer_weights_to_hdf5_group(f, model.optimizer) f.flush() finally: if opened_new_file: f.close() ``` ###Assistant : Saves a model to a HDF5 file. The saved model contains: - the model's configuration (topology) - the model's weights - the model's optimizer's state (if any) Thus the saved model can be reinstantiated in the exact same state, without any of the code used for model definition or training. Args: model: Keras model instance to be saved. filepath: One of the following: - String, path where to save the model - `h5py.File` object where to save the model overwrite: Whether we should overwrite any existing model at the target location, or instead ask the user with a manual prompt. include_optimizer: If True, save optimizer's state together. Raises: ImportError: if h5py is not available. " 568,"def set(self, **kwargs) -> None: # nosec attributes = {} user_id = kwargs[""user_id""] user = self.first(id_int=int(user_id)) if not user: raise UserNotFoundError for k, v in kwargs.items(): if k in user.__attr_searchable__: attributes[k] = v if kwargs.get(""email"", None): user.email = kwargs[""email""] elif kwargs.get(""role"", None): user.role = kwargs[""role""] elif kwargs.get(""name"", None): user.name = kwargs[""name""] elif kwargs.get(""budget"", None): user.budget = kwargs[""budget""] elif kwargs.get(""website"", None): user.website = kwargs[""website""] elif kwargs.get(""institution"", None): user.institution = kwargs[""institution""] else: raise Exception attributes[""__blob__""] = user.to_bytes() self.update_one({""id_int"": int(user_id)}, {""$set"": attributes}) ","Updates the information for the given user id. Args: user_id (str): unique id of the user in the database. email (str, optional): email of the user. Defaults to """". password (str, optional): password of the user. Defaults to """". role (int, optional): role of the user. Defaults to 0. name (str, optional): name of the user. Defaults to """". website (str, optional): website of the institution of the user. Defaults to """". institution (str, optional): name of the institution of the user. Defaults to """". budget (float, optional): privacy budget allocated to the user. Defaults to 0.0. Raises: UserNotFoundError: Raised when a user does not exist for the given user id. Exception: Raised when an invalid argument/property is passed. 
",119,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set(self, **kwargs) -> None: # nosec attributes = {} user_id = kwargs[""user_id""] user = self.first(id_int=int(user_id)) if not user: raise UserNotFoundError for k, v in kwargs.items(): if k in user.__attr_searchable__: attributes[k] = v if kwargs.get(""email"", None): user.email = kwargs[""email""] elif kwargs.get(""role"", None): user.role = kwargs[""role""] elif kwargs.get(""name"", None): user.name = kwargs[""name""] elif kwargs.get(""budget"", None): user.budget = kwargs[""budget""] elif kwargs.get(""website"", None): user.website = kwargs[""website""] elif kwargs.get(""institution"", None): user.institution = kwargs[""institution""] else: raise Exception attributes[""__blob__""] = user.to_bytes() self.update_one({""id_int"": int(user_id)}, {""$set"": attributes}) ``` ###Assistant : Updates the information for the given user id. Args: user_id (str): unique id of the user in the database. email (str, optional): email of the user. Defaults to """". password (str, optional): password of the user. Defaults to """". role (int, optional): role of the user. Defaults to 0. name (str, optional): name of the user. Defaults to """". website (str, optional): website of the institution of the user. Defaults to """". institution (str, optional): name of the institution of the user. Defaults to """". budget (float, optional): privacy budget allocated to the user. Defaults to 0.0. Raises: UserNotFoundError: Raised when a user does not exits for the given user id. Exception: Raised when an invalid argument/property is passed. " 569,"def test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metric(self): use_case_id = UseCaseKey.RELEASE_HEALTH expected_derived_metrics_entities = { SessionMRI.ALL.value: ""metrics_counters"", SessionMRI.ALL_USER.value: ""metrics_sets"", SessionMRI.CRASHED.value: ""metrics_counters"", SessionMRI.CRASHED_USER.value: ""metrics_sets"", SessionMRI.ABNORMAL.value: ""metrics_counters"", SessionMRI.ABNORMAL_USER.value: ""metrics_sets"", SessionMRI.CRASH_FREE_RATE.value: ""metrics_counters"", SessionMRI.CRASH_FREE_USER_RATE.value: ""metrics_sets"", SessionMRI.ERRORED_PREAGGREGATED.value: ""metrics_counters"", SessionMRI.ERRORED_SET.value: ""metrics_sets"", SessionMRI.ERRORED_USER_ALL.value: ""metrics_sets"", SessionMRI.CRASHED_AND_ABNORMAL_USER.value: ""metrics_sets"", SessionMRI.ERRORED_USER.value: ""metrics_sets"", } for key, value in expected_derived_metrics_entities.items(): assert ( MOCKED_DERIVED_METRICS[key].get_entity( projects=[self.project], use_case_id=use_case_id ) ) == value # Incorrectly setup SingularEntityDerivedMetric with metrics spanning multiple entities with pytest.raises(DerivedMetricParseException): self.crash_free_fake.get_entity(projects=[self.project], use_case_id=use_case_id) "," Tests that ensures that get_entity method works expected in the sense that: - Since it is the first function that is called by the query_builder, validation is applied there to ensure that if it is an instance of a SingleEntityDerivedMetric, then it is composed of only other SingleEntityDerivedMetric or RawMetric that belong to the same entity - Return the entity of that derived metric ",64,62,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metric(self): use_case_id = UseCaseKey.RELEASE_HEALTH expected_derived_metrics_entities = { SessionMRI.ALL.value: ""metrics_counters"", SessionMRI.ALL_USER.value: ""metrics_sets"", SessionMRI.CRASHED.value: ""metrics_counters"", SessionMRI.CRASHED_USER.value: ""metrics_sets"", SessionMRI.ABNORMAL.value: ""metrics_counters"", SessionMRI.ABNORMAL_USER.value: ""metrics_sets"", SessionMRI.CRASH_FREE_RATE.value: ""metrics_counters"", SessionMRI.CRASH_FREE_USER_RATE.value: ""metrics_sets"", SessionMRI.ERRORED_PREAGGREGATED.value: ""metrics_counters"", SessionMRI.ERRORED_SET.value: ""metrics_sets"", SessionMRI.ERRORED_USER_ALL.value: ""metrics_sets"", SessionMRI.CRASHED_AND_ABNORMAL_USER.value: ""metrics_sets"", SessionMRI.ERRORED_USER.value: ""metrics_sets"", } for key, value in expected_derived_metrics_entities.items(): assert ( MOCKED_DERIVED_METRICS[key].get_entity( projects=[self.project], use_case_id=use_case_id ) ) == value # Incorrectly setup SingularEntityDerivedMetric with metrics spanning multiple entities with pytest.raises(DerivedMetricParseException): self.crash_free_fake.get_entity(projects=[self.project], use_case_id=use_case_id) ``` ###Assistant : Test that ensures that the get_entity method works as expected, in the sense that: - Since it is the first function that is called by the query_builder, validation is applied there to ensure that if it is an instance of a SingleEntityDerivedMetric, then it is composed of only other SingleEntityDerivedMetric or RawMetric that belong to the same entity - Return the entity of that derived metric " 570,"def transform(self, X): check_is_fitted(self) X = self._validate_data(X, reset=False) X = X - self.mean_ U = ridge_regression( self.components_.T, X.T, self.ridge_alpha, solver=""cholesky"" ) return U ","Least Squares projection of the data onto the sparse components. To avoid instability issues in case the system is under-determined, regularization can be applied (Ridge regression) via the `ridge_alpha` parameter. Note that Sparse PCA components orthogonality is not enforced as in PCA hence one cannot use a simple linear projection. Parameters ---------- X : ndarray of shape (n_samples, n_features) Test data to be transformed, must have the same number of features as the data used to train the model. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed data. ",90,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transform(self, X): check_is_fitted(self) X = self._validate_data(X, reset=False) X = X - self.mean_ U = ridge_regression( self.components_.T, X.T, self.ridge_alpha, solver=""cholesky"" ) return U ``` ###Assistant : Least Squares projection of the data onto the sparse components. To avoid instability issues in case the system is under-determined, regularization can be applied (Ridge regression) via the `ridge_alpha` parameter. Note that Sparse PCA components orthogonality is not enforced as in PCA hence one cannot use a simple linear projection. Parameters ---------- X : ndarray of shape (n_samples, n_features) Test data to be transformed, must have the same number of features as the data used to train the model. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed data. 
" 571,"def serialize_object(obj, extra=None): json_str = serialize('json', [obj]) print(json_str) data = json.loads(json_str)[0]['fields'] # Exclude any MPTTModel fields if issubclass(obj.__class__, MPTTModel): for field in ['level', 'lft', 'rght', 'tree_id']: data.pop(field) # Include custom_field_data as ""custom_fields"" if hasattr(obj, 'custom_field_data'): data['custom_fields'] = data.pop('custom_field_data') # Include any tags. Check for tags cached on the instance; fall back to using the manager. if is_taggable(obj): tags = getattr(obj, '_tags', None) or obj.tags.all() data['tags'] = [tag.name for tag in tags] # Append any extra data if extra is not None: data.update(extra) # Copy keys to list to avoid 'dictionary changed size during iteration' exception for key in list(data): # Private fields shouldn't be logged in the object change if isinstance(key, str) and key.startswith('_'): data.pop(key) return data "," Return a generic JSON representation of an object using Django's built-in serializer. (This is used for things like change logging, not the REST API.) Optionally include a dictionary to supplement the object data. A list of keys can be provided to exclude them from the returned dictionary. Private fields (prefaced with an underscore) are implicitly excluded. ",56,117,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serialize_object(obj, extra=None): json_str = serialize('json', [obj]) print(json_str) data = json.loads(json_str)[0]['fields'] # Exclude any MPTTModel fields if issubclass(obj.__class__, MPTTModel): for field in ['level', 'lft', 'rght', 'tree_id']: data.pop(field) # Include custom_field_data as ""custom_fields"" if hasattr(obj, 'custom_field_data'): data['custom_fields'] = data.pop('custom_field_data') # Include any tags. Check for tags cached on the instance; fall back to using the manager. if is_taggable(obj): tags = getattr(obj, '_tags', None) or obj.tags.all() data['tags'] = [tag.name for tag in tags] # Append any extra data if extra is not None: data.update(extra) # Copy keys to list to avoid 'dictionary changed size during iteration' exception for key in list(data): # Private fields shouldn't be logged in the object change if isinstance(key, str) and key.startswith('_'): data.pop(key) return data ``` ###Assistant : Return a generic JSON representation of an object using Django's built-in serializer. (This is used for things like change logging, not the REST API.) Optionally include a dictionary to supplement the object data. A list of keys can be provided to exclude them from the returned dictionary. Private fields (prefaced with an underscore) are implicitly excluded. " 572,"async def test_statistics_during_period(recorder_mock, hass, hass_ws_client, caplog): now = dt_util.utcnow() await async_setup_component(hass, ""history"", {}) client = await hass_ws_client() # Test the WS API works and issues a warning await client.send_json( { ""id"": 1, ""type"": ""history/statistics_during_period"", ""start_time"": now.isoformat(), ""end_time"": now.isoformat(), ""statistic_ids"": [""sensor.test""], ""period"": ""hour"", } ) response = await client.receive_json() assert response[""success""] assert response[""result""] == {} assert ( ""WS API 'history/statistics_during_period' is deprecated and will be removed in "" ""Home Assistant Core 2022.12. 
Use 'recorder/statistics_during_period' instead"" ) in caplog.text # Test the WS API forwards to recorder with patch( ""homeassistant.components.history.recorder_ws.ws_handle_get_statistics_during_period"", wraps=ws_handle_get_statistics_during_period, ) as ws_mock: await client.send_json( { ""id"": 2, ""type"": ""history/statistics_during_period"", ""start_time"": now.isoformat(), ""end_time"": now.isoformat(), ""statistic_ids"": [""sensor.test""], ""period"": ""hour"", } ) await client.receive_json() ws_mock.assert_awaited_once() ",Test history/statistics_during_period forwards to recorder.,5,112,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_statistics_during_period(recorder_mock, hass, hass_ws_client, caplog): now = dt_util.utcnow() await async_setup_component(hass, ""history"", {}) client = await hass_ws_client() # Test the WS API works and issues a warning await client.send_json( { ""id"": 1, ""type"": ""history/statistics_during_period"", ""start_time"": now.isoformat(), ""end_time"": now.isoformat(), ""statistic_ids"": [""sensor.test""], ""period"": ""hour"", } ) response = await client.receive_json() assert response[""success""] assert response[""result""] == {} assert ( ""WS API 'history/statistics_during_period' is deprecated and will be removed in "" ""Home Assistant Core 2022.12. Use 'recorder/statistics_during_period' instead"" ) in caplog.text # Test the WS API forwards to recorder with patch( ""homeassistant.components.history.recorder_ws.ws_handle_get_statistics_during_period"", wraps=ws_handle_get_statistics_during_period, ) as ws_mock: await client.send_json( { ""id"": 2, ""type"": ""history/statistics_during_period"", ""start_time"": now.isoformat(), ""end_time"": now.isoformat(), ""statistic_ids"": [""sensor.test""], ""period"": ""hour"", } ) await client.receive_json() ws_mock.assert_awaited_once() ``` ###Assistant : Test history/statistics_during_period forwards to recorder. " 573,"def queryset_chunks(self, qs, chunk_size=DEFAULT_CHUNK_SIZE): i = 0 while True: items = list(qs[i * chunk_size :][:chunk_size]) if not items: break yield items i += 1 "," Yield a queryset in chunks of at most ``chunk_size``. The chunk yielded will be a list, not a queryset. Iterating over the chunks is done in a transaction so that the order and count of items in the queryset remains stable. ",41,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def queryset_chunks(self, qs, chunk_size=DEFAULT_CHUNK_SIZE): i = 0 while True: items = list(qs[i * chunk_size :][:chunk_size]) if not items: break yield items i += 1 ``` ###Assistant : Yield a queryset in chunks of at most ``chunk_size``. The chunk yielded will be a list, not a queryset. Iterating over the chunks is done in a transaction so that the order and count of items in the queryset remains stable. 
" 574,"def flattened_having(self) -> List[Condition]: flattened: List[Condition] = [] boolean_conditions: List[BooleanCondition] = [] for condition in self.having: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) while len(boolean_conditions) > 0: boolean_condition = boolean_conditions.pop() for condition in boolean_condition.conditions: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) return flattened ","Return self.having as a flattened list ignoring boolean operators This is because self.having can have a mix of BooleanConditions and Conditions. And each BooleanCondition can in turn be a mix of either type. ",33,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def flattened_having(self) -> List[Condition]: flattened: List[Condition] = [] boolean_conditions: List[BooleanCondition] = [] for condition in self.having: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) while len(boolean_conditions) > 0: boolean_condition = boolean_conditions.pop() for condition in boolean_condition.conditions: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) return flattened ``` ###Assistant : Return self.having as a flattened list ignoring boolean operators This is because self.having can have a mix of BooleanConditions and Conditions. And each BooleanCondition can in turn be a mix of either type. " 575,"def _add_timedelta_arraylike(self, other): # overridden by PeriodArray if len(self) != len(other): raise ValueError(""cannot add indices of unequal length"") if isinstance(other, np.ndarray): # ndarray[timedelta64]; wrap in TimedeltaIndex for op from pandas.core.arrays import TimedeltaArray other = TimedeltaArray._from_sequence(other) self_i8 = self.asi8 other_i8 = other.asi8 new_values = checked_add_with_arr( self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan ) if self._hasna or other._hasna: mask = self._isnan | other._isnan np.putmask(new_values, mask, iNaT) return type(self)(new_values, dtype=self.dtype) "," Add a delta of a TimedeltaIndex Returns ------- Same type as self ",12,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _add_timedelta_arraylike(self, other): # overridden by PeriodArray if len(self) != len(other): raise ValueError(""cannot add indices of unequal length"") if isinstance(other, np.ndarray): # ndarray[timedelta64]; wrap in TimedeltaIndex for op from pandas.core.arrays import TimedeltaArray other = TimedeltaArray._from_sequence(other) self_i8 = self.asi8 other_i8 = other.asi8 new_values = checked_add_with_arr( self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan ) if self._hasna or other._hasna: mask = self._isnan | other._isnan np.putmask(new_values, mask, iNaT) return type(self)(new_values, dtype=self.dtype) ``` ###Assistant : Add a delta of a TimedeltaIndex Returns ------- Same type as self " 576,"def _map_graph_network(inputs, outputs): # ""depth"" is number of layers between output Node and the Node. # Nodes are ordered from inputs -> outputs. 
nodes_in_decreasing_depth, layer_indices = _build_map(outputs) network_nodes = { _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node)) for node in nodes_in_decreasing_depth } nodes_depths = {} # dict {node: depth value} layers_depths = {} # dict {layer: depth value} for node in reversed(nodes_in_decreasing_depth): # If the depth is not set, the node has no outbound nodes (depth 0). depth = nodes_depths.setdefault(node, 0) # Update the depth of the corresponding layer previous_depth = layers_depths.get(node.layer, 0) # If we've seen this layer before at a higher depth, # we should use that depth instead of the node depth. # This is necessary for shared layers that have inputs at different # depth levels in the graph. depth = max(depth, previous_depth) layers_depths[node.layer] = depth nodes_depths[node] = depth # Update the depth of inbound nodes. # The ""depth"" of a node is the max of the depths # of all nodes it is connected to + 1. for node_dep in node.parent_nodes: previous_depth = nodes_depths.get(node_dep, 0) nodes_depths[node_dep] = max(depth + 1, previous_depth) # Handle inputs that are not connected to outputs. # We do not error out here because the inputs may be used to compute losses # and metrics. for input_t in inputs: input_layer = input_t._keras_history[0] if input_layer not in layers_depths: layers_depths[input_layer] = 0 layer_indices[input_layer] = -1 nodes_depths[input_layer._inbound_nodes[0]] = 0 network_nodes.add(_make_node_key(input_layer.name, 0)) # Build a dict {depth: list of nodes with this depth} nodes_by_depth = collections.defaultdict(list) for node, depth in nodes_depths.items(): nodes_by_depth[depth].append(node) # Build a dict {depth: list of layers with this depth} layers_by_depth = collections.defaultdict(list) for layer, depth in layers_depths.items(): layers_by_depth[depth].append(layer) # Get sorted list of layer depths. depth_keys = list(layers_by_depth.keys()) depth_keys.sort(reverse=True) # Set self.layers ordered by depth. layers = [] for depth in depth_keys: layers_for_depth = layers_by_depth[depth] # Network.layers needs to have a deterministic order: # here we order them by traversal order. layers_for_depth.sort(key=lambda x: layer_indices[x]) layers.extend(layers_for_depth) # Get sorted list of node depths. depth_keys = list(nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Check that all tensors required are computable. # computable_tensors: all tensors in the graph # that can be computed from the inputs provided. computable_tensors = set() for x in inputs: computable_tensors.add(id(x)) layers_with_complete_input = [] # To provide a better error msg. for depth in depth_keys: for node in nodes_by_depth[depth]: layer = node.layer if layer and not node.is_input: for x in tf.nest.flatten(node.keras_inputs): if id(x) not in computable_tensors: raise ValueError( f""Graph disconnected: cannot obtain value for tensor {x} "" f'at layer ""{layer.name}"". The following previous layers ' f""were accessed without issue: {layers_with_complete_input}"" ) for x in tf.nest.flatten(node.outputs): computable_tensors.add(id(x)) layers_with_complete_input.append(layer.name) # Ensure name unicity, which will be crucial for serialization # (since serialized nodes refer to layers by their name). all_names = [layer.name for layer in layers] for name in all_names: if all_names.count(name) != 1: raise ValueError( f'The name ""{name}"" is used {all_names.count(name)} ' ""times in the model. 
All layer names should be unique."" ) return network_nodes, nodes_by_depth, layers, layers_by_depth ","Validates a network's topology and gather its layers and nodes. Args: inputs: List of input tensors. outputs: List of outputs tensors. Returns: A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`. - nodes: list of Node instances. - nodes_by_depth: dict mapping ints (depth) to lists of node instances. - layers: list of Layer instances. - layers_by_depth: dict mapping ints (depth) to lists of layer instances. Raises: ValueError: In case the network is not valid (e.g. disconnected graph). ",74,488,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _map_graph_network(inputs, outputs): # ""depth"" is number of layers between output Node and the Node. # Nodes are ordered from inputs -> outputs. nodes_in_decreasing_depth, layer_indices = _build_map(outputs) network_nodes = { _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node)) for node in nodes_in_decreasing_depth } nodes_depths = {} # dict {node: depth value} layers_depths = {} # dict {layer: depth value} for node in reversed(nodes_in_decreasing_depth): # If the depth is not set, the node has no outbound nodes (depth 0). depth = nodes_depths.setdefault(node, 0) # Update the depth of the corresponding layer previous_depth = layers_depths.get(node.layer, 0) # If we've seen this layer before at a higher depth, # we should use that depth instead of the node depth. # This is necessary for shared layers that have inputs at different # depth levels in the graph. depth = max(depth, previous_depth) layers_depths[node.layer] = depth nodes_depths[node] = depth # Update the depth of inbound nodes. # The ""depth"" of a node is the max of the depths # of all nodes it is connected to + 1. for node_dep in node.parent_nodes: previous_depth = nodes_depths.get(node_dep, 0) nodes_depths[node_dep] = max(depth + 1, previous_depth) # Handle inputs that are not connected to outputs. # We do not error out here because the inputs may be used to compute losses # and metrics. for input_t in inputs: input_layer = input_t._keras_history[0] if input_layer not in layers_depths: layers_depths[input_layer] = 0 layer_indices[input_layer] = -1 nodes_depths[input_layer._inbound_nodes[0]] = 0 network_nodes.add(_make_node_key(input_layer.name, 0)) # Build a dict {depth: list of nodes with this depth} nodes_by_depth = collections.defaultdict(list) for node, depth in nodes_depths.items(): nodes_by_depth[depth].append(node) # Build a dict {depth: list of layers with this depth} layers_by_depth = collections.defaultdict(list) for layer, depth in layers_depths.items(): layers_by_depth[depth].append(layer) # Get sorted list of layer depths. depth_keys = list(layers_by_depth.keys()) depth_keys.sort(reverse=True) # Set self.layers ordered by depth. layers = [] for depth in depth_keys: layers_for_depth = layers_by_depth[depth] # Network.layers needs to have a deterministic order: # here we order them by traversal order. layers_for_depth.sort(key=lambda x: layer_indices[x]) layers.extend(layers_for_depth) # Get sorted list of node depths. depth_keys = list(nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Check that all tensors required are computable. # computable_tensors: all tensors in the graph # that can be computed from the inputs provided. computable_tensors = set() for x in inputs: computable_tensors.add(id(x)) layers_with_complete_input = [] # To provide a better error msg. 
for depth in depth_keys: for node in nodes_by_depth[depth]: layer = node.layer if layer and not node.is_input: for x in tf.nest.flatten(node.keras_inputs): if id(x) not in computable_tensors: raise ValueError( f""Graph disconnected: cannot obtain value for tensor {x} "" f'at layer ""{layer.name}"". The following previous layers ' f""were accessed without issue: {layers_with_complete_input}"" ) for x in tf.nest.flatten(node.outputs): computable_tensors.add(id(x)) layers_with_complete_input.append(layer.name) # Ensure name unicity, which will be crucial for serialization # (since serialized nodes refer to layers by their name). all_names = [layer.name for layer in layers] for name in all_names: if all_names.count(name) != 1: raise ValueError( f'The name ""{name}"" is used {all_names.count(name)} ' ""times in the model. All layer names should be unique."" ) return network_nodes, nodes_by_depth, layers, layers_by_depth ``` ###Assistant : Validates a network's topology and gather its layers and nodes. Args: inputs: List of input tensors. outputs: List of outputs tensors. Returns: A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`. - nodes: list of Node instances. - nodes_by_depth: dict mapping ints (depth) to lists of node instances. - layers: list of Layer instances. - layers_by_depth: dict mapping ints (depth) to lists of layer instances. Raises: ValueError: In case the network is not valid (e.g. disconnected graph). " 577,"def default_batch_format(self) -> Type: # noqa: E501 import pandas as pd import pyarrow as pa schema = self.schema() assert isinstance(schema, (type, PandasBlockSchema, pa.Schema)) if isinstance(schema, type): return list if isinstance(schema, (PandasBlockSchema, pa.Schema)): if schema.names == [VALUE_COL_NAME]: return np.ndarray return pd.DataFrame ","Return this dataset's default batch format. The default batch format describes what batches of data look like. To learn more about batch formats, read :ref:`writing user-defined functions `. Example: If your dataset represents a list of Python objects, then the default batch format is ``list``. >>> ds = ray.data.range(100) >>> ds # doctest: +SKIP Dataset(num_blocks=20, num_rows=100, schema=) >>> ds.default_batch_format() >>> next(ds.iter_batches(batch_size=4)) [0, 1, 2, 3] If your dataset contains a single ``TensorDtype`` or ``ArrowTensorType`` column named ``__value__`` (as created by :func:`ray.data.from_numpy`), then the default batch format is ``np.ndarray``. For more information on tensor datasets, read the :ref:`tensor support guide `. >>> ds = ray.data.range_tensor(100) >>> ds # doctest: +SKIP Dataset(num_blocks=20, num_rows=100, schema={__value__: ArrowTensorType(shape=(1,), dtype=int64)}) >>> ds.default_batch_format() >>> next(ds.iter_batches(batch_size=4)) array([[0], [1], [2], [3]]) If your dataset represents tabular data and doesn't only consist of a ``__value__`` tensor column (such as is created by :meth:`ray.data.from_numpy`), then the default batch format is ``pd.DataFrame``. >>> import pandas as pd >>> df = pd.DataFrame({""foo"": [""a"", ""b""], ""bar"": [0, 1]}) >>> ds = ray.data.from_pandas(df) >>> ds # doctest: +SKIP Dataset(num_blocks=1, num_rows=2, schema={foo: object, bar: int64}) >>> ds.default_batch_format() >>> next(ds.iter_batches(batch_size=4)) foo bar 0 a 0 1 b 1 .. seealso:: :meth:`~Dataset.map_batches` Call this function to transform batches of data. :meth:`~Dataset.iter_batches` Call this function to iterate over batches of data. 
",219,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def default_batch_format(self) -> Type: # noqa: E501 import pandas as pd import pyarrow as pa schema = self.schema() assert isinstance(schema, (type, PandasBlockSchema, pa.Schema)) if isinstance(schema, type): return list if isinstance(schema, (PandasBlockSchema, pa.Schema)): if schema.names == [VALUE_COL_NAME]: return np.ndarray return pd.DataFrame ``` ###Assistant : Return this dataset's default batch format. The default batch format describes what batches of data look like. To learn more about batch formats, read :ref:`writing user-defined functions `. Example: If your dataset represents a list of Python objects, then the default batch format is ``list``. >>> ds = ray.data.range(100) >>> ds # doctest: +SKIP Dataset(num_blocks=20, num_rows=100, schema=) >>> ds.default_batch_format() >>> next(ds.iter_batches(batch_size=4)) [0, 1, 2, 3] If your dataset contains a single ``TensorDtype`` or ``ArrowTensorType`` column named ``__value__`` (as created by :func:`ray.data.from_numpy`), then the default batch format is ``np.ndarray``. For more information on tensor datasets, read the :ref:`tensor support guide `. >>> ds = ray.data.range_tensor(100) >>> ds # doctest: +SKIP Dataset(num_blocks=20, num_rows=100, schema={__value__: ArrowTensorType(shape=(1,), dtype=int64)}) >>> ds.default_batch_format() >>> next(ds.iter_batches(batch_size=4)) array([[0], [1], [2], [3]]) If your dataset represents tabular data and doesn't only consist of a ``__value__`` tensor column (such as is created by :meth:`ray.data.from_numpy`), then the default batch format is ``pd.DataFrame``. >>> import pandas as pd >>> df = pd.DataFrame({""foo"": [""a"", ""b""], ""bar"": [0, 1]}) >>> ds = ray.data.from_pandas(df) >>> ds # doctest: +SKIP Dataset(num_blocks=1, num_rows=2, schema={foo: object, bar: int64}) >>> ds.default_batch_format() >>> next(ds.iter_batches(batch_size=4)) foo bar 0 a 0 1 b 1 .. seealso:: :meth:`~Dataset.map_batches` Call this function to transform batches of data. :meth:`~Dataset.iter_batches` Call this function to iterate over batches of data. " 578,"def test_decision_tree_regressor_sample_weight_consistency(criterion): tree_params = dict(criterion=criterion) tree = DecisionTreeRegressor(**tree_params, random_state=42) for kind in [""zeros"", ""ones""]: check_sample_weights_invariance( ""DecisionTreeRegressor_"" + criterion, tree, kind=""zeros"" ) rng = np.random.RandomState(0) n_samples, n_features = 10, 5 X = rng.rand(n_samples, n_features) y = np.mean(X, axis=1) + rng.rand(n_samples) # make it positive in order to work also for poisson criterion y += np.min(y) + 0.1 # check that multiplying sample_weight by 2 is equivalent # to repeating corresponding samples twice X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) y2 = np.concatenate([y, y[: n_samples // 2]]) sample_weight_1 = np.ones(len(y)) sample_weight_1[: n_samples // 2] = 2 tree1 = DecisionTreeRegressor(**tree_params).fit( X, y, sample_weight=sample_weight_1 ) tree2 = DecisionTreeRegressor(**tree_params).fit(X2, y2, sample_weight=None) assert tree1.tree_.node_count == tree2.tree_.node_count # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not # exactly the same, but on the training set, those differences do not # matter and thus predictions are the same. 
assert_allclose(tree1.predict(X), tree2.predict(X)) # TODO: Remove in v1.2 @pytest.mark.parametrize(""Tree"", REG_TREES.values()) @pytest.mark.parametrize( ""old_criterion, new_criterion"", [ (""mse"", ""squared_error""), (""mae"", ""absolute_error""), ], )",Test that the impact of sample_weight is consistent.,8,159,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_decision_tree_regressor_sample_weight_consistency(criterion): tree_params = dict(criterion=criterion) tree = DecisionTreeRegressor(**tree_params, random_state=42) for kind in [""zeros"", ""ones""]: check_sample_weights_invariance( ""DecisionTreeRegressor_"" + criterion, tree, kind=""zeros"" ) rng = np.random.RandomState(0) n_samples, n_features = 10, 5 X = rng.rand(n_samples, n_features) y = np.mean(X, axis=1) + rng.rand(n_samples) # make it positive in order to work also for poisson criterion y += np.min(y) + 0.1 # check that multiplying sample_weight by 2 is equivalent # to repeating corresponding samples twice X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) y2 = np.concatenate([y, y[: n_samples // 2]]) sample_weight_1 = np.ones(len(y)) sample_weight_1[: n_samples // 2] = 2 tree1 = DecisionTreeRegressor(**tree_params).fit( X, y, sample_weight=sample_weight_1 ) tree2 = DecisionTreeRegressor(**tree_params).fit(X2, y2, sample_weight=None) assert tree1.tree_.node_count == tree2.tree_.node_count # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not # exactly the same, but on the training set, those differences do not # matter and thus predictions are the same. assert_allclose(tree1.predict(X), tree2.predict(X)) # TODO: Remove in v1.2 @pytest.mark.parametrize(""Tree"", REG_TREES.values()) @pytest.mark.parametrize( ""old_criterion, new_criterion"", [ (""mse"", ""squared_error""), (""mae"", ""absolute_error""), ], ) ``` ###Assistant : Test that the impact of sample_weight is consistent. " 579,"def test_with_fk_to_field(self): response = self.client.get( reverse(""admin:auth_user_changelist"") + ""?q=joe&%s=id"" % TO_FIELD_VAR ) self.assertContains(response, ""\n1 user\n"") self.assertContains( response, '' % TO_FIELD_VAR, html=True, ) "," The to_field GET parameter is preserved when a search is performed. Refs #10918. ",13,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_with_fk_to_field(self): response = self.client.get( reverse(""admin:auth_user_changelist"") + ""?q=joe&%s=id"" % TO_FIELD_VAR ) self.assertContains(response, ""\n1 user\n"") self.assertContains( response, '' % TO_FIELD_VAR, html=True, ) ``` ###Assistant : The to_field GET parameter is preserved when a search is performed. Refs #10918. " 580,"def remove_module_load(state_dict): new_state_dict = OrderedDict() for k, v in state_dict.items(): new_state_dict[k[7:]] = v return new_state_dict ",create new OrderedDict that does not contain `module.`,8,15,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def remove_module_load(state_dict): new_state_dict = OrderedDict() for k, v in state_dict.items(): new_state_dict[k[7:]] = v return new_state_dict ``` ###Assistant : create new OrderedDict that does not contain `module.` " 581,"def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) LAYOUTLMV3_START_DOCSTRING = r LAYOUTLMV3_INPUTS_DOCSTRING = r @add_start_docstrings( ""The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top."", LAYOUTLMV3_START_DOCSTRING, )"," Method used for serving the model. Args: inputs (`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. Parameters: config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. Args: input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. Indices can be obtained using [`LayoutLMv3Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size, config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height / config.patch_size) * (width / config.patch_size))`. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are position IDs?](../glossary#position-ids) head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ",689,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) LAYOUTLMV3_START_DOCSTRING = r LAYOUTLMV3_INPUTS_DOCSTRING = r @add_start_docstrings( ""The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top."", LAYOUTLMV3_START_DOCSTRING, ) ``` ###Assistant : Method used for serving the model. Args: inputs (`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. 
This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. Parameters: config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. Args: input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. Indices can be obtained using [`LayoutLMv3Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size, config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height / config.patch_size) * (width / config.patch_size))`. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are position IDs?](../glossary#position-ids) head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. " 582,"def media_image_url(self): if self._table.active_track: return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE) return super().media_image_url ",Return the URL for a thumbnail image of the current track.,11,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def media_image_url(self): if self._table.active_track: return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE) return super().media_image_url ``` ###Assistant : Return the URL for a thumbnail image of the current track. " 583,"def test_fluctuating_ongoing_requests(delay_s): config = AutoscalingConfig( min_replicas=1, max_replicas=10, target_num_ongoing_requests_per_replica=50, upscale_delay_s=delay_s, downscale_delay_s=delay_s) policy = BasicAutoscalingPolicy(config) if delay_s > 0: wait_periods = int(delay_s / CONTROL_LOOP_PERIOD_S) assert wait_periods > 1 underload_requests, overload_requests = [20, 20], [100] trials = 1000 new_num_replicas = None for trial in range(trials): if trial % 2 == 0: new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1) if delay_s > 0: assert new_num_replicas == 1, trial else: assert new_num_replicas == 2, trial else: new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=underload_requests, curr_target_num_replicas=2) if delay_s > 0: assert new_num_replicas == 2, trial else: assert new_num_replicas == 1, trial @pytest.mark.parametrize( ""ongoing_requests"", [[7, 1, 8, 4], [8, 1, 8, 4], [6, 1, 8, 4], [0, 1, 8, 4]])"," Simulates a workload that switches between too many and too few ongoing requests. ",13,107,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_fluctuating_ongoing_requests(delay_s): config = AutoscalingConfig( min_replicas=1, max_replicas=10, target_num_ongoing_requests_per_replica=50, upscale_delay_s=delay_s, downscale_delay_s=delay_s) policy = BasicAutoscalingPolicy(config) if delay_s > 0: wait_periods = int(delay_s / CONTROL_LOOP_PERIOD_S) assert wait_periods > 1 underload_requests, overload_requests = [20, 20], [100] trials = 1000 new_num_replicas = None for trial in range(trials): if trial % 2 == 0: new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1) if delay_s > 0: assert new_num_replicas == 1, trial else: assert new_num_replicas == 2, trial else: new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=underload_requests, curr_target_num_replicas=2) if delay_s > 0: assert new_num_replicas == 2, trial else: assert new_num_replicas == 1, trial @pytest.mark.parametrize( ""ongoing_requests"", [[7, 1, 8, 4], [8, 1, 8, 4], [6, 1, 8, 4], [0, 1, 8, 4]]) ``` ###Assistant : Simulates a workload that switches between too many and too few ongoing requests. " 584,"def librosa_pad_lr(x, fsize, fshift, pad_sides=1): assert pad_sides in (1, 2) # return int(fsize // 2) pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0] if pad_sides == 1: return 0, pad else: return pad // 2, pad // 2 + pad % 2 # Conversions","compute right padding (final frame) or both sides padding (first and final frames) ",13,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def librosa_pad_lr(x, fsize, fshift, pad_sides=1): assert pad_sides in (1, 2) # return int(fsize // 2) pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0] if pad_sides == 1: return 0, pad else: return pad // 2, pad // 2 + pad % 2 # Conversions ``` ###Assistant : compute right padding (final frame) or both sides padding (first and final frames) " 585,"def setup_tpu(tpu_driver_version='tpu_driver-0.2'): global TPU_DRIVER_MODE if not TPU_DRIVER_MODE: colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0] url = f'http://{colab_tpu_addr}:8475/requestversion/{tpu_driver_version}' requests.post(url) TPU_DRIVER_MODE = 1 # The following is required to use TPU Driver as JAX's backend. config.FLAGS.jax_xla_backend = ""tpu_driver"" config.FLAGS.jax_backend_target = ""grpc://"" + os.environ['COLAB_TPU_ADDR'] ","Sets up Colab to run on TPU. Note: make sure the Colab Runtime is set to Accelerator: TPU. Args ---- tpu_driver_version : (str) specify the version identifier for the tpu driver. Defaults to ""tpu_driver-0.2"", which can be used with jaxlib 0.3.20. Set to ""tpu_driver_nightly"" to use the nightly tpu driver build. ",51,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def setup_tpu(tpu_driver_version='tpu_driver-0.2'): global TPU_DRIVER_MODE if not TPU_DRIVER_MODE: colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0] url = f'http://{colab_tpu_addr}:8475/requestversion/{tpu_driver_version}' requests.post(url) TPU_DRIVER_MODE = 1 # The following is required to use TPU Driver as JAX's backend. config.FLAGS.jax_xla_backend = ""tpu_driver"" config.FLAGS.jax_backend_target = ""grpc://"" + os.environ['COLAB_TPU_ADDR'] ``` ###Assistant : Sets up Colab to run on TPU. Note: make sure the Colab Runtime is set to Accelerator: TPU. 
Args ---- tpu_driver_version : (str) specify the version identifier for the tpu driver. Defaults to ""tpu_driver-0.2"", which can be used with jaxlib 0.3.20. Set to ""tpu_driver_nightly"" to use the nightly tpu driver build. " 586,"def test_timeout_lock(self): lock = self.get_success(self.store.try_acquire_lock(""name"", ""key"")) assert lock is not None self.get_success(lock.__aenter__()) # We simulate the process getting stuck by cancelling the looping call # that keeps the lock active. lock._looping_call.stop() # Wait for the lock to timeout. self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000) lock2 = self.get_success(self.store.try_acquire_lock(""name"", ""key"")) self.assertIsNotNone(lock2) self.assertFalse(self.get_success(lock.is_still_valid())) ",Test that we time out locks if they're not updated for ages,12,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_timeout_lock(self): lock = self.get_success(self.store.try_acquire_lock(""name"", ""key"")) assert lock is not None self.get_success(lock.__aenter__()) # We simulate the process getting stuck by cancelling the looping call # that keeps the lock active. lock._looping_call.stop() # Wait for the lock to timeout. self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000) lock2 = self.get_success(self.store.try_acquire_lock(""name"", ""key"")) self.assertIsNotNone(lock2) self.assertFalse(self.get_success(lock.is_still_valid())) ``` ###Assistant : Test that we time out locks if they're not updated for ages " 587,"def random_brightness(x, brightness_range, scale=True): if len(brightness_range) != 2: raise ValueError( ""`brightness_range should be tuple or list of two floats. "" ""Received: %s"" % (brightness_range,) ) u = np.random.uniform(brightness_range[0], brightness_range[1]) return apply_brightness_shift(x, u, scale) ","Performs a random brightness shift. Deprecated: `tf.keras.preprocessing.image.random_brightness` does not operate on tensors and is not recommended for new code. Prefer `tf.keras.layers.RandomBrightness` which provides equivalent functionality as a preprocessing layer. For more information, see the tutorial for [augmenting images]( https://www.tensorflow.org/tutorials/images/data_augmentation), as well as the [preprocessing layer guide]( https://www.tensorflow.org/guide/keras/preprocessing_layers). Args: x: Input tensor. Must be 3D. brightness_range: Tuple of floats; brightness range. scale: Whether to rescale the image such that minimum and maximum values are 0 and 255 respectively. Default: True. Returns: Numpy image tensor. Raises: ValueError if `brightness_range` isn't a tuple. ",90,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def random_brightness(x, brightness_range, scale=True): if len(brightness_range) != 2: raise ValueError( ""`brightness_range should be tuple or list of two floats. "" ""Received: %s"" % (brightness_range,) ) u = np.random.uniform(brightness_range[0], brightness_range[1]) return apply_brightness_shift(x, u, scale) ``` ###Assistant : Performs a random brightness shift. Deprecated: `tf.keras.preprocessing.image.random_brightness` does not operate on tensors and is not recommended for new code. Prefer `tf.keras.layers.RandomBrightness` which provides equivalent functionality as a preprocessing layer. 
For more information, see the tutorial for [augmenting images]( https://www.tensorflow.org/tutorials/images/data_augmentation), as well as the [preprocessing layer guide]( https://www.tensorflow.org/guide/keras/preprocessing_layers). Args: x: Input tensor. Must be 3D. brightness_range: Tuple of floats; brightness range. scale: Whether to rescale the image such that minimum and maximum values are 0 and 255 respectively. Default: True. Returns: Numpy image tensor. Raises: ValueError if `brightness_range` isn't a tuple. " 588,"def test_check_loop_sync(caplog): hasync.check_loop() assert ""Detected blocking call inside the event loop"" not in caplog.text ",Test check_loop does nothing when called from thread.,8,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_check_loop_sync(caplog): hasync.check_loop() assert ""Detected blocking call inside the event loop"" not in caplog.text ``` ###Assistant : Test check_loop does nothing when called from thread. " 589,"def update(self) -> bool: try: # Add or remove DeploymentReplica instances in self._replicas. # This should be the only place we adjust total number of replicas # we manage. running_replicas_changed = self._scale_deployment_replicas() # Check the state of existing replicas and transition if necessary. running_replicas_changed |= self._check_and_update_replicas() if running_replicas_changed: self._notify_running_replicas_changed() deleted = self._check_curr_status() except Exception: self._curr_status_info = DeploymentStatusInfo( name=self._name, status=DeploymentStatus.UNHEALTHY, message=""Failed to update deployment:"" f""\n{traceback.format_exc()}"", ) deleted = False return deleted ","Attempts to reconcile this deployment to match its goal state. This is an asynchronous call; it's expected to be called repeatedly. Also updates the internal DeploymentStatusInfo based on the current state of the system. Returns true if this deployment was successfully deleted. ",42,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self) -> bool: try: # Add or remove DeploymentReplica instances in self._replicas. # This should be the only place we adjust total number of replicas # we manage. running_replicas_changed = self._scale_deployment_replicas() # Check the state of existing replicas and transition if necessary. running_replicas_changed |= self._check_and_update_replicas() if running_replicas_changed: self._notify_running_replicas_changed() deleted = self._check_curr_status() except Exception: self._curr_status_info = DeploymentStatusInfo( name=self._name, status=DeploymentStatus.UNHEALTHY, message=""Failed to update deployment:"" f""\n{traceback.format_exc()}"", ) deleted = False return deleted ``` ###Assistant : Attempts to reconcile this deployment to match its goal state. This is an asynchronous call; it's expected to be called repeatedly. Also updates the internal DeploymentStatusInfo based on the current state of the system. Returns true if this deployment was successfully deleted. 
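A hedged usage sketch may help illustrate the reconcile contract; this is not the actual Ray Serve controller loop, and `deployment_state` plus the loop-period constant are assumed names for illustration only:
```Python
# Illustrative only: drive a reconcile-style update() on a fixed period until
# it reports that the deployment has been deleted. `deployment_state` is an
# assumed instance of the class defining update() above.
import time

ASSUMED_CONTROL_LOOP_PERIOD_S = 0.1  # hypothetical loop interval

def run_until_deleted(deployment_state) -> None:
    while True:
        deleted = deployment_state.update()  # expected to be called repeatedly
        if deleted:
            break
        time.sleep(ASSUMED_CONTROL_LOOP_PERIOD_S)
```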
" 590,"def test_https_malformed_host(self): req = self._get_request(method=""POST"") req._is_secure_override = True req.META[""HTTP_HOST""] = ""@malformed"" req.META[""HTTP_REFERER""] = ""https://www.evil.org/somepage"" req.META[""SERVER_PORT""] = ""443"" mw = CsrfViewMiddleware(token_view) expected = ( ""Referer checking failed - https://www.evil.org/somepage does not "" ""match any trusted origins."" ) with self.assertRaisesMessage(RejectRequest, expected): mw._check_referer(req) response = mw.process_view(req, token_view, (), {}) self.assertEqual(response.status_code, 403) "," CsrfViewMiddleware generates a 403 response if it receives an HTTPS request with a bad host. ",15,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_https_malformed_host(self): req = self._get_request(method=""POST"") req._is_secure_override = True req.META[""HTTP_HOST""] = ""@malformed"" req.META[""HTTP_REFERER""] = ""https://www.evil.org/somepage"" req.META[""SERVER_PORT""] = ""443"" mw = CsrfViewMiddleware(token_view) expected = ( ""Referer checking failed - https://www.evil.org/somepage does not "" ""match any trusted origins."" ) with self.assertRaisesMessage(RejectRequest, expected): mw._check_referer(req) response = mw.process_view(req, token_view, (), {}) self.assertEqual(response.status_code, 403) ``` ###Assistant : CsrfViewMiddleware generates a 403 response if it receives an HTTPS request with a bad host. " 591,"def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: # TODO: self.authenticator should optionally pull from self._session.auth customers_stream = Customers(authenticator=self._session.auth) for customer in customers_stream.read_records(sync_mode=SyncMode.full_refresh): yield {""customer_id"": customer[""id""]} "," This stream is sliced per `customer_id`. This has two implications: (1) State can be checkpointed after processing each slice (2) The other parameters (e.g. request_params, path) can be dependent on this slice. This allows us to pull data on a per customer_id basis, since that's what Orb exposes. ",48,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: # TODO: self.authenticator should optionally pull from self._session.auth customers_stream = Customers(authenticator=self._session.auth) for customer in customers_stream.read_records(sync_mode=SyncMode.full_refresh): yield {""customer_id"": customer[""id""]} ``` ###Assistant : This stream is sliced per `customer_id`. This has two implications: (1) State can be checkpointed after processing each slice (2) The other parameters (e.g. request_params, path) can be dependent on this slice. This allows us to pull data on a per customer_id basis, since that's what Orb exposes. 
" 592,"def ragged_assert_compatible_and_get_flat_values(values, mask=None): if isinstance(values, list): is_all_ragged = all(isinstance(rt, tf.RaggedTensor) for rt in values) is_any_ragged = any(isinstance(rt, tf.RaggedTensor) for rt in values) else: is_all_ragged = isinstance(values, tf.RaggedTensor) is_any_ragged = is_all_ragged if is_all_ragged and ((mask is None) or isinstance(mask, tf.RaggedTensor)): to_be_stripped = False if not isinstance(values, list): values = [values] to_be_stripped = True # NOTE: we leave the flat_values compatibility to # tf.TensorShape `assert_is_compatible_with` check if both dynamic # dimensions are equal and then use the flat_values. nested_row_split_list = [rt.nested_row_splits for rt in values] assertion_list = _assert_splits_match(nested_row_split_list) # if both are ragged sample_weights also should be ragged with same # dims. if isinstance(mask, tf.RaggedTensor): assertion_list_for_mask = _assert_splits_match( [nested_row_split_list[0], mask.nested_row_splits] ) with tf.control_dependencies(assertion_list_for_mask): mask = tf.expand_dims(mask.flat_values, -1) # values has at least 1 element. flat_values = [] for value in values: with tf.control_dependencies(assertion_list): flat_values.append(tf.expand_dims(value.flat_values, -1)) values = flat_values[0] if to_be_stripped else flat_values elif is_any_ragged: raise TypeError( ""Some of the inputs are not tf.RaggedTensor. "" f""Input received: {values}"" ) # values are empty or value are not ragged and mask is ragged. elif isinstance(mask, tf.RaggedTensor): raise TypeError( ""Ragged mask is not allowed with non-ragged inputs. "" f""Input received: {values}, mask received: {mask}"" ) return values, mask ","If ragged, it checks the compatibility and then returns the flat_values. Note: If two tensors are dense, it does not check their compatibility. Note: Although two ragged tensors with different ragged ranks could have identical overall rank and dimension sizes and hence be compatible, we do not support those cases. Args: values: A list of potentially ragged tensor of the same ragged_rank. mask: A potentially ragged tensor of the same ragged_rank as elements in Values. Returns: A tuple in which the first element is the list of tensors and the second is the mask tensor. ([Values], mask). Mask and the element in Values are equal to the flat_values of the input arguments (if they were ragged). ",116,193,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ragged_assert_compatible_and_get_flat_values(values, mask=None): if isinstance(values, list): is_all_ragged = all(isinstance(rt, tf.RaggedTensor) for rt in values) is_any_ragged = any(isinstance(rt, tf.RaggedTensor) for rt in values) else: is_all_ragged = isinstance(values, tf.RaggedTensor) is_any_ragged = is_all_ragged if is_all_ragged and ((mask is None) or isinstance(mask, tf.RaggedTensor)): to_be_stripped = False if not isinstance(values, list): values = [values] to_be_stripped = True # NOTE: we leave the flat_values compatibility to # tf.TensorShape `assert_is_compatible_with` check if both dynamic # dimensions are equal and then use the flat_values. nested_row_split_list = [rt.nested_row_splits for rt in values] assertion_list = _assert_splits_match(nested_row_split_list) # if both are ragged sample_weights also should be ragged with same # dims. 
if isinstance(mask, tf.RaggedTensor): assertion_list_for_mask = _assert_splits_match( [nested_row_split_list[0], mask.nested_row_splits] ) with tf.control_dependencies(assertion_list_for_mask): mask = tf.expand_dims(mask.flat_values, -1) # values has at least 1 element. flat_values = [] for value in values: with tf.control_dependencies(assertion_list): flat_values.append(tf.expand_dims(value.flat_values, -1)) values = flat_values[0] if to_be_stripped else flat_values elif is_any_ragged: raise TypeError( ""Some of the inputs are not tf.RaggedTensor. "" f""Input received: {values}"" ) # values are empty or value are not ragged and mask is ragged. elif isinstance(mask, tf.RaggedTensor): raise TypeError( ""Ragged mask is not allowed with non-ragged inputs. "" f""Input received: {values}, mask received: {mask}"" ) return values, mask ``` ###Assistant : If ragged, it checks the compatibility and then returns the flat_values. Note: If two tensors are dense, it does not check their compatibility. Note: Although two ragged tensors with different ragged ranks could have identical overall rank and dimension sizes and hence be compatible, we do not support those cases. Args: values: A list of potentially ragged tensor of the same ragged_rank. mask: A potentially ragged tensor of the same ragged_rank as elements in Values. Returns: A tuple in which the first element is the list of tensors and the second is the mask tensor. ([Values], mask). Mask and the element in Values are equal to the flat_values of the input arguments (if they were ragged). " 593,"def update_sandbox_args(self): if self.is_sandbox: host, port = HubIO.deploy_public_sandbox(self.args) self._sandbox_deployed = True self.first_pod_args.host = host self.first_pod_args.port = port if self.head_args: self.pod_args['head'].host = host self.pod_args['head'].port = port ",Update args of all its pods based on the host and port returned by Hubble,15,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_sandbox_args(self): if self.is_sandbox: host, port = HubIO.deploy_public_sandbox(self.args) self._sandbox_deployed = True self.first_pod_args.host = host self.first_pod_args.port = port if self.head_args: self.pod_args['head'].host = host self.pod_args['head'].port = port ``` ###Assistant : Update args of all its pods based on the host and port returned by Hubble " 594,"def download_all(): for name in DATA_HUB: download(name) DATA_HUB['kaggle_house_train'] = ( DATA_URL + 'kaggle_house_pred_train.csv', '585e9cc93e70b39160e7921475f9bcd7d31219ce') DATA_HUB['kaggle_house_test'] = ( DATA_URL + 'kaggle_house_pred_test.csv', 'fa19780a7b011d9b009e8bff8e99922a8ee2eb90') ","Download all files in the DATA_HUB. Defined in :numref:`sec_kaggle_house`",9,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def download_all(): for name in DATA_HUB: download(name) DATA_HUB['kaggle_house_train'] = ( DATA_URL + 'kaggle_house_pred_train.csv', '585e9cc93e70b39160e7921475f9bcd7d31219ce') DATA_HUB['kaggle_house_test'] = ( DATA_URL + 'kaggle_house_pred_test.csv', 'fa19780a7b011d9b009e8bff8e99922a8ee2eb90') ``` ###Assistant : Download all files in the DATA_HUB. Defined in :numref:`sec_kaggle_house` " 595,"def generate_matches(self, nodes): r = {} if nodes and self.match(nodes[0], r): yield 1, r "," Generator yielding all matches for this pattern. Default implementation for non-wildcard patterns. 
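As an illustration, a caller might consume the generator as in the hedged sketch below, where `pattern` and `nodes` are assumed to be a lib2to3-style pattern object and a list of parse-tree nodes:
```Python
# Hedged sketch: each yielded pair is (number of leading nodes consumed,
# results dict of named sub-matches), per the method above.
def first_match(pattern, nodes):
    for count, results in pattern.generate_matches(nodes):
        return count, results
    return None  # no match
```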
",12,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_matches(self, nodes): r = {} if nodes and self.match(nodes[0], r): yield 1, r ``` ###Assistant : Generator yielding all matches for this pattern. Default implementation for non-wildcard patterns. " 596,"def user_can_delete_obj(self, user, obj): perm_codename = self.get_perm_codename(""delete"") return self.user_has_specific_permission(user, perm_codename) "," Return a boolean to indicate whether `user` is permitted to 'delete' a specific `self.model` instance. ",15,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def user_can_delete_obj(self, user, obj): perm_codename = self.get_perm_codename(""delete"") return self.user_has_specific_permission(user, perm_codename) ``` ###Assistant : Return a boolean to indicate whether `user` is permitted to 'delete' a specific `self.model` instance. " 597,"def _busy_indicator_trace(self, *args) -> None: logger.trace(""Busy indicator trace: %s"", args) # type: ignore if self._busy_tkvar.get(): self._start_busy_indicator() else: self._stop_busy_indicator() "," Show or hide busy indicator based on whether the preview is updating. Parameters ---------- args: unused Required for tkinter event, but unused ",22,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _busy_indicator_trace(self, *args) -> None: logger.trace(""Busy indicator trace: %s"", args) # type: ignore if self._busy_tkvar.get(): self._start_busy_indicator() else: self._stop_busy_indicator() ``` ###Assistant : Show or hide busy indicator based on whether the preview is updating. Parameters ---------- args: unused Required for tkinter event, but unused " 598,"def _gen_html_string(self): self.html_string = _hilite_me( self.code_string, self.language, self.style, self.insert_line_no, ""border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;"", self.file_path, self.line_no_from, ) if self.generate_html_file: os.makedirs( os.path.join(""assets"", ""codes"", ""generated_html_files""), exist_ok=True, ) with open( os.path.join( ""assets"", ""codes"", ""generated_html_files"", self.file_name + "".html"", ), ""w"", ) as file: file.write(self.html_string) ",Function to generate html string with code highlighted and stores in variable html_string.,13,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _gen_html_string(self): self.html_string = _hilite_me( self.code_string, self.language, self.style, self.insert_line_no, ""border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;"", self.file_path, self.line_no_from, ) if self.generate_html_file: os.makedirs( os.path.join(""assets"", ""codes"", ""generated_html_files""), exist_ok=True, ) with open( os.path.join( ""assets"", ""codes"", ""generated_html_files"", self.file_name + "".html"", ), ""w"", ) as file: file.write(self.html_string) ``` ###Assistant : Function to generate html string with code highlighted and stores in variable html_string. " 599,"def __getstate__(self): state = self.__dict__.copy() # remove the unpickleable RLock object state.pop(""_cookies_lock"") return state ","Unlike a normal CookieJar, this class is pickleable.",8,14,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def __getstate__(self): state = self.__dict__.copy() # remove the unpickleable RLock object state.pop(""_cookies_lock"") return state ``` ###Assistant : Unlike a normal CookieJar, this class is pickleable. " 600,"def get_model_dir(cfg): for key in cfg.keys(): if type(cfg[key]) == dict and \ (""enable"" in cfg[key].keys() and cfg[key]['enable'] or ""enable"" not in cfg[key].keys()): if ""model_dir"" in cfg[key].keys(): model_dir = cfg[key][""model_dir""] downloaded_model_dir = auto_download_model(model_dir) if downloaded_model_dir: model_dir = downloaded_model_dir cfg[key][""model_dir""] = model_dir print(key, "" model dir: "", model_dir) elif key == ""VEHICLE_PLATE"": det_model_dir = cfg[key][""det_model_dir""] downloaded_det_model_dir = auto_download_model(det_model_dir) if downloaded_det_model_dir: det_model_dir = downloaded_det_model_dir cfg[key][""det_model_dir""] = det_model_dir print(""det_model_dir model dir: "", det_model_dir) rec_model_dir = cfg[key][""rec_model_dir""] downloaded_rec_model_dir = auto_download_model(rec_model_dir) if downloaded_rec_model_dir: rec_model_dir = downloaded_rec_model_dir cfg[key][""rec_model_dir""] = rec_model_dir print(""rec_model_dir model dir: "", rec_model_dir) elif key == ""MOT"": # for idbased and skeletonbased actions model_dir = cfg[key][""model_dir""] downloaded_model_dir = auto_download_model(model_dir) if downloaded_model_dir: model_dir = downloaded_model_dir cfg[key][""model_dir""] = model_dir print(""mot_model_dir model_dir: "", model_dir) "," Auto download inference model if the model_path is a url link. Otherwise it will use the model_path directly. ",18,116,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_model_dir(cfg): for key in cfg.keys(): if type(cfg[key]) == dict and \ (""enable"" in cfg[key].keys() and cfg[key]['enable'] or ""enable"" not in cfg[key].keys()): if ""model_dir"" in cfg[key].keys(): model_dir = cfg[key][""model_dir""] downloaded_model_dir = auto_download_model(model_dir) if downloaded_model_dir: model_dir = downloaded_model_dir cfg[key][""model_dir""] = model_dir print(key, "" model dir: "", model_dir) elif key == ""VEHICLE_PLATE"": det_model_dir = cfg[key][""det_model_dir""] downloaded_det_model_dir = auto_download_model(det_model_dir) if downloaded_det_model_dir: det_model_dir = downloaded_det_model_dir cfg[key][""det_model_dir""] = det_model_dir print(""det_model_dir model dir: "", det_model_dir) rec_model_dir = cfg[key][""rec_model_dir""] downloaded_rec_model_dir = auto_download_model(rec_model_dir) if downloaded_rec_model_dir: rec_model_dir = downloaded_rec_model_dir cfg[key][""rec_model_dir""] = rec_model_dir print(""rec_model_dir model dir: "", rec_model_dir) elif key == ""MOT"": # for idbased and skeletonbased actions model_dir = cfg[key][""model_dir""] downloaded_model_dir = auto_download_model(model_dir) if downloaded_model_dir: model_dir = downloaded_model_dir cfg[key][""model_dir""] = model_dir print(""mot_model_dir model_dir: "", model_dir) ``` ###Assistant : Auto download inference model if the model_path is a url link. Otherwise it will use the model_path directly. " 601,"def component(self, x, y): if x >= 0 and x < self.__height and y >= 0 and y < self.__width: return self.__matrix[x][y] else: raise Exception(""changeComponent: indices out of bounds"") "," returns the specified (x,y) component ",5,29,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def component(self, x, y): if x >= 0 and x < self.__height and y >= 0 and y < self.__width: return self.__matrix[x][y] else: raise Exception(""changeComponent: indices out of bounds"") ``` ###Assistant : returns the specified (x,y) component " 602,"def toggle(self, all=None, ticks=None, ticklabels=None, label=None): if all: _ticks, _ticklabels, _label = True, True, True elif all is not None: _ticks, _ticklabels, _label = False, False, False else: _ticks, _ticklabels, _label = None, None, None if ticks is not None: _ticks = ticks if ticklabels is not None: _ticklabels = ticklabels if label is not None: _label = label if _ticks is not None: self.major_ticks.set_visible(_ticks) self.minor_ticks.set_visible(_ticks) if _ticklabels is not None: self.major_ticklabels.set_visible(_ticklabels) self.minor_ticklabels.set_visible(_ticklabels) if _label is not None: self.label.set_visible(_label) "," Toggle visibility of ticks, ticklabels, and (axis) label. To turn all off, :: axis.toggle(all=False) To turn all off but ticks on :: axis.toggle(all=False, ticks=True) To turn all on but (axis) label off :: axis.toggle(all=True, label=False) ",35,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def toggle(self, all=None, ticks=None, ticklabels=None, label=None): if all: _ticks, _ticklabels, _label = True, True, True elif all is not None: _ticks, _ticklabels, _label = False, False, False else: _ticks, _ticklabels, _label = None, None, None if ticks is not None: _ticks = ticks if ticklabels is not None: _ticklabels = ticklabels if label is not None: _label = label if _ticks is not None: self.major_ticks.set_visible(_ticks) self.minor_ticks.set_visible(_ticks) if _ticklabels is not None: self.major_ticklabels.set_visible(_ticklabels) self.minor_ticklabels.set_visible(_ticklabels) if _label is not None: self.label.set_visible(_label) ``` ###Assistant : Toggle visibility of ticks, ticklabels, and (axis) label. To turn all off, :: axis.toggle(all=False) To turn all off but ticks on :: axis.toggle(all=False, ticks=True) To turn all on but (axis) label off :: axis.toggle(all=True, label=False) " 603,"def mock_json_schema(request, monkeypatch, tmp_path): # Do not patch integration tests if ""integration"" in request.keywords: return # Mock the subclasses list to make it very small, containing only mock nodes monkeypatch.setattr( haystack.nodes._json_schema, ""find_subclasses_in_modules"", lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)], ) # Point the JSON schema path to tmp_path monkeypatch.setattr(haystack.pipelines.config, ""JSON_SCHEMAS_PATH"", tmp_path) # Generate mock schema in tmp_path filename = f""haystack-pipeline-master.schema.json"" test_schema = _json_schema.get_json_schema(filename=filename, version=""ignore"") with open(tmp_path / filename, ""w"") as schema_file: json.dump(test_schema, schema_file, indent=4) # # Integration # @pytest.mark.integration @pytest.mark.elasticsearch"," JSON schema with the master version and only mocked nodes. ",10,82,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def mock_json_schema(request, monkeypatch, tmp_path): # Do not patch integration tests if ""integration"" in request.keywords: return # Mock the subclasses list to make it very small, containing only mock nodes monkeypatch.setattr( haystack.nodes._json_schema, ""find_subclasses_in_modules"", lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)], ) # Point the JSON schema path to tmp_path monkeypatch.setattr(haystack.pipelines.config, ""JSON_SCHEMAS_PATH"", tmp_path) # Generate mock schema in tmp_path filename = f""haystack-pipeline-master.schema.json"" test_schema = _json_schema.get_json_schema(filename=filename, version=""ignore"") with open(tmp_path / filename, ""w"") as schema_file: json.dump(test_schema, schema_file, indent=4) # # Integration # @pytest.mark.integration @pytest.mark.elasticsearch ``` ###Assistant : JSON schema with the master version and only mocked nodes. " 604,"def _has_nchw_support(): explicitly_on_cpu = _is_current_explicit_device(""CPU"") gpus_available = bool(_get_available_gpus()) return not explicitly_on_cpu and gpus_available # VARIABLE MANIPULATION ","Check whether the current scope supports NCHW ops. TensorFlow does not support NCHW on CPU. Therefore we check if we are not explicitly put on CPU, and have GPUs available. In this case there will be soft-placing on the GPU device. Returns: bool: if the current scope device placement would support nchw ",52,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _has_nchw_support(): explicitly_on_cpu = _is_current_explicit_device(""CPU"") gpus_available = bool(_get_available_gpus()) return not explicitly_on_cpu and gpus_available # VARIABLE MANIPULATION ``` ###Assistant : Check whether the current scope supports NCHW ops. TensorFlow does not support NCHW on CPU. Therefore we check if we are not explicitly put on CPU, and have GPUs available. In this case there will be soft-placing on the GPU device. Returns: bool: if the current scope device placement would support nchw " 605,"def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] n_dim = tensor.dim() if n_dim == 4: n_img = len(tensor) img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 3: img_np = tensor.numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 2: img_np = tensor.numpy() else: raise TypeError( 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim)) if out_type == np.uint8: img_np = (img_np * 255.0).round() # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. return img_np.astype(out_type) "," Converts a torch Tensor into an image Numpy array of BGR channel order Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) # -------------------------------------------- # Augmentation, flipe and/or rotate # -------------------------------------------- # The following two are enough. 
# (1) augmet_img: numpy image of WxHxC or WxH # (2) augment_img_tensor4: tensor image 1xCxWxH # -------------------------------------------- ",62,117,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] n_dim = tensor.dim() if n_dim == 4: n_img = len(tensor) img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 3: img_np = tensor.numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 2: img_np = tensor.numpy() else: raise TypeError( 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim)) if out_type == np.uint8: img_np = (img_np * 255.0).round() # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. return img_np.astype(out_type) ``` ###Assistant : Converts a torch Tensor into an image Numpy array of BGR channel order Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) # -------------------------------------------- # Augmentation, flipe and/or rotate # -------------------------------------------- # The following two are enough. # (1) augmet_img: numpy image of WxHxC or WxH # (2) augment_img_tensor4: tensor image 1xCxWxH # -------------------------------------------- " 606,"def get_local_ip_address() -> str: try: ip_address = requests.get( ""https://checkip.amazonaws.com/"", timeout=3 ).text.strip() except (requests.ConnectionError, requests.exceptions.ReadTimeout): ip_address = ""No internet connection"" return ip_address ","Gets the public IP address or returns the string ""No internet connection"" if unable to obtain it.",17,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_local_ip_address() -> str: try: ip_address = requests.get( ""https://checkip.amazonaws.com/"", timeout=3 ).text.strip() except (requests.ConnectionError, requests.exceptions.ReadTimeout): ip_address = ""No internet connection"" return ip_address ``` ###Assistant : Gets the public IP address or returns the string ""No internet connection"" if unable to obtain it. " 607,"def get_tip(self): tips = self.get_tips() if len(tips) == 0: raise Exception(""tip not found"") else: return tips[0] ","Returns the TipableVMobject instance's (first) tip, otherwise throws an exception.",10,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_tip(self): tips = self.get_tips() if len(tips) == 0: raise Exception(""tip not found"") else: return tips[0] ``` ###Assistant : Returns the TipableVMobject instance's (first) tip, otherwise throws an exception. " 608,"def shuffle(self, func, lengths, **kwargs): num_splits = len(lengths) # We add these to kwargs and will pop them off before performing the operation. kwargs[""manual_partition""] = True kwargs[""_lengths""] = lengths args = [self.axis, func, num_splits, False] args.extend(self.list_of_blocks) return self._wrap_partitions(self.deploy_axis_func(*args, **kwargs)) "," Shuffle the order of the data in this axis partition based on the `lengths`. 
Parameters ---------- func : callable The function to apply before splitting. lengths : list The list of partition lengths to split the result into. **kwargs : dict Additional keywords arguments to be passed in `func`. Returns ------- list A list of `PandasDataframePartition` objects split by `lengths`. ",60,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def shuffle(self, func, lengths, **kwargs): num_splits = len(lengths) # We add these to kwargs and will pop them off before performing the operation. kwargs[""manual_partition""] = True kwargs[""_lengths""] = lengths args = [self.axis, func, num_splits, False] args.extend(self.list_of_blocks) return self._wrap_partitions(self.deploy_axis_func(*args, **kwargs)) ``` ###Assistant : Shuffle the order of the data in this axis partition based on the `lengths`. Parameters ---------- func : callable The function to apply before splitting. lengths : list The list of partition lengths to split the result into. **kwargs : dict Additional keywords arguments to be passed in `func`. Returns ------- list A list of `PandasDataframePartition` objects split by `lengths`. " 609,"def test_background_add_room_type_column(self): # Create a room without a type room_id = self._generate_room() # Get event_id of the m.room.create event event_id = self.get_success( self.store.db_pool.simple_select_one_onecol( table=""current_state_events"", keyvalues={ ""room_id"": room_id, ""type"": ""m.room.create"", }, retcol=""event_id"", ) ) # Fake a room creation event with a room type event = { ""content"": { ""creator"": ""@user:server.org"", ""room_version"": ""9"", ""type"": RoomTypes.SPACE, }, ""type"": ""m.room.create"", } self.get_success( self.store.db_pool.simple_update( table=""event_json"", keyvalues={""event_id"": event_id}, updatevalues={""json"": json.dumps(event)}, desc=""test"", ) ) # Insert and run the background update self.get_success( self.store.db_pool.simple_insert( ""background_updates"", { ""update_name"": _BackgroundUpdates.ADD_ROOM_TYPE_COLUMN, ""progress_json"": ""{}"", }, ) ) # ... and tell the DataStore that it hasn't finished all updates yet self.store.db_pool.updates._all_done = False # Now let's actually drive the updates to completion self.wait_for_background_updates() # Make sure the background update filled in the room type room_type_after = self.get_success( self.store.db_pool.simple_select_one_onecol( table=""room_stats_state"", keyvalues={""room_id"": room_id}, retcol=""room_type"", allow_none=True, ) ) self.assertEqual(room_type_after, RoomTypes.SPACE) ","Test that the background update to populate the `room_type` column in `room_stats_state` works properly. ",14,136,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_background_add_room_type_column(self): # Create a room without a type room_id = self._generate_room() # Get event_id of the m.room.create event event_id = self.get_success( self.store.db_pool.simple_select_one_onecol( table=""current_state_events"", keyvalues={ ""room_id"": room_id, ""type"": ""m.room.create"", }, retcol=""event_id"", ) ) # Fake a room creation event with a room type event = { ""content"": { ""creator"": ""@user:server.org"", ""room_version"": ""9"", ""type"": RoomTypes.SPACE, }, ""type"": ""m.room.create"", } self.get_success( self.store.db_pool.simple_update( table=""event_json"", keyvalues={""event_id"": event_id}, updatevalues={""json"": json.dumps(event)}, desc=""test"", ) ) # Insert and run the background update self.get_success( self.store.db_pool.simple_insert( ""background_updates"", { ""update_name"": _BackgroundUpdates.ADD_ROOM_TYPE_COLUMN, ""progress_json"": ""{}"", }, ) ) # ... and tell the DataStore that it hasn't finished all updates yet self.store.db_pool.updates._all_done = False # Now let's actually drive the updates to completion self.wait_for_background_updates() # Make sure the background update filled in the room type room_type_after = self.get_success( self.store.db_pool.simple_select_one_onecol( table=""room_stats_state"", keyvalues={""room_id"": room_id}, retcol=""room_type"", allow_none=True, ) ) self.assertEqual(room_type_after, RoomTypes.SPACE) ``` ###Assistant : Test that the background update to populate the `room_type` column in `room_stats_state` works properly. " 610,"def get_feedback(): labels = DOCUMENT_STORE.get_all_labels() return labels @router.delete(""/feedback"")"," This endpoint allows the API user to retrieve all the feedback that has been submitted through the `POST /feedback` endpoint. ",20,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_feedback(): labels = DOCUMENT_STORE.get_all_labels() return labels @router.delete(""/feedback"") ``` ###Assistant : This endpoint allows the API user to retrieve all the feedback that has been submitted through the `POST /feedback` endpoint. " 611,"def get_formatter_for_filename(fn, **options): fn = basename(fn) for modname, name, _, filenames, _ in FORMATTERS.values(): for filename in filenames: if _fn_matches(fn, filename): if name not in _formatter_cache: _load_formatters(modname) return _formatter_cache[name](**options) for cls in find_plugin_formatters(): for filename in cls.filenames: if _fn_matches(fn, filename): return cls(**options) raise ClassNotFound(""no formatter found for file name %r"" % fn) ","Lookup and instantiate a formatter by filename pattern. Raises ClassNotFound if not found. ",13,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_formatter_for_filename(fn, **options): fn = basename(fn) for modname, name, _, filenames, _ in FORMATTERS.values(): for filename in filenames: if _fn_matches(fn, filename): if name not in _formatter_cache: _load_formatters(modname) return _formatter_cache[name](**options) for cls in find_plugin_formatters(): for filename in cls.filenames: if _fn_matches(fn, filename): return cls(**options) raise ClassNotFound(""no formatter found for file name %r"" % fn) ``` ###Assistant : Lookup and instantiate a formatter by filename pattern. Raises ClassNotFound if not found. 
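A brief usage example, assuming this helper is the one exposed as `pygments.formatters.get_formatter_for_filename` (extra keyword arguments are forwarded to the matched formatter class):
```Python
# Pick a formatter from a file name pattern; *.html should resolve to the
# HTML formatter, with linenos=True passed through as a formatter option.
from pygments.formatters import get_formatter_for_filename

formatter = get_formatter_for_filename('report.html', linenos=True)
print(type(formatter).__name__)
```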
" 612,"def setup_sigterm_on_parent_death(): try: import ctypes import signal libc = ctypes.CDLL(""libc.so.6"") # Set the parent process death signal of the command process to SIGTERM. libc.prctl(1, signal.SIGTERM) # PR_SET_PDEATHSIG, see prctl.h except OSError as e: _logger.warning(f""Setup libc.prctl PR_SET_PDEATHSIG failed, error {repr(e)}."") "," Uses prctl to automatically send SIGTERM to the child process when its parent is dead. The child process itself should handle SIGTERM properly. ",23,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def setup_sigterm_on_parent_death(): try: import ctypes import signal libc = ctypes.CDLL(""libc.so.6"") # Set the parent process death signal of the command process to SIGTERM. libc.prctl(1, signal.SIGTERM) # PR_SET_PDEATHSIG, see prctl.h except OSError as e: _logger.warning(f""Setup libc.prctl PR_SET_PDEATHSIG failed, error {repr(e)}."") ``` ###Assistant : Uses prctl to automatically send SIGTERM to the child process when its parent is dead. The child process itself should handle SIGTERM properly. " 613,"def force_reads(self) -> ""Dataset[T]"": blocks = self.get_internal_block_refs() bar = ProgressBar(""Force reads"", len(blocks)) bar.block_until_complete(blocks) return self ","Force full evaluation of the blocks of this dataset. This can be used to read all blocks into memory. By default, Datasets doesn't read blocks from the datasource until the first transform. ",32,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def force_reads(self) -> ""Dataset[T]"": blocks = self.get_internal_block_refs() bar = ProgressBar(""Force reads"", len(blocks)) bar.block_until_complete(blocks) return self ``` ###Assistant : Force full evaluation of the blocks of this dataset. This can be used to read all blocks into memory. By default, Datasets doesn't read blocks from the datasource until the first transform. " 614,"def __iter__(self) -> Iterator[tuple[Widget, Region, Region, Size, Size]]: layers = sorted(self.map.items(), key=lambda item: item[1].order, reverse=True) intersection = Region.intersection for widget, (region, _order, clip, virtual_size, container_size) in layers: yield ( widget, intersection(region, clip), region, virtual_size, container_size, ) ","Iterate map with information regarding each widget and is position Yields: Iterator[tuple[Widget, Region, Region, Size, Size]]: Iterates a tuple of Widget, clip region, region, virtual size, and container size. ",29,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __iter__(self) -> Iterator[tuple[Widget, Region, Region, Size, Size]]: layers = sorted(self.map.items(), key=lambda item: item[1].order, reverse=True) intersection = Region.intersection for widget, (region, _order, clip, virtual_size, container_size) in layers: yield ( widget, intersection(region, clip), region, virtual_size, container_size, ) ``` ###Assistant : Iterate map with information regarding each widget and is position Yields: Iterator[tuple[Widget, Region, Region, Size, Size]]: Iterates a tuple of Widget, clip region, region, virtual size, and container size. 
" 615,"def get_names_flat(adtype): listnames = [] names = adtype.names for name in names: listnames.append(name) current = adtype[name] if current.names is not None: listnames.extend(get_names_flat(current)) return tuple(listnames) "," Returns the field names of the input datatype as a tuple. Input datatype has to have fields otherwise error is raised. Nested structure are flattened beforehand. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None False >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names_flat(adtype) ('a', 'b', 'ba', 'bb') ",72,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_names_flat(adtype): listnames = [] names = adtype.names for name in names: listnames.append(name) current = adtype[name] if current.names is not None: listnames.extend(get_names_flat(current)) return tuple(listnames) ``` ###Assistant : Returns the field names of the input datatype as a tuple. Input datatype has to have fields otherwise error is raised. Nested structure are flattened beforehand. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None False >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names_flat(adtype) ('a', 'b', 'ba', 'bb') " 616,"def test_memory_leak(self): import gc import weakref results = {} for kind in plotting.PlotAccessor._all_kinds: args = {} if kind in [""hexbin"", ""scatter"", ""pie""]: df = DataFrame( { ""A"": np.random.uniform(size=20), ""B"": np.random.uniform(size=20), ""C"": np.arange(20) + np.random.uniform(size=20), } ) args = {""x"": ""A"", ""y"": ""B""} elif kind == ""area"": df = tm.makeTimeDataFrame().abs() else: df = tm.makeTimeDataFrame() # Use a weakref so we can see if the object gets collected without # also preventing it from being collected results[kind] = weakref.proxy(df.plot(kind=kind, **args)) # have matplotlib delete all the figures tm.close() # force a garbage collection gc.collect() msg = ""weakly-referenced object no longer exists"" for key in results: # check that every plot was collected with pytest.raises(ReferenceError, match=msg): # need to actually access something to get an error results[key].lines ",Check that every plot type gets properly collected.,8,124,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_memory_leak(self): import gc import weakref results = {} for kind in plotting.PlotAccessor._all_kinds: args = {} if kind in [""hexbin"", ""scatter"", ""pie""]: df = DataFrame( { ""A"": np.random.uniform(size=20), ""B"": np.random.uniform(size=20), ""C"": np.arange(20) + np.random.uniform(size=20), } ) args = {""x"": ""A"", ""y"": ""B""} elif kind == ""area"": df = tm.makeTimeDataFrame().abs() else: df = tm.makeTimeDataFrame() # Use a weakref so we can see if the object gets collected without # also preventing it from being collected results[kind] = weakref.proxy(df.plot(kind=kind, **args)) # have matplotlib delete all the figures tm.close() # force a garbage collection gc.collect() msg = ""weakly-referenced object no longer exists"" for key in results: # check that every plot was collected with pytest.raises(ReferenceError, match=msg): # need to actually access something to get an error results[key].lines ``` ###Assistant : Check that every plot type gets properly collected. " 617,"def disabled_excepthook() -> Iterator[None]: old_excepthook = sys.excepthook sys.excepthook = sys.__excepthook__ try: yield finally: # If the code we did run did change sys.excepthook, we leave it # unchanged. Otherwise, we reset it. if sys.excepthook is sys.__excepthook__: sys.excepthook = old_excepthook ",Run code with the exception hook temporarily disabled.,8,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def disabled_excepthook() -> Iterator[None]: old_excepthook = sys.excepthook sys.excepthook = sys.__excepthook__ try: yield finally: # If the code we did run did change sys.excepthook, we leave it # unchanged. Otherwise, we reset it. if sys.excepthook is sys.__excepthook__: sys.excepthook = old_excepthook ``` ###Assistant : Run code with the exception hook temporarily disabled. " 618,"def trigger_import(*dfs): if ASV_USE_STORAGE_FORMAT != ""hdk"" or ASV_USE_IMPL == ""pandas"": return from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import ( DbWorker, ) for df in dfs: df.shape # to trigger real execution df._query_compiler._modin_frame._partitions[0][ 0 ].frame_id = DbWorker().import_arrow_table( df._query_compiler._modin_frame._partitions[0][0].get() ) # to trigger real execution "," Trigger import execution for DataFrames obtained by HDK engine. Parameters ---------- *dfs : iterable DataFrames to trigger import. ",18,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def trigger_import(*dfs): if ASV_USE_STORAGE_FORMAT != ""hdk"" or ASV_USE_IMPL == ""pandas"": return from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import ( DbWorker, ) for df in dfs: df.shape # to trigger real execution df._query_compiler._modin_frame._partitions[0][ 0 ].frame_id = DbWorker().import_arrow_table( df._query_compiler._modin_frame._partitions[0][0].get() ) # to trigger real execution ``` ###Assistant : Trigger import execution for DataFrames obtained by HDK engine. Parameters ---------- *dfs : iterable DataFrames to trigger import. 
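A hedged call sketch follows; it assumes the benchmark environment is configured for the HDK storage format (the `ASV_USE_STORAGE_FORMAT` / `ASV_USE_IMPL` settings checked above), otherwise the function returns without doing anything:
```Python
# Build two Modin DataFrames and force their Arrow tables into the HDK worker.
import modin.pandas as pd

df_a = pd.DataFrame({'a': range(10)})
df_b = pd.DataFrame({'b': range(10)})
trigger_import(df_a, df_b)  # no-op unless the HDK storage format is active
```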
" 619,"def _jacfwd(f, primals): jac_flat = [] flat_primals = tf.nest.flatten(primals) tangent_mask = [tf.zeros_like(primal) for primal in flat_primals] for primal_index, primal in enumerate(flat_primals): primal_vector = tf.reshape(primal, [-1]) primal_vector_length = tf.size(primal_vector) jac_columns = [] for element_index in tf.range(primal_vector_length): mask = tf.one_hot(element_index, primal_vector_length) tangent_mask[primal_index] = tf.reshape(mask, tf.shape(primal)) jac_columns.append( tf.nest.map_structure( functools.partial(tf.reshape, shape=[-1]), _jvp( f, primals, tf.nest.pack_sequence_as(primals, tangent_mask), )[1], ) ) jac_flat.append(tf.stack(jac_columns, axis=1)) tangent_mask[primal_index] = tf.zeros_like(primal) return tf.nest.pack_sequence_as(primals, jac_flat) ",Compute the jacobian of `f` at `primals` using forward-mode autodiff.,10,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _jacfwd(f, primals): jac_flat = [] flat_primals = tf.nest.flatten(primals) tangent_mask = [tf.zeros_like(primal) for primal in flat_primals] for primal_index, primal in enumerate(flat_primals): primal_vector = tf.reshape(primal, [-1]) primal_vector_length = tf.size(primal_vector) jac_columns = [] for element_index in tf.range(primal_vector_length): mask = tf.one_hot(element_index, primal_vector_length) tangent_mask[primal_index] = tf.reshape(mask, tf.shape(primal)) jac_columns.append( tf.nest.map_structure( functools.partial(tf.reshape, shape=[-1]), _jvp( f, primals, tf.nest.pack_sequence_as(primals, tangent_mask), )[1], ) ) jac_flat.append(tf.stack(jac_columns, axis=1)) tangent_mask[primal_index] = tf.zeros_like(primal) return tf.nest.pack_sequence_as(primals, jac_flat) ``` ###Assistant : Compute the jacobian of `f` at `primals` using forward-mode autodiff. " 620,"def record_timing(name, duration=None, description=None): timing_information = getattr(flask.g, ""timing_information"", {}) if name in timing_information: raise KeyError(f'Duplicate resource name ""{name}"" found.') timing_information[name] = {""dur"": round(duration * 1000), ""desc"": description} setattr(flask.g, ""timing_information"", timing_information) ","Records timing information for a server resource. :param name: The name of the resource. :type name: string :param duration: The time in seconds to report. Internally, this is rounded to the nearest millisecond. :type duration: float or None :param description: A description of the resource. :type description: string or None ",50,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def record_timing(name, duration=None, description=None): timing_information = getattr(flask.g, ""timing_information"", {}) if name in timing_information: raise KeyError(f'Duplicate resource name ""{name}"" found.') timing_information[name] = {""dur"": round(duration * 1000), ""desc"": description} setattr(flask.g, ""timing_information"", timing_information) ``` ###Assistant : Records timing information for a server resource. :param name: The name of the resource. :type name: string :param duration: The time in seconds to report. Internally, this is rounded to the nearest millisecond. :type duration: float or None :param description: A description of the resource. 
:type description: string or None " 621,"def admin_actions(context): context[""action_index""] = context.get(""action_index"", -1) + 1 return context @register.tag(name=""admin_actions"")"," Track the number of times the action field has been rendered on the page, so we know which value to use. ",21,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def admin_actions(context): context[""action_index""] = context.get(""action_index"", -1) + 1 return context @register.tag(name=""admin_actions"") ``` ###Assistant : Track the number of times the action field has been rendered on the page, so we know which value to use. " 622,"async def test_lights(hass, mock_bridge_v2, v2_resources_test_data): await mock_bridge_v2.api.load_test_data(v2_resources_test_data) await setup_platform(hass, mock_bridge_v2, ""light"") # there shouldn't have been any requests at this point assert len(mock_bridge_v2.mock_requests) == 0 # 6 entities should be created from test data (grouped_lights are disabled by default) assert len(hass.states.async_all()) == 6 # test light which supports color and color temperature light_1 = hass.states.get(""light.hue_light_with_color_and_color_temperature_1"") assert light_1 is not None assert ( light_1.attributes[""friendly_name""] == ""Hue light with color and color temperature 1"" ) assert light_1.state == ""on"" assert light_1.attributes[""brightness""] == int(46.85 / 100 * 255) assert light_1.attributes[""mode""] == ""normal"" assert light_1.attributes[""color_mode""] == COLOR_MODE_XY assert set(light_1.attributes[""supported_color_modes""]) == { COLOR_MODE_COLOR_TEMP, COLOR_MODE_XY, } assert light_1.attributes[""xy_color""] == (0.5614, 0.4058) assert light_1.attributes[""min_mireds""] == 153 assert light_1.attributes[""max_mireds""] == 500 assert light_1.attributes[""dynamics""] == ""dynamic_palette"" assert light_1.attributes[""effect_list""] == [""None"", ""candle"", ""fire""] assert light_1.attributes[""effect""] == ""None"" # test light which supports color temperature only light_2 = hass.states.get(""light.hue_light_with_color_temperature_only"") assert light_2 is not None assert ( light_2.attributes[""friendly_name""] == ""Hue light with color temperature only"" ) assert light_2.state == ""off"" assert light_2.attributes[""mode""] == ""normal"" assert light_2.attributes[""supported_color_modes""] == [COLOR_MODE_COLOR_TEMP] assert light_2.attributes[""min_mireds""] == 153 assert light_2.attributes[""max_mireds""] == 454 assert light_2.attributes[""dynamics""] == ""none"" assert light_2.attributes[""effect_list""] == [""None"", ""candle"", ""sunrise""] # test light which supports color only light_3 = hass.states.get(""light.hue_light_with_color_only"") assert light_3 is not None assert light_3.attributes[""friendly_name""] == ""Hue light with color only"" assert light_3.state == ""on"" assert light_3.attributes[""brightness""] == 128 assert light_3.attributes[""mode""] == ""normal"" assert light_3.attributes[""supported_color_modes""] == [COLOR_MODE_XY] assert light_3.attributes[""color_mode""] == COLOR_MODE_XY assert light_3.attributes[""dynamics""] == ""dynamic_palette"" # test light which supports on/off only light_4 = hass.states.get(""light.hue_on_off_light"") assert light_4 is not None assert light_4.attributes[""friendly_name""] == ""Hue on/off light"" assert light_4.state == ""off"" assert light_4.attributes[""mode""] == ""normal"" assert light_4.attributes[""supported_color_modes""] == [] ",Test if 
all v2 lights get created with correct features.,10,264,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_lights(hass, mock_bridge_v2, v2_resources_test_data): await mock_bridge_v2.api.load_test_data(v2_resources_test_data) await setup_platform(hass, mock_bridge_v2, ""light"") # there shouldn't have been any requests at this point assert len(mock_bridge_v2.mock_requests) == 0 # 6 entities should be created from test data (grouped_lights are disabled by default) assert len(hass.states.async_all()) == 6 # test light which supports color and color temperature light_1 = hass.states.get(""light.hue_light_with_color_and_color_temperature_1"") assert light_1 is not None assert ( light_1.attributes[""friendly_name""] == ""Hue light with color and color temperature 1"" ) assert light_1.state == ""on"" assert light_1.attributes[""brightness""] == int(46.85 / 100 * 255) assert light_1.attributes[""mode""] == ""normal"" assert light_1.attributes[""color_mode""] == COLOR_MODE_XY assert set(light_1.attributes[""supported_color_modes""]) == { COLOR_MODE_COLOR_TEMP, COLOR_MODE_XY, } assert light_1.attributes[""xy_color""] == (0.5614, 0.4058) assert light_1.attributes[""min_mireds""] == 153 assert light_1.attributes[""max_mireds""] == 500 assert light_1.attributes[""dynamics""] == ""dynamic_palette"" assert light_1.attributes[""effect_list""] == [""None"", ""candle"", ""fire""] assert light_1.attributes[""effect""] == ""None"" # test light which supports color temperature only light_2 = hass.states.get(""light.hue_light_with_color_temperature_only"") assert light_2 is not None assert ( light_2.attributes[""friendly_name""] == ""Hue light with color temperature only"" ) assert light_2.state == ""off"" assert light_2.attributes[""mode""] == ""normal"" assert light_2.attributes[""supported_color_modes""] == [COLOR_MODE_COLOR_TEMP] assert light_2.attributes[""min_mireds""] == 153 assert light_2.attributes[""max_mireds""] == 454 assert light_2.attributes[""dynamics""] == ""none"" assert light_2.attributes[""effect_list""] == [""None"", ""candle"", ""sunrise""] # test light which supports color only light_3 = hass.states.get(""light.hue_light_with_color_only"") assert light_3 is not None assert light_3.attributes[""friendly_name""] == ""Hue light with color only"" assert light_3.state == ""on"" assert light_3.attributes[""brightness""] == 128 assert light_3.attributes[""mode""] == ""normal"" assert light_3.attributes[""supported_color_modes""] == [COLOR_MODE_XY] assert light_3.attributes[""color_mode""] == COLOR_MODE_XY assert light_3.attributes[""dynamics""] == ""dynamic_palette"" # test light which supports on/off only light_4 = hass.states.get(""light.hue_on_off_light"") assert light_4 is not None assert light_4.attributes[""friendly_name""] == ""Hue on/off light"" assert light_4.state == ""off"" assert light_4.attributes[""mode""] == ""normal"" assert light_4.attributes[""supported_color_modes""] == [] ``` ###Assistant : Test if all v2 lights get created with correct features. " 623,"def setup_data(self, path): for message, new_episode in super().setup_data(path): assert ( message['text'] == '__SILENCE__' ), 'The expected original context string is not found!' message['text'] = 'Person 1:' yield message, new_episode "," Modify each output message to add in an OPT-compatible context string. ",11,29,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def setup_data(self, path): for message, new_episode in super().setup_data(path): assert ( message['text'] == '__SILENCE__' ), 'The expected original context string is not found!' message['text'] = 'Person 1:' yield message, new_episode ``` ###Assistant : Modify each output message to add in an OPT-compatible context string. " 624,"def _get_input_locations(self) -> List[str]: if not self._args.batch_mode or os.path.isfile(self._args.input_dir): return [self._args.input_dir] # Not batch mode or a single file retval = [os.path.join(self._args.input_dir, fname) for fname in os.listdir(self._args.input_dir) if os.path.isdir(os.path.join(self._args.input_dir, fname)) or os.path.splitext(fname)[-1].lower() in _video_extensions] logger.debug(""Input locations: %s"", retval) return retval "," Obtain the full path to input locations. Will be a list of locations if batch mode is selected, or a containing a single location if batch mode is not selected. Returns ------- list: The list of input location paths ",39,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_input_locations(self) -> List[str]: if not self._args.batch_mode or os.path.isfile(self._args.input_dir): return [self._args.input_dir] # Not batch mode or a single file retval = [os.path.join(self._args.input_dir, fname) for fname in os.listdir(self._args.input_dir) if os.path.isdir(os.path.join(self._args.input_dir, fname)) or os.path.splitext(fname)[-1].lower() in _video_extensions] logger.debug(""Input locations: %s"", retval) return retval ``` ###Assistant : Obtain the full path to input locations. Will be a list of locations if batch mode is selected, or a containing a single location if batch mode is not selected. Returns ------- list: The list of input location paths " 625,"def get_party_gle_currency(party_type, party, company): def generator(): existing_gle_currency = frappe.db.sql( , {""company"": company, ""party_type"": party_type, ""party"": party}, ) return existing_gle_currency[0][0] if existing_gle_currency else None return frappe.local_cache( ""party_gle_currency"", (party_type, party, company), generator, regenerate_if_none=True ) ","select account_currency from `tabGL Entry` where docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s limit 1",15,32,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_party_gle_currency(party_type, party, company): def generator(): existing_gle_currency = frappe.db.sql( , {""company"": company, ""party_type"": party_type, ""party"": party}, ) return existing_gle_currency[0][0] if existing_gle_currency else None return frappe.local_cache( ""party_gle_currency"", (party_type, party, company), generator, regenerate_if_none=True ) ``` ###Assistant : select account_currency from `tabGL Entry` where docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s limit 1 " 626,"def load_breast_cancer(*, return_X_y=False, as_frame=False): data_file_name = ""breast_cancer.csv"" data, target, target_names, fdescr = load_csv_data( data_file_name=data_file_name, descr_file_name=""breast_cancer.rst"" ) feature_names = np.array( [ ""mean radius"", ""mean texture"", ""mean perimeter"", ""mean area"", ""mean smoothness"", ""mean compactness"", ""mean concavity"", ""mean concave points"", ""mean symmetry"", ""mean fractal dimension"", ""radius error"", ""texture error"", ""perimeter error"", ""area error"", ""smoothness error"", ""compactness error"", ""concavity error"", ""concave points error"", ""symmetry error"", ""fractal dimension error"", ""worst radius"", ""worst texture"", ""worst perimeter"", ""worst area"", ""worst smoothness"", ""worst compactness"", ""worst concavity"", ""worst concave points"", ""worst symmetry"", ""worst fractal dimension"", ] ) frame = None target_columns = [ ""target"", ] if as_frame: frame, data, target = _convert_data_dataframe( ""load_breast_cancer"", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE, ) ","Load and return the breast cancer wisconsin dataset (classification). The breast cancer dataset is a classic and very easy binary classification dataset. ================= ============== Classes 2 Samples per class 212(M),357(B) Samples total 569 Dimensionality 30 Features real, positive ================= ============== The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is downloaded from: https://goo.gl/U2Uwz2 Read more in the :ref:`User Guide `. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (569, 30) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target : {ndarray, Series} of shape (569,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names : list The names of the dataset columns. target_names : list The names of target classes. frame : DataFrame of shape (569, 31) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR : str The full description of the dataset. 
filename : str The path to the location of the data. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D ndarray of shape (569, 30) with each row representing one sample and each column representing the features. The second ndarray of shape (569,) contains the target samples. If `as_frame=True`, both arrays are pandas objects, i.e. `X` a dataframe and `y` a series. .. versionadded:: 0.18 Examples -------- Let's say you are interested in the samples 10, 50, and 85, and want to know their class name. >>> from sklearn.datasets import load_breast_cancer >>> data = load_breast_cancer() >>> data.target[[10, 50, 85]] array([0, 1, 0]) >>> list(data.target_names) ['malignant', 'benign'] ",356,125,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_breast_cancer(*, return_X_y=False, as_frame=False): data_file_name = ""breast_cancer.csv"" data, target, target_names, fdescr = load_csv_data( data_file_name=data_file_name, descr_file_name=""breast_cancer.rst"" ) feature_names = np.array( [ ""mean radius"", ""mean texture"", ""mean perimeter"", ""mean area"", ""mean smoothness"", ""mean compactness"", ""mean concavity"", ""mean concave points"", ""mean symmetry"", ""mean fractal dimension"", ""radius error"", ""texture error"", ""perimeter error"", ""area error"", ""smoothness error"", ""compactness error"", ""concavity error"", ""concave points error"", ""symmetry error"", ""fractal dimension error"", ""worst radius"", ""worst texture"", ""worst perimeter"", ""worst area"", ""worst smoothness"", ""worst compactness"", ""worst concavity"", ""worst concave points"", ""worst symmetry"", ""worst fractal dimension"", ] ) frame = None target_columns = [ ""target"", ] if as_frame: frame, data, target = _convert_data_dataframe( ""load_breast_cancer"", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE, ) ``` ###Assistant : Load and return the breast cancer wisconsin dataset (classification). The breast cancer dataset is a classic and very easy binary classification dataset. ================= ============== Classes 2 Samples per class 212(M),357(B) Samples total 569 Dimensionality 30 Features real, positive ================= ============== The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is downloaded from: https://goo.gl/U2Uwz2 Read more in the :ref:`User Guide `. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (569, 30) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target : {ndarray, Series} of shape (569,) The classification target. If `as_frame=True`, `target` will be a pandas Series. 
feature_names : list The names of the dataset columns. target_names : list The names of target classes. frame : DataFrame of shape (569, 31) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR : str The full description of the dataset. filename : str The path to the location of the data. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D ndarray of shape (569, 30) with each row representing one sample and each column representing the features. The second ndarray of shape (569,) contains the target samples. If `as_frame=True`, both arrays are pandas objects, i.e. `X` a dataframe and `y` a series. .. versionadded:: 0.18 Examples -------- Let's say you are interested in the samples 10, 50, and 85, and want to know their class name. >>> from sklearn.datasets import load_breast_cancer >>> data = load_breast_cancer() >>> data.target[[10, 50, 85]] array([0, 1, 0]) >>> list(data.target_names) ['malignant', 'benign'] " 627,"def get_filter_by_name(filtername, **options): cls = find_filter_class(filtername) if cls: return cls(**options) else: raise ClassNotFound('filter %r not found' % filtername) ","Return an instantiated filter. Options are passed to the filter initializer if wanted. Raise a ClassNotFound if not found. ",19,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_filter_by_name(filtername, **options): cls = find_filter_class(filtername) if cls: return cls(**options) else: raise ClassNotFound('filter %r not found' % filtername) ``` ###Assistant : Return an instantiated filter. Options are passed to the filter initializer if wanted. Raise a ClassNotFound if not found. " 628,"def wide_resnet50_2(pretrained=False, **kwargs): kwargs['width'] = 64 * 2 return _resnet('wide_resnet50_2', BottleneckBlock, 50, pretrained, **kwargs) ","Wide ResNet-50-2 model from `""Wide Residual Networks"" `_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet Examples: .. code-block:: python import paddle from paddle.vision.models import wide_resnet50_2 # build model model = wide_resnet50_2() # build model and load imagenet pretrained weight # model = wide_resnet50_2(pretrained=True) x = paddle.rand([1, 3, 224, 224]) out = model(x) print(out.shape) ",57,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wide_resnet50_2(pretrained=False, **kwargs): kwargs['width'] = 64 * 2 return _resnet('wide_resnet50_2', BottleneckBlock, 50, pretrained, **kwargs) ``` ###Assistant : Wide ResNet-50-2 model from `""Wide Residual Networks"" `_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet Examples: .. 
code-block:: python import paddle from paddle.vision.models import wide_resnet50_2 # build model model = wide_resnet50_2() # build model and load imagenet pretrained weight # model = wide_resnet50_2(pretrained=True) x = paddle.rand([1, 3, 224, 224]) out = model(x) print(out.shape) " 629,"def test_username_available(self) -> None: url = ""%s?username=%s"" % (self.url, ""allowed"") channel = self.make_request(""GET"", url, access_token=self.admin_user_tok) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertTrue(channel.json_body[""available""]) "," The endpoint should return a HTTPStatus.OK response if the username does not exist ",13,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_username_available(self) -> None: url = ""%s?username=%s"" % (self.url, ""allowed"") channel = self.make_request(""GET"", url, access_token=self.admin_user_tok) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertTrue(channel.json_body[""available""]) ``` ###Assistant : The endpoint should return a HTTPStatus.OK response if the username does not exist " 630,"def test_with_include_glob_filtering_case4a_include_strong(): incl_dom = {} incl_glob = {""*working""} incl_ent = {""binary_sensor.specificly_included""} excl_dom = {} excl_glob = {""*broken"", ""*notworking"", ""binary_sensor.*""} excl_ent = {""light.ignoreme""} testfilter = generate_filter( incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob ) assert testfilter(""sensor.working"") is True assert testfilter(""sensor.notworking"") is True # include is stronger assert testfilter(""sensor.broken"") is False assert testfilter(""light.test"") is False assert testfilter(""light.notworking"") is True # include is stronger assert testfilter(""light.ignoreme"") is False assert testfilter(""binary_sensor.not_working"") is True # include is stronger assert testfilter(""binary_sensor.another"") is False assert testfilter(""binary_sensor.specificly_included"") is True assert testfilter(""sun.sun"") is False ","Test case 4 - include and exclude specified, both have globs, and a specifically included entity.",16,84,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_with_include_glob_filtering_case4a_include_strong(): incl_dom = {} incl_glob = {""*working""} incl_ent = {""binary_sensor.specificly_included""} excl_dom = {} excl_glob = {""*broken"", ""*notworking"", ""binary_sensor.*""} excl_ent = {""light.ignoreme""} testfilter = generate_filter( incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob ) assert testfilter(""sensor.working"") is True assert testfilter(""sensor.notworking"") is True # include is stronger assert testfilter(""sensor.broken"") is False assert testfilter(""light.test"") is False assert testfilter(""light.notworking"") is True # include is stronger assert testfilter(""light.ignoreme"") is False assert testfilter(""binary_sensor.not_working"") is True # include is stronger assert testfilter(""binary_sensor.another"") is False assert testfilter(""binary_sensor.specificly_included"") is True assert testfilter(""sun.sun"") is False ``` ###Assistant : Test case 4 - include and exclude specified, both have globs, and a specifically included entity. 
" 631,"def get_sales_orders(self): so_filter = item_filter = """" bom_item = ""bom.item = so_item.item_code"" date_field_mapper = { ""from_date"": ("">="", ""so.transaction_date""), ""to_date"": (""<="", ""so.transaction_date""), ""from_delivery_date"": ("">="", ""so_item.delivery_date""), ""to_delivery_date"": (""<="", ""so_item.delivery_date""), } for field, value in date_field_mapper.items(): if self.get(field): so_filter += f"" and {value[1]} {value[0]} %({field})s"" for field in [""customer"", ""project"", ""sales_order_status""]: if self.get(field): so_field = ""status"" if field == ""sales_order_status"" else field so_filter += f"" and so.{so_field} = %({field})s"" if self.item_code and frappe.db.exists(""Item"", self.item_code): bom_item = self.get_bom_item() or bom_item item_filter += "" and so_item.item_code = %(item_code)s"" open_so = frappe.db.sql( f, self.as_dict(), as_dict=1, ) return open_so @frappe.whitelist()"," select distinct so.name, so.transaction_date, so.customer, so.base_grand_total from `tabSales Order` so, `tabSales Order Item` so_item where so_item.parent = so.name and so.docstatus = 1 and so.status not in (""Stopped"", ""Closed"") and so.company = %(company)s and so_item.qty > so_item.work_order_qty {so_filter} {item_filter} and (exists (select name from `tabBOM` bom where {bom_item} and bom.is_active = 1) or exists (select name from `tabPacked Item` pi where pi.parent = so.name and pi.parent_item = so_item.item_code and exists (select name from `tabBOM` bom where bom.item=pi.item_code and bom.is_active = 1))) ",80,93,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_sales_orders(self): so_filter = item_filter = """" bom_item = ""bom.item = so_item.item_code"" date_field_mapper = { ""from_date"": ("">="", ""so.transaction_date""), ""to_date"": (""<="", ""so.transaction_date""), ""from_delivery_date"": ("">="", ""so_item.delivery_date""), ""to_delivery_date"": (""<="", ""so_item.delivery_date""), } for field, value in date_field_mapper.items(): if self.get(field): so_filter += f"" and {value[1]} {value[0]} %({field})s"" for field in [""customer"", ""project"", ""sales_order_status""]: if self.get(field): so_field = ""status"" if field == ""sales_order_status"" else field so_filter += f"" and so.{so_field} = %({field})s"" if self.item_code and frappe.db.exists(""Item"", self.item_code): bom_item = self.get_bom_item() or bom_item item_filter += "" and so_item.item_code = %(item_code)s"" open_so = frappe.db.sql( f, self.as_dict(), as_dict=1, ) return open_so @frappe.whitelist() ``` ###Assistant : select distinct so.name, so.transaction_date, so.customer, so.base_grand_total from `tabSales Order` so, `tabSales Order Item` so_item where so_item.parent = so.name and so.docstatus = 1 and so.status not in (""Stopped"", ""Closed"") and so.company = %(company)s and so_item.qty > so_item.work_order_qty {so_filter} {item_filter} and (exists (select name from `tabBOM` bom where {bom_item} and bom.is_active = 1) or exists (select name from `tabPacked Item` pi where pi.parent = so.name and pi.parent_item = so_item.item_code and exists (select name from `tabBOM` bom where bom.item=pi.item_code and bom.is_active = 1))) " 632,"def test_copy_published_emits_signal(self): christmas_page = EventPage.objects.get(url_path=""/home/events/christmas/"") signal_fired = False signal_page = None ",Test that copying of a published page emits a page_published signal.,11,11,Python," ###User : Below is a Python method 
which does a task. Create a documentation for the below code : ```Python def test_copy_published_emits_signal(self): christmas_page = EventPage.objects.get(url_path=""/home/events/christmas/"") signal_fired = False signal_page = None ``` ###Assistant : Test that copying of a published page emits a page_published signal. " 633,"def get_all_tests(): test_root_dir = os.path.join(PATH_TO_TRANFORMERS, ""tests"") # test folders/files directly under `tests` folder tests = os.listdir(test_root_dir) tests = sorted( list(filter(lambda x: os.path.isdir(x) or x.startswith(""tests/test_""), [f""tests/{x}"" for x in tests])) ) # model specific test folders model_tests_folders = os.listdir(os.path.join(test_root_dir, ""models"")) model_test_folders = sorted(list(filter(os.path.isdir, [f""tests/models/{x}"" for x in model_tests_folders]))) tests.remove(""tests/models"") tests = model_test_folders + tests return tests "," Return a list of paths to all test folders and files under `tests`. All paths are rooted at `tests`. - folders under `tests`: `tokenization`, `pipelines`, etc. The folder `models` is excluded. - folders under `tests/models`: `bert`, `gpt2`, etc. - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc. ",46,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_all_tests(): test_root_dir = os.path.join(PATH_TO_TRANFORMERS, ""tests"") # test folders/files directly under `tests` folder tests = os.listdir(test_root_dir) tests = sorted( list(filter(lambda x: os.path.isdir(x) or x.startswith(""tests/test_""), [f""tests/{x}"" for x in tests])) ) # model specific test folders model_tests_folders = os.listdir(os.path.join(test_root_dir, ""models"")) model_test_folders = sorted(list(filter(os.path.isdir, [f""tests/models/{x}"" for x in model_tests_folders]))) tests.remove(""tests/models"") tests = model_test_folders + tests return tests ``` ###Assistant : Return a list of paths to all test folders and files under `tests`. All paths are rooted at `tests`. - folders under `tests`: `tokenization`, `pipelines`, etc. The folder `models` is excluded. - folders under `tests/models`: `bert`, `gpt2`, etc. - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc. " 634,"def test_vtrace(self): seq_len = 5 batch_size = 10 # Create log_rhos such that rho will span from near-zero to above the # clipping thresholds. In particular, calculate log_rhos in # [-2.5, 2.5), # so that rho is in approx [0.08, 12.2). space_w_time = Box(-1.0, 1.0, (seq_len, batch_size), np.float32) space_only_batch = Box(-1.0, 1.0, (batch_size,), np.float32) log_rhos = space_w_time.sample() / (batch_size * seq_len) log_rhos = 5 * (log_rhos - 0.5) # [0.0, 1.0) -> [-2.5, 2.5). 
values = { ""log_rhos"": log_rhos, # T, B where B_i: [0.9 / (i+1)] * T ""discounts"": np.array( [[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)] ), ""rewards"": space_w_time.sample(), ""values"": space_w_time.sample() / batch_size, ""bootstrap_value"": space_only_batch.sample() + 1.0, ""clip_rho_threshold"": 3.7, ""clip_pg_rho_threshold"": 2.2, } for fw, sess in framework_iterator(frameworks=(""torch"", ""tf""), session=True): vtrace = vtrace_tf if fw != ""torch"" else vtrace_torch output = vtrace.from_importance_weights(**values) if sess: output = sess.run(output) ground_truth_v = _ground_truth_calculation(vtrace, **values) check(output, ground_truth_v) ",Tests V-trace against ground truth data calculated in python.,9,150,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_vtrace(self): seq_len = 5 batch_size = 10 # Create log_rhos such that rho will span from near-zero to above the # clipping thresholds. In particular, calculate log_rhos in # [-2.5, 2.5), # so that rho is in approx [0.08, 12.2). space_w_time = Box(-1.0, 1.0, (seq_len, batch_size), np.float32) space_only_batch = Box(-1.0, 1.0, (batch_size,), np.float32) log_rhos = space_w_time.sample() / (batch_size * seq_len) log_rhos = 5 * (log_rhos - 0.5) # [0.0, 1.0) -> [-2.5, 2.5). values = { ""log_rhos"": log_rhos, # T, B where B_i: [0.9 / (i+1)] * T ""discounts"": np.array( [[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)] ), ""rewards"": space_w_time.sample(), ""values"": space_w_time.sample() / batch_size, ""bootstrap_value"": space_only_batch.sample() + 1.0, ""clip_rho_threshold"": 3.7, ""clip_pg_rho_threshold"": 2.2, } for fw, sess in framework_iterator(frameworks=(""torch"", ""tf""), session=True): vtrace = vtrace_tf if fw != ""torch"" else vtrace_torch output = vtrace.from_importance_weights(**values) if sess: output = sess.run(output) ground_truth_v = _ground_truth_calculation(vtrace, **values) check(output, ground_truth_v) ``` ###Assistant : Tests V-trace against ground truth data calculated in python. " 635,"def pdfdump(self, filename=None, **kargs): # type: (Optional[str], **Any) -> None from scapy.config import conf from scapy.utils import get_temp_file, ContextManagerSubprocess canvas = self.canvas_dump(**kargs) if filename is None: fname = get_temp_file(autoext=kargs.get(""suffix"", "".pdf"")) canvas.writePDFfile(fname) if WINDOWS and not conf.prog.pdfreader: os.startfile(fname) else: with ContextManagerSubprocess(conf.prog.pdfreader): subprocess.Popen([conf.prog.pdfreader, fname]) else: canvas.writePDFfile(filename) print() "," pdfdump(filename=None, layer_shift=0, rebuild=1) Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called. :param filename: the file's filename ",29,45,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def pdfdump(self, filename=None, **kargs): # type: (Optional[str], **Any) -> None from scapy.config import conf from scapy.utils import get_temp_file, ContextManagerSubprocess canvas = self.canvas_dump(**kargs) if filename is None: fname = get_temp_file(autoext=kargs.get(""suffix"", "".pdf"")) canvas.writePDFfile(fname) if WINDOWS and not conf.prog.pdfreader: os.startfile(fname) else: with ContextManagerSubprocess(conf.prog.pdfreader): subprocess.Popen([conf.prog.pdfreader, fname]) else: canvas.writePDFfile(filename) print() ``` ###Assistant : pdfdump(filename=None, layer_shift=0, rebuild=1) Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called. :param filename: the file's filename " 636,"def index_sample(x, index): x_s = x.shape dim = len(index.shape) - 1 assert x_s[:dim] == index.shape[:dim] if len(x_s) == 3 and dim == 1: r_x = paddle.reshape(x, shape=[-1, x_s[1], x_s[-1]]) else: r_x = paddle.reshape(x, shape=[-1, x_s[-1]]) index = paddle.reshape(index, shape=[len(r_x), -1, 1]) # Generate arange index, shape like index arr_index = paddle.arange(start=0, end=len(index), dtype=index.dtype) arr_index = paddle.unsqueeze(arr_index, axis=[1, 2]) arr_index = paddle.expand(arr_index, index.shape) # Generate new index new_index = paddle.concat((arr_index, index), -1) new_index = paddle.reshape(new_index, (-1, 2)) # Get output out = paddle.gather_nd(r_x, new_index) if len(x_s) == 3 and dim == 2: out = paddle.reshape(out, shape=[x_s[0], x_s[1], -1]) else: out = paddle.reshape(out, shape=[x_s[0], -1]) return out "," Select input value according to index Args: input: input matrix index: index matrix Returns: output >>> input [ [1, 2, 3], [4, 5, 6] ] >>> index [ [1, 2], [0, 1] ] >>> index_sample(input, index) [ [2, 3], [4, 5] ] ",42,105,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def index_sample(x, index): x_s = x.shape dim = len(index.shape) - 1 assert x_s[:dim] == index.shape[:dim] if len(x_s) == 3 and dim == 1: r_x = paddle.reshape(x, shape=[-1, x_s[1], x_s[-1]]) else: r_x = paddle.reshape(x, shape=[-1, x_s[-1]]) index = paddle.reshape(index, shape=[len(r_x), -1, 1]) # Generate arange index, shape like index arr_index = paddle.arange(start=0, end=len(index), dtype=index.dtype) arr_index = paddle.unsqueeze(arr_index, axis=[1, 2]) arr_index = paddle.expand(arr_index, index.shape) # Generate new index new_index = paddle.concat((arr_index, index), -1) new_index = paddle.reshape(new_index, (-1, 2)) # Get output out = paddle.gather_nd(r_x, new_index) if len(x_s) == 3 and dim == 2: out = paddle.reshape(out, shape=[x_s[0], x_s[1], -1]) else: out = paddle.reshape(out, shape=[x_s[0], -1]) return out ``` ###Assistant : Select input value according to index Args: input: input matrix index: index matrix Returns: output >>> input [ [1, 2, 3], [4, 5, 6] ] >>> index [ [1, 2], [0, 1] ] >>> index_sample(input, index) [ [2, 3], [4, 5] ] " 637,"def __new__(cls, name, patch, symbols=None, relations={}, **kwargs): if not isinstance(name, Str): name = Str(name) # canonicallize the symbols if symbols is None: names = kwargs.get('names', None) if names is None: symbols = Tuple( *[Symbol('%s_%s' % (name.name, i), real=True) for i in range(patch.dim)] ) else: sympy_deprecation_warning( f, deprecated_since_version=""1.7"", active_deprecations_target=""deprecated-diffgeom-mutable"", ) symbols = Tuple( *[Symbol(n, real=True) for n in names] ) else: syms = [] for s in symbols: if isinstance(s, Symbol): syms.append(Symbol(s.name, **s._assumptions.generator)) elif isinstance(s, str): sympy_deprecation_warning( f, deprecated_since_version=""1.7"", active_deprecations_target=""deprecated-diffgeom-mutable"", ) syms.append(Symbol(s, real=True)) symbols = Tuple(*syms) # canonicallize the relations rel_temp = {} for k,v in relations.items(): s1, s2 = k if not isinstance(s1, Str): s1 = Str(s1) if not isinstance(s2, Str): s2 = Str(s2) key = Tuple(s1, s2) # Old version used Lambda as a value. if isinstance(v, Lambda): v = (tuple(v.signature), tuple(v.expr)) else: v = (tuple(v[0]), tuple(v[1])) rel_temp[key] = v relations = Dict(rel_temp) # construct the object obj = super().__new__(cls, name, patch, symbols, relations) # Add deprecated attributes obj.transforms = _deprecated_dict( , {}) obj._names = [str(n) for n in symbols] obj.patch.coord_systems.append(obj) # deprecated obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated obj._dummy = Dummy() return obj "," The 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That is, replace CoordSystem(..., names={names}) with CoordSystem(..., symbols=[{', '.join([""Symbol("" + repr(n) + "", real=True)"" for n in names])}]) Passing a string as the coordinate symbol name to CoordSystem is deprecated. Pass a Symbol with the appropriate name and assumptions instead. That is, replace {s} with Symbol({s!r}, real=True). CoordSystem.transforms is deprecated. The CoordSystem class is now immutable. Use the 'relations' keyword argument to the CoordSystems() constructor to specify relations. ",78,188,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def __new__(cls, name, patch, symbols=None, relations={}, **kwargs): if not isinstance(name, Str): name = Str(name) # canonicallize the symbols if symbols is None: names = kwargs.get('names', None) if names is None: symbols = Tuple( *[Symbol('%s_%s' % (name.name, i), real=True) for i in range(patch.dim)] ) else: sympy_deprecation_warning( f, deprecated_since_version=""1.7"", active_deprecations_target=""deprecated-diffgeom-mutable"", ) symbols = Tuple( *[Symbol(n, real=True) for n in names] ) else: syms = [] for s in symbols: if isinstance(s, Symbol): syms.append(Symbol(s.name, **s._assumptions.generator)) elif isinstance(s, str): sympy_deprecation_warning( f, deprecated_since_version=""1.7"", active_deprecations_target=""deprecated-diffgeom-mutable"", ) syms.append(Symbol(s, real=True)) symbols = Tuple(*syms) # canonicallize the relations rel_temp = {} for k,v in relations.items(): s1, s2 = k if not isinstance(s1, Str): s1 = Str(s1) if not isinstance(s2, Str): s2 = Str(s2) key = Tuple(s1, s2) # Old version used Lambda as a value. if isinstance(v, Lambda): v = (tuple(v.signature), tuple(v.expr)) else: v = (tuple(v[0]), tuple(v[1])) rel_temp[key] = v relations = Dict(rel_temp) # construct the object obj = super().__new__(cls, name, patch, symbols, relations) # Add deprecated attributes obj.transforms = _deprecated_dict( , {}) obj._names = [str(n) for n in symbols] obj.patch.coord_systems.append(obj) # deprecated obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated obj._dummy = Dummy() return obj ``` ###Assistant : The 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That is, replace CoordSystem(..., names={names}) with CoordSystem(..., symbols=[{', '.join([""Symbol("" + repr(n) + "", real=True)"" for n in names])}]) Passing a string as the coordinate symbol name to CoordSystem is deprecated. Pass a Symbol with the appropriate name and assumptions instead. That is, replace {s} with Symbol({s!r}, real=True). CoordSystem.transforms is deprecated. The CoordSystem class is now immutable. Use the 'relations' keyword argument to the CoordSystems() constructor to specify relations. " 638,"def tab_focus(*, info): model = _tabs(win_id_filter=lambda win_id: win_id == info.win_id, add_win_id=False, current_win_id=info.win_id) special = [ (""last"", ""Focus the last-focused tab""), (""stack-next"", ""Go forward through a stack of focused tabs""), (""stack-prev"", ""Go backward through a stack of focused tabs""), ] model.add_category(listcategory.ListCategory(""Special"", special)) return model ",A model to complete on open tabs in the current window.,11,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tab_focus(*, info): model = _tabs(win_id_filter=lambda win_id: win_id == info.win_id, add_win_id=False, current_win_id=info.win_id) special = [ (""last"", ""Focus the last-focused tab""), (""stack-next"", ""Go forward through a stack of focused tabs""), (""stack-prev"", ""Go backward through a stack of focused tabs""), ] model.add_category(listcategory.ListCategory(""Special"", special)) return model ``` ###Assistant : A model to complete on open tabs in the current window. 
" 639,"def get_mode_of_payment_details(filters): mode_of_payment_details = {} invoice_list = get_invoices(filters) invoice_list_names = "","".join(""'"" + invoice[""name""] + ""'"" for invoice in invoice_list) if invoice_list: inv_mop_detail = frappe.db.sql( .format( invoice_list_names=invoice_list_names ), as_dict=1, ) inv_change_amount = frappe.db.sql( .format( invoice_list_names=invoice_list_names ), as_dict=1, ) for d in inv_change_amount: for det in inv_mop_detail: if ( det[""owner""] == d[""owner""] and det[""posting_date""] == d[""posting_date""] and det[""mode_of_payment""] == d[""mode_of_payment""] ): paid_amount = det[""paid_amount""] - d[""change_amount""] det[""paid_amount""] = paid_amount for d in inv_mop_detail: mode_of_payment_details.setdefault(d[""owner""] + cstr(d[""posting_date""]), []).append( (d.mode_of_payment, d.paid_amount) ) return mode_of_payment_details "," select t.owner, t.posting_date, t.mode_of_payment, sum(t.paid_amount) as paid_amount from ( select a.owner, a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(b.base_amount) as paid_amount from `tabSales Invoice` a, `tabSales Invoice Payment` b where a.name = b.parent and a.docstatus = 1 and a.name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment union select a.owner,a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(c.allocated_amount) as paid_amount from `tabSales Invoice` a, `tabPayment Entry` b,`tabPayment Entry Reference` c where a.name = c.reference_name and b.name = c.parent and b.docstatus = 1 and a.name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment union select a.owner, a.posting_date, ifnull(a.voucher_type,'') as mode_of_payment, sum(b.credit) from `tabJournal Entry` a, `tabJournal Entry Account` b where a.name = b.parent and a.docstatus = 1 and b.reference_type = 'Sales Invoice' and b.reference_name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment ) t group by t.owner, t.posting_date, t.mode_of_payment select a.owner, a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(a.base_change_amount) as change_amount from `tabSales Invoice` a, `tabSales Invoice Payment` b where a.name = b.parent and a.name in ({invoice_list_names}) and b.type = 'Cash' and a.base_change_amount > 0 group by a.owner, a.posting_date, mode_of_payment",169,80,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_mode_of_payment_details(filters): mode_of_payment_details = {} invoice_list = get_invoices(filters) invoice_list_names = "","".join(""'"" + invoice[""name""] + ""'"" for invoice in invoice_list) if invoice_list: inv_mop_detail = frappe.db.sql( .format( invoice_list_names=invoice_list_names ), as_dict=1, ) inv_change_amount = frappe.db.sql( .format( invoice_list_names=invoice_list_names ), as_dict=1, ) for d in inv_change_amount: for det in inv_mop_detail: if ( det[""owner""] == d[""owner""] and det[""posting_date""] == d[""posting_date""] and det[""mode_of_payment""] == d[""mode_of_payment""] ): paid_amount = det[""paid_amount""] - d[""change_amount""] det[""paid_amount""] = paid_amount for d in inv_mop_detail: mode_of_payment_details.setdefault(d[""owner""] + cstr(d[""posting_date""]), []).append( (d.mode_of_payment, d.paid_amount) ) return mode_of_payment_details ``` ###Assistant : select t.owner, t.posting_date, t.mode_of_payment, sum(t.paid_amount) as paid_amount from ( select a.owner, a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(b.base_amount) as paid_amount from `tabSales Invoice` a, `tabSales Invoice Payment` b where a.name = b.parent and a.docstatus = 1 and a.name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment union select a.owner,a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(c.allocated_amount) as paid_amount from `tabSales Invoice` a, `tabPayment Entry` b,`tabPayment Entry Reference` c where a.name = c.reference_name and b.name = c.parent and b.docstatus = 1 and a.name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment union select a.owner, a.posting_date, ifnull(a.voucher_type,'') as mode_of_payment, sum(b.credit) from `tabJournal Entry` a, `tabJournal Entry Account` b where a.name = b.parent and a.docstatus = 1 and b.reference_type = 'Sales Invoice' and b.reference_name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment ) t group by t.owner, t.posting_date, t.mode_of_payment select a.owner, a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(a.base_change_amount) as change_amount from `tabSales Invoice` a, `tabSales Invoice Payment` b where a.name = b.parent and a.name in ({invoice_list_names}) and b.type = 'Cash' and a.base_change_amount > 0 group by a.owner, a.posting_date, mode_of_payment " 640,"def get_config_context(self): data = {} if not hasattr(self, 'config_context_data'): # The annotation is not available, so we fall back to manually querying for the config context objects config_context_data = ConfigContext.objects.get_for_object(self, aggregate_data=True) else: # The attribute may exist, but the annotated value could be None if there is no config context data config_context_data = self.config_context_data or [] for context in config_context_data: data = deepmerge(data, context) # If the object has local config context data defined, merge it last if self.local_context_data: data = deepmerge(data, self.local_context_data) return data "," Compile all config data, overwriting lower-weight values with higher-weight values where a collision occurs. Return the rendered configuration context for a device or VM. ",24,85,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_config_context(self): data = {} if not hasattr(self, 'config_context_data'): # The annotation is not available, so we fall back to manually querying for the config context objects config_context_data = ConfigContext.objects.get_for_object(self, aggregate_data=True) else: # The attribute may exist, but the annotated value could be None if there is no config context data config_context_data = self.config_context_data or [] for context in config_context_data: data = deepmerge(data, context) # If the object has local config context data defined, merge it last if self.local_context_data: data = deepmerge(data, self.local_context_data) return data ``` ###Assistant : Compile all config data, overwriting lower-weight values with higher-weight values where a collision occurs. Return the rendered configuration context for a device or VM. " 641,"def test_displayname_is_set_avatar_is_none(self) -> None: channel = self.make_request( ""POST"", self.url, access_token=self.admin_user_tok, content={ ""user_id"": self.other_user, ""content"": {""msgtype"": ""m.text"", ""body"": ""test msg""}, }, ) self.assertEqual(200, channel.code, msg=channel.json_body) # user has one invite self._check_invite_and_join_status(self.other_user, 1, 0) "," Tests that sending a server notice is successful if a display_name is set, avatar_url is `None` and ""check avatar size and mime type"" is set. ",25,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_displayname_is_set_avatar_is_none(self) -> None: channel = self.make_request( ""POST"", self.url, access_token=self.admin_user_tok, content={ ""user_id"": self.other_user, ""content"": {""msgtype"": ""m.text"", ""body"": ""test msg""}, }, ) self.assertEqual(200, channel.code, msg=channel.json_body) # user has one invite self._check_invite_and_join_status(self.other_user, 1, 0) ``` ###Assistant : Tests that sending a server notice is successful if a display_name is set, avatar_url is `None` and ""check avatar size and mime type"" is set. " 642,"def cur_iter_done(self) -> bool: return all( self._get_result_time(result) >= self._cumul_r for result in self._live_trials.values() ) ","Checks if all iterations have completed. TODO(rliaw): also check that `t.iterations == self._r`",13,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cur_iter_done(self) -> bool: return all( self._get_result_time(result) >= self._cumul_r for result in self._live_trials.values() ) ``` ###Assistant : Checks if all iterations have completed. TODO(rliaw): also check that `t.iterations == self._r` " 643,"def get_attendance_years(): year_list = frappe.db.sql_list( ) if not year_list: year_list = [getdate().year] return ""\n"".join(str(year) for year in year_list) ",select distinct YEAR(date) from `tabStudent Attendance` ORDER BY YEAR(date) DESC,10,18,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_attendance_years(): year_list = frappe.db.sql_list( ) if not year_list: year_list = [getdate().year] return ""\n"".join(str(year) for year in year_list) ``` ###Assistant : select distinct YEAR(date) from `tabStudent Attendance` ORDER BY YEAR(date) DESC " 644,"async def test_discovery_notification(hass): mock_integration(hass, MockModule(""test"")) mock_entity_platform(hass, ""config_flow.test"", None) with patch.dict(config_entries.HANDLERS): ",Test that we create/dismiss a notification when source is discovery.,10,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_discovery_notification(hass): mock_integration(hass, MockModule(""test"")) mock_entity_platform(hass, ""config_flow.test"", None) with patch.dict(config_entries.HANDLERS): ``` ###Assistant : Test that we create/dismiss a notification when source is discovery. " 645,"def get_preference(self, identifier, resolutions, candidates, information): raise NotImplementedError ","Produce a sort key for given requirement based on preference. The preference is defined as ""I think this requirement should be resolved first"". The lower the return value is, the more preferred this group of arguments is. :param identifier: An identifier as returned by ``identify()``. This identifies the dependency matches of which should be returned. :param resolutions: Mapping of candidates currently pinned by the resolver. Each key is an identifier, and the value a candidate. The candidate may conflict with requirements from ``information``. :param candidates: Mapping of each dependency's possible candidates. Each value is an iterator of candidates. :param information: Mapping of requirement information of each package. Each value is an iterator of *requirement information*. A *requirement information* instance is a named tuple with two members: * ``requirement`` specifies a requirement contributing to the current list of candidates. * ``parent`` specifies the candidate that provides (depended on) the requirement, or ``None`` to indicate a root requirement. The preference could depend on various issues, including (not necessarily in this order): * Is this package pinned in the current resolution result? * How relaxed is the requirement? Stricter ones should probably be worked on first? (I don't know, actually.) * How many possibilities are there to satisfy this requirement? Those with few left should likely be worked on first, I guess? * Are there any known conflicts for this requirement? We should probably work on those with the most known conflicts. A sortable value should be returned (this will be used as the ``key`` parameter of the built-in sorting function). The smaller the value is, the more preferred this requirement is (i.e. the sorting function is called with ``reverse=False``). ",279,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_preference(self, identifier, resolutions, candidates, information): raise NotImplementedError ``` ###Assistant : Produce a sort key for given requirement based on preference. The preference is defined as ""I think this requirement should be resolved first"". The lower the return value is, the more preferred this group of arguments is. :param identifier: An identifier as returned by ``identify()``. This identifies the dependency matches of which should be returned.
:param resolutions: Mapping of candidates currently pinned by the resolver. Each key is an identifier, and the value a candidate. The candidate may conflict with requirements from ``information``. :param candidates: Mapping of each dependency's possible candidates. Each value is an iterator of candidates. :param information: Mapping of requirement information of each package. Each value is an iterator of *requirement information*. A *requirement information* instance is a named tuple with two members: * ``requirement`` specifies a requirement contributing to the current list of candidates. * ``parent`` specifies the candidate that provides (depended on) the requirement, or ``None`` to indicate a root requirement. The preference could depend on various issues, including (not necessarily in this order): * Is this package pinned in the current resolution result? * How relaxed is the requirement? Stricter ones should probably be worked on first? (I don't know, actually.) * How many possibilities are there to satisfy this requirement? Those with few left should likely be worked on first, I guess? * Are there any known conflicts for this requirement? We should probably work on those with the most known conflicts. A sortable value should be returned (this will be used as the ``key`` parameter of the built-in sorting function). The smaller the value is, the more preferred this requirement is (i.e. the sorting function is called with ``reverse=False``). " 646,"def prepare_metadata(self) -> None: assert self.source_dir details = self.name or f""from {self.link}"" if self.use_pep517: assert self.pep517_backend is not None if ( self.editable and self.permit_editable_wheels and self.supports_pyproject_editable() ): self.metadata_directory = generate_editable_metadata( build_env=self.build_env, backend=self.pep517_backend, details=details, ) else: self.metadata_directory = generate_metadata( build_env=self.build_env, backend=self.pep517_backend, details=details, ) else: self.metadata_directory = generate_metadata_legacy( build_env=self.build_env, setup_py_path=self.setup_py_path, source_dir=self.unpacked_source_directory, isolated=self.isolated, details=details, ) # Act on the newly generated metadata, based on the name and version.
if not self.name: self._set_requirement() else: self.warn_on_mismatching_name() self.assert_source_matches_version() ``` ###Assistant : Ensure that project metadata is available. Under PEP 517 and PEP 660, call the backend hook to prepare the metadata. Under legacy processing, call setup.py egg-info. " 647,"def get_image_filename(self, image, filterspec): name, ext = os.path.splitext(os.path.basename(image.file.name)) return ""{}images/{}.{}{}"".format(settings.MEDIA_URL, name, filterspec, ext) "," Get the generated filename for a resized image ",8,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_image_filename(self, image, filterspec): name, ext = os.path.splitext(os.path.basename(image.file.name)) return ""{}images/{}.{}{}"".format(settings.MEDIA_URL, name, filterspec, ext) ``` ###Assistant : Get the generated filename for a resized image " 648,"def min(self, other, context=None): other = _convert_other(other, raiseit=True) if context is None: context = getcontext() if self._is_special or other._is_special: # If one operand is a quiet NaN and the other is number, then the # number is always returned sn = self._isnan() on = other._isnan() if sn or on: if on == 1 and sn == 0: return self._fix(context) if sn == 1 and on == 0: return other._fix(context) return self._check_nans(other, context) c = self._cmp(other) if c == 0: c = self.compare_total(other) if c == -1: ans = self else: ans = other return ans._fix(context) ","Returns the smaller value. Like min(self, other) except if one is not a number, returns NaN (and signals if one is sNaN). Also rounds. ",24,95,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def min(self, other, context=None): other = _convert_other(other, raiseit=True) if context is None: context = getcontext() if self._is_special or other._is_special: # If one operand is a quiet NaN and the other is number, then the # number is always returned sn = self._isnan() on = other._isnan() if sn or on: if on == 1 and sn == 0: return self._fix(context) if sn == 1 and on == 0: return other._fix(context) return self._check_nans(other, context) c = self._cmp(other) if c == 0: c = self.compare_total(other) if c == -1: ans = self else: ans = other return ans._fix(context) ``` ###Assistant : Returns the smaller value. Like min(self, other) except if one is not a number, returns NaN (and signals if one is sNaN). Also rounds. " 649,"def test_simplelistfilter_with_none_returning_lookups(self): modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site) request = self.request_factory.get(""/"", {}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) filterspec = changelist.get_filters(request)[0] self.assertEqual(len(filterspec), 0) "," A SimpleListFilter lookups method can return None but disables the filter completely. ",12,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_simplelistfilter_with_none_returning_lookups(self): modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site) request = self.request_factory.get(""/"", {}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) filterspec = changelist.get_filters(request)[0] self.assertEqual(len(filterspec), 0) ``` ###Assistant : A SimpleListFilter lookups method can return None but disables the filter completely. 
" 650,"def precompute_fill_value(dataset_cols, feature, preprocessing_parameters, backend): missing_value_strategy = preprocessing_parameters[""missing_value_strategy""] if missing_value_strategy == FILL_WITH_CONST: return preprocessing_parameters[""fill_value""] elif missing_value_strategy == FILL_WITH_MODE: return dataset_cols[feature[COLUMN]].value_counts().index[0] elif missing_value_strategy == FILL_WITH_MEAN: if feature[TYPE] != NUMBER: raise ValueError( f""Filling missing values with mean is supported "" f""only for number types, not for type {feature[TYPE]}."", ) return backend.df_engine.compute(dataset_cols[feature[COLUMN]].mean()) elif missing_value_strategy == FILL_WITH_FALSE: distinct_values = backend.df_engine.compute( dataset_cols[feature[COLUMN]].drop_duplicates().dropna() ).values.tolist() if len(distinct_values) > 2: raise ValueError( f""Missing value strategy `fill_with_false` "" f""for column {feature[COLUMN]} expects 2 distinct values, "" f""found: {len(distinct_values)} (ex: {distinct_values[:10]})"" ) # Determine the False label. # Distinct values are sorted in reverse to mirror the selection of the default fallback_true_label (in # binary_feature.get_feature_meta) for binary columns with unconventional boolean values, ""human""/""bot"". for v in sorted(distinct_values, reverse=True): fallback_true_label = preprocessing_parameters.get(""fallback_true_label"", ""true"") if strings_utils.str2bool(v, fallback_true_label) is False: return v raise ValueError( f""Unable to determine False value for column {feature[COLUMN]} with distinct values: {distinct_values}."" ) # Otherwise, we cannot precompute the fill value for this dataset return None ","Precomputes the fill value for a feature. NOTE: this is called before NaNs are removed from the dataset. Modifications here must handle NaNs gracefully. ",24,157,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def precompute_fill_value(dataset_cols, feature, preprocessing_parameters, backend): missing_value_strategy = preprocessing_parameters[""missing_value_strategy""] if missing_value_strategy == FILL_WITH_CONST: return preprocessing_parameters[""fill_value""] elif missing_value_strategy == FILL_WITH_MODE: return dataset_cols[feature[COLUMN]].value_counts().index[0] elif missing_value_strategy == FILL_WITH_MEAN: if feature[TYPE] != NUMBER: raise ValueError( f""Filling missing values with mean is supported "" f""only for number types, not for type {feature[TYPE]}."", ) return backend.df_engine.compute(dataset_cols[feature[COLUMN]].mean()) elif missing_value_strategy == FILL_WITH_FALSE: distinct_values = backend.df_engine.compute( dataset_cols[feature[COLUMN]].drop_duplicates().dropna() ).values.tolist() if len(distinct_values) > 2: raise ValueError( f""Missing value strategy `fill_with_false` "" f""for column {feature[COLUMN]} expects 2 distinct values, "" f""found: {len(distinct_values)} (ex: {distinct_values[:10]})"" ) # Determine the False label. # Distinct values are sorted in reverse to mirror the selection of the default fallback_true_label (in # binary_feature.get_feature_meta) for binary columns with unconventional boolean values, ""human""/""bot"". 
for v in sorted(distinct_values, reverse=True): fallback_true_label = preprocessing_parameters.get(""fallback_true_label"", ""true"") if strings_utils.str2bool(v, fallback_true_label) is False: return v raise ValueError( f""Unable to determine False value for column {feature[COLUMN]} with distinct values: {distinct_values}."" ) # Otherwise, we cannot precompute the fill value for this dataset return None ``` ###Assistant : Precomputes the fill value for a feature. NOTE: this is called before NaNs are removed from the dataset. Modifications here must handle NaNs gracefully. " 651,"def loss_labels(self, outputs, targets, indices, num_boxes, log=True): if ""logits"" not in outputs: raise ValueError(""No logits were found in the outputs"") source_logits = outputs[""logits""] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t[""class_labels""][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {""loss_ce"": loss_ce} return losses ","Classification loss (NLL) targets dicts must contain the key ""labels"" containing a tensor of dim [nb_target_boxes] ",16,84,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def loss_labels(self, outputs, targets, indices, num_boxes, log=True): if ""logits"" not in outputs: raise ValueError(""No logits were found in the outputs"") source_logits = outputs[""logits""] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t[""class_labels""][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {""loss_ce"": loss_ce} return losses ``` ###Assistant : Classification loss (NLL) targets dicts must contain the key ""labels"" containing a tensor of dim [nb_target_boxes] " 652,"def test_stacking_classifier_multilabel_predict_proba(estimator): X_train, X_test, y_train, y_test = train_test_split( X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 ) n_outputs = 3 estimators = [(""est"", estimator)] stacker = StackingClassifier( estimators=estimators, final_estimator=KNeighborsClassifier(), stack_method=""predict_proba"", ).fit(X_train, y_train) X_trans = stacker.transform(X_test) assert X_trans.shape == (X_test.shape[0], n_outputs) # we should not have any collinear classes and thus nothing should sum to 1 assert not any(np.isclose(X_trans.sum(axis=1), 1.0)) y_pred = stacker.predict(X_test) assert y_pred.shape == y_test.shape ","Check the behaviour for the multilabel classification case and the `predict_proba` stacking method. Estimators are not consistent with the output arrays and we need to ensure that we handle all cases. ",31,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stacking_classifier_multilabel_predict_proba(estimator): X_train, X_test, y_train, y_test = train_test_split( X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 ) n_outputs = 3 estimators = [(""est"", estimator)] stacker = StackingClassifier( estimators=estimators, final_estimator=KNeighborsClassifier(), stack_method=""predict_proba"", ).fit(X_train, y_train) X_trans = stacker.transform(X_test) assert X_trans.shape == (X_test.shape[0], n_outputs) # we should not have any collinear classes and thus nothing should sum to 1 assert not any(np.isclose(X_trans.sum(axis=1), 1.0)) y_pred = stacker.predict(X_test) assert y_pred.shape == y_test.shape ``` ###Assistant : Check the behaviour for the multilabel classification case and the `predict_proba` stacking method. Estimators are not consistent with the output arrays and we need to ensure that we handle all cases. 
" 653,"def get_page(self, url): # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme, netloc, path, _, _, _ = urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): url = urljoin(ensure_slash(url), 'index.html') if url in self._page_cache: result = self._page_cache[url] logger.debug('Returning %s from cache: %s', url, result) else: host = netloc.split(':', 1)[0] result = None if host in self._bad_hosts: logger.debug('Skipping %s due to bad host %s', url, host) else: req = Request(url, headers={'Accept-encoding': 'identity'}) try: logger.debug('Fetching %s', url) resp = self.opener.open(req, timeout=self.timeout) logger.debug('Fetched %s', url) headers = resp.info() content_type = headers.get('Content-Type', '') if HTML_CONTENT_TYPE.match(content_type): final_url = resp.geturl() data = resp.read() encoding = headers.get('Content-Encoding') if encoding: decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) if m: encoding = m.group(1) try: data = data.decode(encoding) except UnicodeError: # pragma: no cover data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: if e.code != 404: logger.exception('Fetch failed: %s: %s', url, e) except URLError as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) with self._lock: self._bad_hosts.add(host) except Exception as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) finally: self._page_cache[url] = result # even if None (failure) return result _distname_re = re.compile(']*>([^<]+)<') "," Get the HTML for an URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator). ",41,199,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_page(self, url): # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme, netloc, path, _, _, _ = urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): url = urljoin(ensure_slash(url), 'index.html') if url in self._page_cache: result = self._page_cache[url] logger.debug('Returning %s from cache: %s', url, result) else: host = netloc.split(':', 1)[0] result = None if host in self._bad_hosts: logger.debug('Skipping %s due to bad host %s', url, host) else: req = Request(url, headers={'Accept-encoding': 'identity'}) try: logger.debug('Fetching %s', url) resp = self.opener.open(req, timeout=self.timeout) logger.debug('Fetched %s', url) headers = resp.info() content_type = headers.get('Content-Type', '') if HTML_CONTENT_TYPE.match(content_type): final_url = resp.geturl() data = resp.read() encoding = headers.get('Content-Encoding') if encoding: decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) if m: encoding = m.group(1) try: data = data.decode(encoding) except UnicodeError: # pragma: no cover data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: if e.code != 404: logger.exception('Fetch failed: %s: %s', url, e) except URLError as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) with self._lock: self._bad_hosts.add(host) except Exception as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) finally: self._page_cache[url] = result # even if None (failure) return result _distname_re = re.compile(']*>([^<]+)<') ``` ###Assistant : Get the HTML for an URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator). " 654,"def enable_all_warnings() -> None: __diag__.enable_all_warnings() # hide abstract class del __config_flags "," Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). ",8,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def enable_all_warnings() -> None: __diag__.enable_all_warnings() # hide abstract class del __config_flags ``` ###Assistant : Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). " 655,"def export_yaml(self): yaml_data = [obj.to_yaml() for obj in self.queryset] return '---\n'.join(yaml_data) "," Export the queryset of objects as concatenated YAML documents. ",9,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def export_yaml(self): yaml_data = [obj.to_yaml() for obj in self.queryset] return '---\n'.join(yaml_data) ``` ###Assistant : Export the queryset of objects as concatenated YAML documents. " 656,"def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False): if url is not None: url = url.strip() if not url: return False if allowed_hosts is None: allowed_hosts = set() elif isinstance(allowed_hosts, str): allowed_hosts = {allowed_hosts} # Chrome treats \ completely as / in paths but it could be part of some # basic auth credentials so we need to check both URLs. 
return _url_has_allowed_host_and_scheme( url, allowed_hosts, require_https=require_https ) and _url_has_allowed_host_and_scheme( url.replace(""\\"", ""/""), allowed_hosts, require_https=require_https ) # Copied from urllib.parse.urlparse() but uses fixed urlsplit() function."," Return ``True`` if the url uses an allowed host and a safe scheme. Always return ``False`` on an empty url. If ``require_https`` is ``True``, only 'https' will be considered a valid scheme, as opposed to 'http' and 'https' with the default, ``False``. Note: ""True"" doesn't entail that a URL is ""safe"". It may still be e.g. quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri() on the path component of untrusted URLs. ",70,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False): if url is not None: url = url.strip() if not url: return False if allowed_hosts is None: allowed_hosts = set() elif isinstance(allowed_hosts, str): allowed_hosts = {allowed_hosts} # Chrome treats \ completely as / in paths but it could be part of some # basic auth credentials so we need to check both URLs. return _url_has_allowed_host_and_scheme( url, allowed_hosts, require_https=require_https ) and _url_has_allowed_host_and_scheme( url.replace(""\\"", ""/""), allowed_hosts, require_https=require_https ) # Copied from urllib.parse.urlparse() but uses fixed urlsplit() function. ``` ###Assistant : Return ``True`` if the url uses an allowed host and a safe scheme. Always return ``False`` on an empty url. If ``require_https`` is ``True``, only 'https' will be considered a valid scheme, as opposed to 'http' and 'https' with the default, ``False``. Note: ""True"" doesn't entail that a URL is ""safe"". It may still be e.g. quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri() on the path component of untrusted URLs. " 657,"def get_ps(module, pattern): found = False if platform.system() == 'SunOS': flags = '-ef' else: flags = 'auxww' psbin = module.get_bin_path('ps', True) (rc, psout, pserr) = module.run_command([psbin, flags]) if rc == 0: for line in psout.splitlines(): if pattern in line: # FIXME: should add logic to prevent matching 'self', though that should be extremely rare found = True break return found "," Last resort to find a service by trying to match pattern to programs in memory ",15,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_ps(module, pattern): found = False if platform.system() == 'SunOS': flags = '-ef' else: flags = 'auxww' psbin = module.get_bin_path('ps', True) (rc, psout, pserr) = module.run_command([psbin, flags]) if rc == 0: for line in psout.splitlines(): if pattern in line: # FIXME: should add logic to prevent matching 'self', though that should be extremely rare found = True break return found ``` ###Assistant : Last resort to find a service by trying to match pattern to programs in memory " 658,"def apply(self, sample, context=None): im = sample['image'] im = im.astype(np.float32, copy=False) if self.is_scale: scale = 1.0 / 255.0 im *= scale if self.norm_type == 'mean_std': mean = np.array(self.mean)[np.newaxis, np.newaxis, :] std = np.array(self.std)[np.newaxis, np.newaxis, :] im -= mean im /= std sample['image'] = im return sample @register_op","Normalize the image. 
Operators: 1.(optional) Scale the pixel to [0,1] 2.(optional) Each pixel minus mean and is divided by std ",20,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply(self, sample, context=None): im = sample['image'] im = im.astype(np.float32, copy=False) if self.is_scale: scale = 1.0 / 255.0 im *= scale if self.norm_type == 'mean_std': mean = np.array(self.mean)[np.newaxis, np.newaxis, :] std = np.array(self.std)[np.newaxis, np.newaxis, :] im -= mean im /= std sample['image'] = im return sample @register_op ``` ###Assistant : Normalize the image. Operators: 1.(optional) Scale the pixel to [0,1] 2.(optional) Each pixel minus mean and is divided by std " 659,"def _check_m2m_through_same_relationship(cls): errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures: errors.append( checks.Error( ""The model has two identical many-to-many relations "" ""through the intermediate model '%s'."" % f.remote_field.through._meta.label, obj=cls, id=""models.E003"", ) ) else: seen_intermediary_signatures.append(signature) return errors ",Check if no relationship model is used by more than one m2m field.,13,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_m2m_through_same_relationship(cls): errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures: errors.append( checks.Error( ""The model has two identical many-to-many relations "" ""through the intermediate model '%s'."" % f.remote_field.through._meta.label, obj=cls, id=""models.E003"", ) ) else: seen_intermediary_signatures.append(signature) return errors ``` ###Assistant : Check if no relationship model is used by more than one m2m field. " 660,"def image(self) -> ""np.ndarray"": assert self._image is not None return self._image ", :class:`numpy.ndarray`: The source frame for this object. ,7,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def image(self) -> ""np.ndarray"": assert self._image is not None return self._image ``` ###Assistant : :class:`numpy.ndarray`: The source frame for this object. 
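For the image-normalization transform documented a little earlier (optional [0, 1] scaling followed by per-channel mean/std standardization), a small NumPy sketch; the mean and std values are illustrative ImageNet-style numbers, not ones taken from the source.
```Python
import numpy as np

# Illustrative per-channel statistics; the real values come from the
# transform's configuration.
mean = np.array([0.485, 0.456, 0.406])[np.newaxis, np.newaxis, :]
std = np.array([0.229, 0.224, 0.225])[np.newaxis, np.newaxis, :]

im = np.random.randint(0, 256, size=(4, 4, 3)).astype(np.float32)
im *= 1.0 / 255.0        # optional step 1: scale pixels to [0, 1]
im = (im - mean) / std   # optional step 2: subtract mean, divide by std
print(im.shape)
```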
" 661,"def test_sends_deployment_notification(self, record_analytics): release = self.create_release() version_parsed = self.version_parsed = parse_release(release.version)[""description""] url = f""/api/0/organizations/{self.organization.slug}/releases/{release.version}/deploys/"" with self.tasks(): response = self.client.post( url, format=""json"", data={""environment"": self.environment.name} ) assert response.status_code == 201, response.content msg = mail.outbox[0] # check the txt version assert f""Version {version_parsed} was deployed to {self.environment.name} on"" in msg.body # check the html version assert ( f""Version {version_parsed} was deployed to {self.environment.name}\n \n"" in msg.alternatives[0][0] ) attachment, text = get_attachment() assert ( text == f""Release {version_parsed} was deployed to {self.environment.name} for this project"" ) assert ( attachment[""actions""][0][""url""] == f""http://testserver/organizations/{self.organization.slug}/releases/{release.version}/?project={self.project.id}&unselectedSeries=Healthy/"" ) assert ( attachment[""footer""] == f""{self.project.slug} | "" ) assert analytics_called_with_args( record_analytics, ""integrations.email.notification_sent"", user_id=self.user.id, actor_id=self.user.actor_id, organization_id=self.organization.id, ) assert analytics_called_with_args( record_analytics, ""integrations.slack.notification_sent"", user_id=self.user.id, actor_id=self.user.actor_id, organization_id=self.organization.id, ) "," Test that an email AND Slack notification are sent with the expected values when a release is deployed. ",18,113,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_sends_deployment_notification(self, record_analytics): release = self.create_release() version_parsed = self.version_parsed = parse_release(release.version)[""description""] url = f""/api/0/organizations/{self.organization.slug}/releases/{release.version}/deploys/"" with self.tasks(): response = self.client.post( url, format=""json"", data={""environment"": self.environment.name} ) assert response.status_code == 201, response.content msg = mail.outbox[0] # check the txt version assert f""Version {version_parsed} was deployed to {self.environment.name} on"" in msg.body # check the html version assert ( f""Version {version_parsed} was deployed to {self.environment.name}\n \n"" in msg.alternatives[0][0] ) attachment, text = get_attachment() assert ( text == f""Release {version_parsed} was deployed to {self.environment.name} for this project"" ) assert ( attachment[""actions""][0][""url""] == f""http://testserver/organizations/{self.organization.slug}/releases/{release.version}/?project={self.project.id}&unselectedSeries=Healthy/"" ) assert ( attachment[""footer""] == f""{self.project.slug} | "" ) assert analytics_called_with_args( record_analytics, ""integrations.email.notification_sent"", user_id=self.user.id, actor_id=self.user.actor_id, organization_id=self.organization.id, ) assert analytics_called_with_args( record_analytics, ""integrations.slack.notification_sent"", user_id=self.user.id, actor_id=self.user.actor_id, organization_id=self.organization.id, ) ``` ###Assistant : Test that an email AND Slack notification are sent with the expected values when a release is deployed. 
" 662,"def get_all_mode_of_payments(doc): return frappe.db.sql( , {""company"": doc.company}, as_dict=1, ) "," select mpa.default_account, mpa.parent, mp.type as type from `tabMode of Payment Account` mpa,`tabMode of Payment` mp where mpa.parent = mp.name and mpa.company = %(company)s and mp.enabled = 1",27,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_all_mode_of_payments(doc): return frappe.db.sql( , {""company"": doc.company}, as_dict=1, ) ``` ###Assistant : select mpa.default_account, mpa.parent, mp.type as type from `tabMode of Payment Account` mpa,`tabMode of Payment` mp where mpa.parent = mp.name and mpa.company = %(company)s and mp.enabled = 1 " 663,"def _on_move(self, event): if not self.button_pressed: return if self.get_navigate_mode() is not None: # we don't want to rotate if we are zooming/panning # from the toolbar return if self.M is None: return x, y = event.xdata, event.ydata # In case the mouse is out of bounds. if x is None or event.inaxes != self: return dx, dy = x - self._sx, y - self._sy w = self._pseudo_w h = self._pseudo_h # Rotation if self.button_pressed in self._rotate_btn: # rotate viewing point # get the x and y pixel coords if dx == 0 and dy == 0: return roll = np.deg2rad(self.roll) delev = -(dy/h)*180*np.cos(roll) + (dx/w)*180*np.sin(roll) dazim = -(dy/h)*180*np.sin(roll) - (dx/w)*180*np.cos(roll) self.elev = self.elev + delev self.azim = self.azim + dazim self.stale = True elif self.button_pressed in self._pan_btn: # Start the pan event with pixel coordinates px, py = self.transData.transform([self._sx, self._sy]) self.start_pan(px, py, 2) # pan view (takes pixel coordinate input) self.drag_pan(2, None, event.x, event.y) self.end_pan() # Zoom elif self.button_pressed in self._zoom_btn: # zoom view (dragging down zooms in) scale = h/(h - dy) self._scale_axis_limits(scale, scale, scale) # Store the event coordinates for the next time through. self._sx, self._sy = x, y # Always request a draw update at the end of interaction self.figure.canvas.draw_idle() "," Mouse moving. By default, button-1 rotates, button-2 pans, and button-3 zooms; these buttons can be modified via `mouse_init`. ",18,203,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _on_move(self, event): if not self.button_pressed: return if self.get_navigate_mode() is not None: # we don't want to rotate if we are zooming/panning # from the toolbar return if self.M is None: return x, y = event.xdata, event.ydata # In case the mouse is out of bounds. 
if x is None or event.inaxes != self: return dx, dy = x - self._sx, y - self._sy w = self._pseudo_w h = self._pseudo_h # Rotation if self.button_pressed in self._rotate_btn: # rotate viewing point # get the x and y pixel coords if dx == 0 and dy == 0: return roll = np.deg2rad(self.roll) delev = -(dy/h)*180*np.cos(roll) + (dx/w)*180*np.sin(roll) dazim = -(dy/h)*180*np.sin(roll) - (dx/w)*180*np.cos(roll) self.elev = self.elev + delev self.azim = self.azim + dazim self.stale = True elif self.button_pressed in self._pan_btn: # Start the pan event with pixel coordinates px, py = self.transData.transform([self._sx, self._sy]) self.start_pan(px, py, 2) # pan view (takes pixel coordinate input) self.drag_pan(2, None, event.x, event.y) self.end_pan() # Zoom elif self.button_pressed in self._zoom_btn: # zoom view (dragging down zooms in) scale = h/(h - dy) self._scale_axis_limits(scale, scale, scale) # Store the event coordinates for the next time through. self._sx, self._sy = x, y # Always request a draw update at the end of interaction self.figure.canvas.draw_idle() ``` ###Assistant : Mouse moving. By default, button-1 rotates, button-2 pans, and button-3 zooms; these buttons can be modified via `mouse_init`. " 664,"def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug(""Removing these fileset(s)/rpm package(s) %s: %s"", name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Install the fileset (normally ends with bff or rte) or rpm package(s) errors = [] for target in targets: filename = os.path.basename(target) if filename.endswith("".bff"") or filename.endswith("".rte""): if _is_installed(target): continue cmd = ""/usr/sbin/installp -acYXg"" if test: cmd += ""p"" cmd += "" -d "" dirpath = os.path.dirname(target) cmd += dirpath + "" "" + filename out = __salt__[""cmd.run_all""](cmd, python_shell=False) else: if _is_installed_rpm(filename.split("".aix"")[0]): continue # assume use dnf or yum cmdflags = "" install --allowerasing "" if pathlib.Path(""/opt/freeware/bin/dnf"").is_file(): cmdexe = ""/opt/freeware/bin/dnf"" if test: cmdflags += "" --assumeno"" else: cmdflags += "" --assumeyes"" if refresh: cmdflags += "" --refresh"" elif pathlib.Path(""/opt/freeware/bin/yum"").is_file(): cmdexe = ""/opt/freeware/bin/yum"" if test: cmdflags += "" --assumeno"" else: cmdflags += "" --assumeyes"" if refresh: cmdflags += "" --refresh"" elif pathlib.Path(""/usr/bin/yum"").is_file(): cmdexe = ""/usr/bin/yum"" if test: cmdflags += "" --assumeno"" else: cmdflags += "" --assumeyes"" else: cmdexe = ""/usr/bin/rpm"" cmdflags = "" -Uivh "" if test: cmdflags += "" --test"" cmd = [cmdexe, cmdflags, target] out = __salt__[""cmd.run_all""](cmd, python_shell=False) if 0 != out[""retcode""]: errors.append(out[""stderr""]) # Get a list of the packages after the uninstall __context__.pop(""pkg.list_pkgs"", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( ""Problems encountered installing filesets(s)/package(s)"", info={""changes"": ret, ""errors"": errors}, ) # No error occurred if test: return ""Test succeeded."" return ret "," Install the named fileset(s)/rpm package(s). .. 
versionadded:: 3005 preference to install rpm packages are to use in the following order: /opt/freeware/bin/dnf /opt/freeware/bin/yum /usr/bin/yum /usr/bin/rpm Note: use of rpm to install implies that rpm's dependencies must have been previously installed. dnf and yum automatically install rpm's dependencies as part of the install process name The name of the fileset or rpm package to be installed. refresh Whether or not to update the yum database before executing. Multiple Package Installation Options: pkgs A list of filesets and/or rpm packages to install. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. version Install a specific version of a fileset/rpm package. (Unused at present). test Verify that command functions correctly: Returns a dict containing the new fileset(s)/rpm package(s) names and versions: {'': {'old': '', 'new': ''}} CLI Example: .. code-block:: bash salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base salt '*' pkg.install pkgs='[""foo"", ""bar""]' ",172,248,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug(""Removing these fileset(s)/rpm package(s) %s: %s"", name, targets) # Get a list of the currently installed pkgs. 
old = list_pkgs() # Install the fileset (normally ends with bff or rte) or rpm package(s) errors = [] for target in targets: filename = os.path.basename(target) if filename.endswith("".bff"") or filename.endswith("".rte""): if _is_installed(target): continue cmd = ""/usr/sbin/installp -acYXg"" if test: cmd += ""p"" cmd += "" -d "" dirpath = os.path.dirname(target) cmd += dirpath + "" "" + filename out = __salt__[""cmd.run_all""](cmd, python_shell=False) else: if _is_installed_rpm(filename.split("".aix"")[0]): continue # assume use dnf or yum cmdflags = "" install --allowerasing "" if pathlib.Path(""/opt/freeware/bin/dnf"").is_file(): cmdexe = ""/opt/freeware/bin/dnf"" if test: cmdflags += "" --assumeno"" else: cmdflags += "" --assumeyes"" if refresh: cmdflags += "" --refresh"" elif pathlib.Path(""/opt/freeware/bin/yum"").is_file(): cmdexe = ""/opt/freeware/bin/yum"" if test: cmdflags += "" --assumeno"" else: cmdflags += "" --assumeyes"" if refresh: cmdflags += "" --refresh"" elif pathlib.Path(""/usr/bin/yum"").is_file(): cmdexe = ""/usr/bin/yum"" if test: cmdflags += "" --assumeno"" else: cmdflags += "" --assumeyes"" else: cmdexe = ""/usr/bin/rpm"" cmdflags = "" -Uivh "" if test: cmdflags += "" --test"" cmd = [cmdexe, cmdflags, target] out = __salt__[""cmd.run_all""](cmd, python_shell=False) if 0 != out[""retcode""]: errors.append(out[""stderr""]) # Get a list of the packages after the uninstall __context__.pop(""pkg.list_pkgs"", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( ""Problems encountered installing filesets(s)/package(s)"", info={""changes"": ret, ""errors"": errors}, ) # No error occurred if test: return ""Test succeeded."" return ret ``` ###Assistant : Install the named fileset(s)/rpm package(s). .. versionadded:: 3005 preference to install rpm packages are to use in the following order: /opt/freeware/bin/dnf /opt/freeware/bin/yum /usr/bin/yum /usr/bin/rpm Note: use of rpm to install implies that rpm's dependencies must have been previously installed. dnf and yum automatically install rpm's dependencies as part of the install process name The name of the fileset or rpm package to be installed. refresh Whether or not to update the yum database before executing. Multiple Package Installation Options: pkgs A list of filesets and/or rpm packages to install. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. version Install a specific version of a fileset/rpm package. (Unused at present). test Verify that command functions correctly: Returns a dict containing the new fileset(s)/rpm package(s) names and versions: {'': {'old': '', 'new': ''}} CLI Example: .. code-block:: bash salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base salt '*' pkg.install pkgs='[""foo"", ""bar""]' " 665,"def is_false(self, ds_key_long): value = self.get_value(ds_key_long) return False if value is None else not bool(value) "," Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set). 
",38,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_false(self, ds_key_long): value = self.get_value(ds_key_long) return False if value is None else not bool(value) ``` ###Assistant : Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set). " 666,"def extract_pytorch_structures(): for opt in lmo.optimizer_registry: # Get the torch class: optimizer_class = lmo.optimizer_registry[opt][0] # Parse and clean the class structure: path = get_fully_qualified_class_name(optimizer_class) opt_struct = get_pytkdocs_structure_for_path(path, ""google"")[""objects""][0] prune_pytorch_structures(opt_struct) # Write it to a file: parent_dir = str(Path(__file__).parent.parent) filename = os.path.join(parent_dir, ""ludwig/validation/generated/torch/"", optimizer_class.__name__) + "".json"" os.makedirs(os.path.dirname(filename), exist_ok=True) with open(filename, ""w"") as outfile: json.dump( opt_struct, outfile, indent=4, sort_keys=True, separators=("","", "": ""), ) outfile.write(""\n"") ","Extracts and saves the parsed structure of all pytorch classes referenced in `ludwig.modules.optimization_modules.optimizer_registry` as JSON files under `ludwig/validation/generated/torch/`.",18,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extract_pytorch_structures(): for opt in lmo.optimizer_registry: # Get the torch class: optimizer_class = lmo.optimizer_registry[opt][0] # Parse and clean the class structure: path = get_fully_qualified_class_name(optimizer_class) opt_struct = get_pytkdocs_structure_for_path(path, ""google"")[""objects""][0] prune_pytorch_structures(opt_struct) # Write it to a file: parent_dir = str(Path(__file__).parent.parent) filename = os.path.join(parent_dir, ""ludwig/validation/generated/torch/"", optimizer_class.__name__) + "".json"" os.makedirs(os.path.dirname(filename), exist_ok=True) with open(filename, ""w"") as outfile: json.dump( opt_struct, outfile, indent=4, sort_keys=True, separators=("","", "": ""), ) outfile.write(""\n"") ``` ###Assistant : Extracts and saves the parsed structure of all pytorch classes referenced in `ludwig.modules.optimization_modules.optimizer_registry` as JSON files under `ludwig/validation/generated/torch/`. " 667,"def test_hf_classification_bin(self, mock_handler): # create predictor create_sql = model_name = 'spam_classifier' predict_sql = self.hf_test_run(mock_handler, model_name, create_sql, predict_sql) "," CREATE PREDICTOR huggingface.spam_classifier predict PRED USING task='text-classification', model_name= ""mrm8488/bert-tiny-finetuned-sms-spam-detection"", input_column = 'text_spammy', labels=['ham','spam'] SELECT h.* FROM pg.df as t JOIN huggingface.spam_classifier as h ",23,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_hf_classification_bin(self, mock_handler): # create predictor create_sql = model_name = 'spam_classifier' predict_sql = self.hf_test_run(mock_handler, model_name, create_sql, predict_sql) ``` ###Assistant : CREATE PREDICTOR huggingface.spam_classifier predict PRED USING task='text-classification', model_name= ""mrm8488/bert-tiny-finetuned-sms-spam-detection"", input_column = 'text_spammy', labels=['ham','spam'] SELECT h.* FROM pg.df as t JOIN huggingface.spam_classifier as h " 668,"def match_submerged_margins(layoutgrids, fig): for sfig in fig.subfigs: match_submerged_margins(layoutgrids, sfig) axs = [a for a in fig.get_axes() if a.get_subplotspec() is not None and a.get_in_layout()] for ax1 in axs: ss1 = ax1.get_subplotspec() if ss1.get_gridspec() not in layoutgrids: axs.remove(ax1) continue lg1 = layoutgrids[ss1.get_gridspec()] # interior columns: if len(ss1.colspan) > 1: maxsubl = np.max( lg1.margin_vals['left'][ss1.colspan[1:]] + lg1.margin_vals['leftcb'][ss1.colspan[1:]] ) maxsubr = np.max( lg1.margin_vals['right'][ss1.colspan[:-1]] + lg1.margin_vals['rightcb'][ss1.colspan[:-1]] ) for ax2 in axs: ss2 = ax2.get_subplotspec() lg2 = layoutgrids[ss2.get_gridspec()] if lg2 is not None and len(ss2.colspan) > 1: maxsubl2 = np.max( lg2.margin_vals['left'][ss2.colspan[1:]] + lg2.margin_vals['leftcb'][ss2.colspan[1:]]) if maxsubl2 > maxsubl: maxsubl = maxsubl2 maxsubr2 = np.max( lg2.margin_vals['right'][ss2.colspan[:-1]] + lg2.margin_vals['rightcb'][ss2.colspan[:-1]]) if maxsubr2 > maxsubr: maxsubr = maxsubr2 for i in ss1.colspan[1:]: lg1.edit_margin_min('left', maxsubl, cell=i) for i in ss1.colspan[:-1]: lg1.edit_margin_min('right', maxsubr, cell=i) # interior rows: if len(ss1.rowspan) > 1: maxsubt = np.max( lg1.margin_vals['top'][ss1.rowspan[1:]] + lg1.margin_vals['topcb'][ss1.rowspan[1:]] ) maxsubb = np.max( lg1.margin_vals['bottom'][ss1.rowspan[:-1]] + lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]] ) for ax2 in axs: ss2 = ax2.get_subplotspec() lg2 = layoutgrids[ss2.get_gridspec()] if lg2 is not None: if len(ss2.rowspan) > 1: maxsubt = np.max([np.max( lg2.margin_vals['top'][ss2.rowspan[1:]] + lg2.margin_vals['topcb'][ss2.rowspan[1:]] ), maxsubt]) maxsubb = np.max([np.max( lg2.margin_vals['bottom'][ss2.rowspan[:-1]] + lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]] ), maxsubb]) for i in ss1.rowspan[1:]: lg1.edit_margin_min('top', maxsubt, cell=i) for i in ss1.rowspan[:-1]: lg1.edit_margin_min('bottom', maxsubb, cell=i) "," Make the margins that are submerged inside an Axes the same size. This allows axes that span two columns (or rows) that are offset from one another to have the same size. This gives the proper layout for something like:: fig = plt.figure(constrained_layout=True) axs = fig.subplot_mosaic(""AAAB\nCCDD"") Without this routine, the axes D will be wider than C, because the margin width between the two columns in C has no width by default, whereas the margins between the two columns of D are set by the width of the margin between A and B. However, obviously the user would like C and D to be the same size, so we need to add constraints to these ""submerged"" margins. This routine makes all the interior margins the same, and the spacing between the three columns in A and the two column in C are all set to the margins between the two columns of D. See test_constrained_layout::test_constrained_layout12 for an example. ",158,190,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def match_submerged_margins(layoutgrids, fig): for sfig in fig.subfigs: match_submerged_margins(layoutgrids, sfig) axs = [a for a in fig.get_axes() if a.get_subplotspec() is not None and a.get_in_layout()] for ax1 in axs: ss1 = ax1.get_subplotspec() if ss1.get_gridspec() not in layoutgrids: axs.remove(ax1) continue lg1 = layoutgrids[ss1.get_gridspec()] # interior columns: if len(ss1.colspan) > 1: maxsubl = np.max( lg1.margin_vals['left'][ss1.colspan[1:]] + lg1.margin_vals['leftcb'][ss1.colspan[1:]] ) maxsubr = np.max( lg1.margin_vals['right'][ss1.colspan[:-1]] + lg1.margin_vals['rightcb'][ss1.colspan[:-1]] ) for ax2 in axs: ss2 = ax2.get_subplotspec() lg2 = layoutgrids[ss2.get_gridspec()] if lg2 is not None and len(ss2.colspan) > 1: maxsubl2 = np.max( lg2.margin_vals['left'][ss2.colspan[1:]] + lg2.margin_vals['leftcb'][ss2.colspan[1:]]) if maxsubl2 > maxsubl: maxsubl = maxsubl2 maxsubr2 = np.max( lg2.margin_vals['right'][ss2.colspan[:-1]] + lg2.margin_vals['rightcb'][ss2.colspan[:-1]]) if maxsubr2 > maxsubr: maxsubr = maxsubr2 for i in ss1.colspan[1:]: lg1.edit_margin_min('left', maxsubl, cell=i) for i in ss1.colspan[:-1]: lg1.edit_margin_min('right', maxsubr, cell=i) # interior rows: if len(ss1.rowspan) > 1: maxsubt = np.max( lg1.margin_vals['top'][ss1.rowspan[1:]] + lg1.margin_vals['topcb'][ss1.rowspan[1:]] ) maxsubb = np.max( lg1.margin_vals['bottom'][ss1.rowspan[:-1]] + lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]] ) for ax2 in axs: ss2 = ax2.get_subplotspec() lg2 = layoutgrids[ss2.get_gridspec()] if lg2 is not None: if len(ss2.rowspan) > 1: maxsubt = np.max([np.max( lg2.margin_vals['top'][ss2.rowspan[1:]] + lg2.margin_vals['topcb'][ss2.rowspan[1:]] ), maxsubt]) maxsubb = np.max([np.max( lg2.margin_vals['bottom'][ss2.rowspan[:-1]] + lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]] ), maxsubb]) for i in ss1.rowspan[1:]: lg1.edit_margin_min('top', maxsubt, cell=i) for i in ss1.rowspan[:-1]: lg1.edit_margin_min('bottom', maxsubb, cell=i) ``` ###Assistant : Make the margins that are submerged inside an Axes the same size. This allows axes that span two columns (or rows) that are offset from one another to have the same size. This gives the proper layout for something like:: fig = plt.figure(constrained_layout=True) axs = fig.subplot_mosaic(""AAAB\nCCDD"") Without this routine, the axes D will be wider than C, because the margin width between the two columns in C has no width by default, whereas the margins between the two columns of D are set by the width of the margin between A and B. However, obviously the user would like C and D to be the same size, so we need to add constraints to these ""submerged"" margins. This routine makes all the interior margins the same, and the spacing between the three columns in A and the two column in C are all set to the margins between the two columns of D. See test_constrained_layout::test_constrained_layout12 for an example. " 669,"def parse_version_info(version_str): version_info = [] for x in version_str.split('.'): if x.isdigit(): version_info.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') version_info.append(int(patch_version[0])) version_info.append(f'rc{patch_version[1]}') return tuple(version_info) version_info = parse_version_info(__version__) ","Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., ""1.3.0"" is parsed into (1, 3, 0), and ""2.0.0rc1"" is parsed into (2, 0, 0, 'rc1'). 
",37,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse_version_info(version_str): version_info = [] for x in version_str.split('.'): if x.isdigit(): version_info.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') version_info.append(int(patch_version[0])) version_info.append(f'rc{patch_version[1]}') return tuple(version_info) version_info = parse_version_info(__version__) ``` ###Assistant : Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., ""1.3.0"" is parsed into (1, 3, 0), and ""2.0.0rc1"" is parsed into (2, 0, 0, 'rc1'). " 670,"def adjust_settings_for_relay_tests(settings): settings.ALLOWED_HOSTS = [ ""localhost"", ""testserver"", ""host.docker.internal"", ""0.0.0.0"", ""127.0.0.1"", ] settings.KAFKA_CLUSTERS = { ""default"": { ""common"": {""bootstrap.servers"": ""127.0.0.1:9092""}, ""producers"": { ""compression.type"": ""lz4"", ""message.max.bytes"": 50000000, # 50MB, default is 1MB }, } } settings.SENTRY_RELAY_WHITELIST_PK = [""SMSesqan65THCV6M4qs4kBzPai60LzuDn-xNsvYpuP8""] settings.SENTRY_USE_RELAY = True @pytest.fixture"," Adjusts the application settings to accept calls from a Relay instance running inside a docker container. :param settings: the app settings ",21,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def adjust_settings_for_relay_tests(settings): settings.ALLOWED_HOSTS = [ ""localhost"", ""testserver"", ""host.docker.internal"", ""0.0.0.0"", ""127.0.0.1"", ] settings.KAFKA_CLUSTERS = { ""default"": { ""common"": {""bootstrap.servers"": ""127.0.0.1:9092""}, ""producers"": { ""compression.type"": ""lz4"", ""message.max.bytes"": 50000000, # 50MB, default is 1MB }, } } settings.SENTRY_RELAY_WHITELIST_PK = [""SMSesqan65THCV6M4qs4kBzPai60LzuDn-xNsvYpuP8""] settings.SENTRY_USE_RELAY = True @pytest.fixture ``` ###Assistant : Adjusts the application settings to accept calls from a Relay instance running inside a docker container. :param settings: the app settings " 671,"def clear_backends(): if xc._version < 79: raise RuntimeError(""clear_backends is not supported in the jaxlib used."" ""Please update your jaxlib package."") xb._clear_backends() jax.lib.xla_bridge._backends = {} dispatch.xla_callable.cache_clear() # type: ignore dispatch.xla_primitive_callable.cache_clear() _cpp_jit_cache.clear() jax_jit.CompiledFunctionCache.clear_all() "," Clear all backend clients so that new backend clients can be created later. ",13,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clear_backends(): if xc._version < 79: raise RuntimeError(""clear_backends is not supported in the jaxlib used."" ""Please update your jaxlib package."") xb._clear_backends() jax.lib.xla_bridge._backends = {} dispatch.xla_callable.cache_clear() # type: ignore dispatch.xla_primitive_callable.cache_clear() _cpp_jit_cache.clear() jax_jit.CompiledFunctionCache.clear_all() ``` ###Assistant : Clear all backend clients so that new backend clients can be created later. 
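A quick usage check for the `parse_version_info` helper shown a little earlier; the function is restated here so the example is self-contained, and plain tuple comparison only gives a rough ordering (it breaks down when an 'rcN' suffix would be compared against an integer).
```Python
def parse_version_info(version_str):
    # Restated from the snippet above for a self-contained example.
    version_info = []
    for x in version_str.split('.'):
        if x.isdigit():
            version_info.append(int(x))
        elif x.find('rc') != -1:
            patch_version = x.split('rc')
            version_info.append(int(patch_version[0]))
            version_info.append(f'rc{patch_version[1]}')
    return tuple(version_info)

assert parse_version_info('1.3.0') == (1, 3, 0)
assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')
assert parse_version_info('1.2.9') < parse_version_info('1.3.0')
```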
" 672,"def get_power_utilization(self): powerfeeds = PowerFeed.objects.filter(rack=self) available_power_total = sum(pf.available_power for pf in powerfeeds) print(f'available_power_total: {available_power_total}') if not available_power_total: return 0 powerports = [] for powerfeed in powerfeeds: powerports.extend([ peer for peer in powerfeed.link_peers if isinstance(peer, PowerPort) ]) allocated_draw = 0 for powerport in powerports: allocated_draw += powerport.get_power_draw()['allocated'] print(f'allocated_draw: {allocated_draw}') return int(allocated_draw / available_power_total * 100) "," Determine the utilization rate of power in the rack and return it as a percentage. ",15,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_power_utilization(self): powerfeeds = PowerFeed.objects.filter(rack=self) available_power_total = sum(pf.available_power for pf in powerfeeds) print(f'available_power_total: {available_power_total}') if not available_power_total: return 0 powerports = [] for powerfeed in powerfeeds: powerports.extend([ peer for peer in powerfeed.link_peers if isinstance(peer, PowerPort) ]) allocated_draw = 0 for powerport in powerports: allocated_draw += powerport.get_power_draw()['allocated'] print(f'allocated_draw: {allocated_draw}') return int(allocated_draw / available_power_total * 100) ``` ###Assistant : Determine the utilization rate of power in the rack and return it as a percentage. " 673,"def cleanup(self): orphaned = [] for w in self.workers[::]: if not w.alive: # the worker process has exited # 1. take the task it was running and enqueue the error # callbacks # 2. take any pending tasks delivered to its queue and # send them to another worker logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode)) if w.current_task: if w.current_task != 'QUIT': try: for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']): reaper.reap_job(j, 'failed') except Exception: logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid'])) orphaned.extend(w.orphaned_tasks) self.workers.remove(w) elif (len(self.workers) > self.min_workers) and w.ready_to_scale_down: # the process has an empty queue (it's idle) and we have # more processes in the pool than we need (> min) # send this process a message so it will exit gracefully # at the next opportunity logger.info(f'scaling down worker pid:{w.pid} prior total:{len(self.workers)}') w.quit() self.workers.remove(w) if w.alive: # if we discover a task manager invocation that's been running # too long, reap it (because otherwise it'll just hold the postgres # advisory lock forever); the goal of this code is to discover # deadlocks or other serious issues in the task manager that cause # the task manager to never do more work current_task = w.current_task if current_task and isinstance(current_task, dict): endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager'] current_task_name = current_task.get('task', '') if any(current_task_name.endswith(e) for e in endings): if 'started' not in current_task: w.managed_tasks[current_task['uuid']]['started'] = time.time() age = time.time() - current_task['started'] w.managed_tasks[current_task['uuid']]['age'] = age if age > self.task_manager_timeout: logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}') os.kill(w.pid, signal.SIGTERM) for m in orphaned: # if all the workers are dead, spawn at least one if not 
len(self.workers): self.up() idx = random.choice(range(len(self.workers))) self.write(idx, m) "," Perform some internal account and cleanup. This is run on every cluster node heartbeat: 1. Discover worker processes that exited, and recover messages they were handling. 2. Clean up unnecessary, idle workers. IMPORTANT: this function is one of the few places in the dispatcher (aside from setting lookups) where we talk to the database. As such, if there's an outage, this method _can_ throw various django.db.utils.Error exceptions. Act accordingly. ",69,270,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cleanup(self): orphaned = [] for w in self.workers[::]: if not w.alive: # the worker process has exited # 1. take the task it was running and enqueue the error # callbacks # 2. take any pending tasks delivered to its queue and # send them to another worker logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode)) if w.current_task: if w.current_task != 'QUIT': try: for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']): reaper.reap_job(j, 'failed') except Exception: logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid'])) orphaned.extend(w.orphaned_tasks) self.workers.remove(w) elif (len(self.workers) > self.min_workers) and w.ready_to_scale_down: # the process has an empty queue (it's idle) and we have # more processes in the pool than we need (> min) # send this process a message so it will exit gracefully # at the next opportunity logger.info(f'scaling down worker pid:{w.pid} prior total:{len(self.workers)}') w.quit() self.workers.remove(w) if w.alive: # if we discover a task manager invocation that's been running # too long, reap it (because otherwise it'll just hold the postgres # advisory lock forever); the goal of this code is to discover # deadlocks or other serious issues in the task manager that cause # the task manager to never do more work current_task = w.current_task if current_task and isinstance(current_task, dict): endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager'] current_task_name = current_task.get('task', '') if any(current_task_name.endswith(e) for e in endings): if 'started' not in current_task: w.managed_tasks[current_task['uuid']]['started'] = time.time() age = time.time() - current_task['started'] w.managed_tasks[current_task['uuid']]['age'] = age if age > self.task_manager_timeout: logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}') os.kill(w.pid, signal.SIGTERM) for m in orphaned: # if all the workers are dead, spawn at least one if not len(self.workers): self.up() idx = random.choice(range(len(self.workers))) self.write(idx, m) ``` ###Assistant : Perform some internal account and cleanup. This is run on every cluster node heartbeat: 1. Discover worker processes that exited, and recover messages they were handling. 2. Clean up unnecessary, idle workers. IMPORTANT: this function is one of the few places in the dispatcher (aside from setting lookups) where we talk to the database. As such, if there's an outage, this method _can_ throw various django.db.utils.Error exceptions. Act accordingly. 
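One piece of the `cleanup` logic above that generalizes is the watchdog for a task manager holding its advisory lock too long. A standalone sketch of that age check; the timeout constant and function name are hypothetical, not part of the original code:
```Python
import os
import signal
import time

TASK_MANAGER_TIMEOUT = 300  # hypothetical value; the real one comes from the pool's settings

def reap_if_stuck(current_task, pid):
    # Record when the long-running task was first noticed, then terminate the
    # worker once it has exceeded the timeout, mirroring the SIGTERM branch above.
    current_task.setdefault('started', time.time())
    age = time.time() - current_task['started']
    if age > TASK_MANAGER_TIMEOUT:
        os.kill(pid, signal.SIGTERM)
    return age

task = {'uuid': 'abc123', 'task': 'tasks.task_manager'}
print(reap_if_stuck(task, pid=os.getpid()))  # age is ~0 here, so no signal is sent
```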
" 674,"def test_token_node_empty_csrf_cookie(self): req = self._get_request(cookie="""") mw = CsrfViewMiddleware(token_view) mw.process_view(req, token_view, (), {}) resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) csrf_secret = _unmask_cipher_token(token) self._check_token_present(resp, csrf_secret) "," A new token is sent if the csrf_cookie is the empty string. ",12,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_token_node_empty_csrf_cookie(self): req = self._get_request(cookie="""") mw = CsrfViewMiddleware(token_view) mw.process_view(req, token_view, (), {}) resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) csrf_secret = _unmask_cipher_token(token) self._check_token_present(resp, csrf_secret) ``` ###Assistant : A new token is sent if the csrf_cookie is the empty string. " 675,"def save(self, envs): assert isinstance(envs, list), ""envs should be a list"" if len(envs) > 0: for env in envs: assert isstr(env), ""env should be a string"" return self._send( { ""data"": envs, }, ""save"", ) "," This function allows the user to save envs that are alive on the Tornado server. The envs can be specified as a list of env ids. ",26,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save(self, envs): assert isinstance(envs, list), ""envs should be a list"" if len(envs) > 0: for env in envs: assert isstr(env), ""env should be a string"" return self._send( { ""data"": envs, }, ""save"", ) ``` ###Assistant : This function allows the user to save envs that are alive on the Tornado server. The envs can be specified as a list of env ids. " 676,"def test_has_related_field_in_list_display_o2o(self): media = Media.objects.create(name=""Foo"") Vodcast.objects.create(media=media) response = self.client.get(reverse(""admin:admin_views_vodcast_changelist""), {}) response.context[""cl""].list_display = [""media""] self.assertIs(response.context[""cl""].has_related_field_in_list_display(), True) response.context[""cl""].list_display = [""media_id""] self.assertIs(response.context[""cl""].has_related_field_in_list_display(), False) ",Joins shouldn't be performed for _id fields in list display.,10,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_has_related_field_in_list_display_o2o(self): media = Media.objects.create(name=""Foo"") Vodcast.objects.create(media=media) response = self.client.get(reverse(""admin:admin_views_vodcast_changelist""), {}) response.context[""cl""].list_display = [""media""] self.assertIs(response.context[""cl""].has_related_field_in_list_display(), True) response.context[""cl""].list_display = [""media_id""] self.assertIs(response.context[""cl""].has_related_field_in_list_display(), False) ``` ###Assistant : Joins shouldn't be performed for _id fields in list display. " 677,"def binary_crossentropy(target, output, from_logits=False): target = tf.convert_to_tensor(target) output = tf.convert_to_tensor(output) # Use logits whenever they are available. `softmax` and `sigmoid` # activations cache logits on the `output` Tensor. 
if hasattr(output, ""_keras_logits""): output = output._keras_logits # pylint: disable=protected-access if from_logits: warnings.warn( '""`binary_crossentropy` received `from_logits=True`, but the `output`' "" argument was produced by a sigmoid or softmax activation and thus "" 'does not represent logits. Was this intended?""', stacklevel=2, ) from_logits = True if from_logits: return tf.nn.sigmoid_cross_entropy_with_logits( labels=target, logits=output ) if ( not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable)) and output.op.type == ""Sigmoid"" ) and not hasattr(output, ""_keras_history""): # When sigmoid activation function is used for output operation, we # use logits from the sigmoid function directly to compute loss in order # to prevent collapsing zero when training. assert len(output.op.inputs) == 1 output = output.op.inputs[0] return tf.nn.sigmoid_cross_entropy_with_logits( labels=target, logits=output ) epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype) output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_) # Compute cross entropy from probabilities. bce = target * tf.math.log(output + epsilon()) bce += (1 - target) * tf.math.log(1 - output + epsilon()) return -bce @keras_export(""keras.backend.binary_focal_crossentropy"") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs","Binary crossentropy between an output tensor and a target tensor. Args: target: A tensor with the same shape as `output`. output: A tensor. from_logits: Whether `output` is expected to be a logits tensor. By default, we consider that `output` encodes a probability distribution. Returns: A tensor. ",46,176,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def binary_crossentropy(target, output, from_logits=False): target = tf.convert_to_tensor(target) output = tf.convert_to_tensor(output) # Use logits whenever they are available. `softmax` and `sigmoid` # activations cache logits on the `output` Tensor. if hasattr(output, ""_keras_logits""): output = output._keras_logits # pylint: disable=protected-access if from_logits: warnings.warn( '""`binary_crossentropy` received `from_logits=True`, but the `output`' "" argument was produced by a sigmoid or softmax activation and thus "" 'does not represent logits. Was this intended?""', stacklevel=2, ) from_logits = True if from_logits: return tf.nn.sigmoid_cross_entropy_with_logits( labels=target, logits=output ) if ( not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable)) and output.op.type == ""Sigmoid"" ) and not hasattr(output, ""_keras_history""): # When sigmoid activation function is used for output operation, we # use logits from the sigmoid function directly to compute loss in order # to prevent collapsing zero when training. assert len(output.op.inputs) == 1 output = output.op.inputs[0] return tf.nn.sigmoid_cross_entropy_with_logits( labels=target, logits=output ) epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype) output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_) # Compute cross entropy from probabilities. bce = target * tf.math.log(output + epsilon()) bce += (1 - target) * tf.math.log(1 - output + epsilon()) return -bce @keras_export(""keras.backend.binary_focal_crossentropy"") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs ``` ###Assistant : Binary crossentropy between an output tensor and a target tensor. 
Args: target: A tensor with the same shape as `output`. output: A tensor. from_logits: Whether `output` is expected to be a logits tensor. By default, we consider that `output` encodes a probability distribution. Returns: A tensor. " 678,"def test_process_pulled_event_with_missing_state(self) -> None: return self._test_process_pulled_event_with_missing_state(False) ","Ensure that we correctly handle pulled events with lots of missing state In this test, we pretend we are processing a ""pulled"" event (eg, via backfill or get_missing_events). The pulled event has a prev_event we haven't previously seen, so the server requests the state at that prev_event. There is a lot of state we don't have, so we expect the server to make a /state request. We check that the pulled event is correctly persisted, and that the state is as we expect. ",83,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_process_pulled_event_with_missing_state(self) -> None: return self._test_process_pulled_event_with_missing_state(False) ``` ###Assistant : Ensure that we correctly handle pulled events with lots of missing state In this test, we pretend we are processing a ""pulled"" event (eg, via backfill or get_missing_events). The pulled event has a prev_event we haven't previously seen, so the server requests the state at that prev_event. There is a lot of state we don't have, so we expect the server to make a /state request. We check that the pulled event is correctly persisted, and that the state is as we expect. " 679,"def test_resolved_in_release(self, mock_func): notification = ResolvedInReleaseActivityNotification( Activity( project=self.project, group=self.group, user=self.user, type=ActivityType.SET_RESOLVED_IN_RELEASE, data={""version"": ""meow""}, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() release_name = notification.activity.data[""version""] assert text == f""Issue marked as resolved in {release_name} by {self.name}"" assert ( attachment[""footer""] == f""{self.project.slug} | "" ) "," Test that a Slack message is sent with the expected payload when an issue is resolved in a release ",19,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_resolved_in_release(self, mock_func): notification = ResolvedInReleaseActivityNotification( Activity( project=self.project, group=self.group, user=self.user, type=ActivityType.SET_RESOLVED_IN_RELEASE, data={""version"": ""meow""}, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() release_name = notification.activity.data[""version""] assert text == f""Issue marked as resolved in {release_name} by {self.name}"" assert ( attachment[""footer""] == f""{self.project.slug} | "" ) ``` ###Assistant : Test that a Slack message is sent with the expected payload when an issue is resolved in a release " 680,"def get_vocabulary(self, include_special_tokens=True): # The lookup table data will not be sorted, so we will create a inverted # lookup here, and use that to lookup a range of indices [0, # vocab_size). 
if self.lookup_table.size() == 0: vocab, indices = [], [] else: keys, values = self.lookup_table.export() vocab, indices = (values, keys) if self.invert else (keys, values) vocab, indices = ( self._tensor_vocab_to_numpy(vocab), indices.numpy(), ) lookup = collections.defaultdict( lambda: self.oov_token, zip(indices, vocab) ) vocab = [lookup[x] for x in range(self.vocabulary_size())] if self.mask_token is not None and self.output_mode == INT: vocab[0] = self.mask_token if not include_special_tokens: vocab = vocab[self._token_start_index() :] return vocab ","Returns the current vocabulary of the layer. Args: include_special_tokens: If True, the returned vocabulary will include mask and OOV tokens, and a term's index in the vocabulary will equal the term's index when calling the layer. If False, the returned vocabulary will not include any mask or OOV tokens. ",49,100,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_vocabulary(self, include_special_tokens=True): # The lookup table data will not be sorted, so we will create a inverted # lookup here, and use that to lookup a range of indices [0, # vocab_size). if self.lookup_table.size() == 0: vocab, indices = [], [] else: keys, values = self.lookup_table.export() vocab, indices = (values, keys) if self.invert else (keys, values) vocab, indices = ( self._tensor_vocab_to_numpy(vocab), indices.numpy(), ) lookup = collections.defaultdict( lambda: self.oov_token, zip(indices, vocab) ) vocab = [lookup[x] for x in range(self.vocabulary_size())] if self.mask_token is not None and self.output_mode == INT: vocab[0] = self.mask_token if not include_special_tokens: vocab = vocab[self._token_start_index() :] return vocab ``` ###Assistant : Returns the current vocabulary of the layer. Args: include_special_tokens: If True, the returned vocabulary will include mask and OOV tokens, and a term's index in the vocabulary will equal the term's index when calling the layer. If False, the returned vocabulary will not include any mask or OOV tokens. " 681,"def test_error_raised_with_float_limited_eval_batches(): model = BoringModel() dl_size = len(model.val_dataloader()) limit_val_batches = 1 / (dl_size + 2) trainer = Trainer(limit_val_batches=limit_val_batches) trainer._data_connector.attach_data(model) with pytest.raises( MisconfigurationException, match=fr""{limit_val_batches} \* {dl_size} < 1. Please increase the `limit_val_batches`"", ): trainer._data_connector._reset_eval_dataloader(RunningStage.VALIDATING, model) @pytest.mark.parametrize( ""val_dl"", [ DataLoader(dataset=RandomDataset(32, 64), shuffle=True), CombinedLoader(DataLoader(dataset=RandomDataset(32, 64), shuffle=True)), CombinedLoader( [DataLoader(dataset=RandomDataset(32, 64)), DataLoader(dataset=RandomDataset(32, 64), shuffle=True)] ), CombinedLoader( { ""dl1"": DataLoader(dataset=RandomDataset(32, 64)), ""dl2"": DataLoader(dataset=RandomDataset(32, 64), shuffle=True), } ), ], )","Test that an error is raised if there are not enough batches when passed with float value of limit_eval_batches.",19,63,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_error_raised_with_float_limited_eval_batches(): model = BoringModel() dl_size = len(model.val_dataloader()) limit_val_batches = 1 / (dl_size + 2) trainer = Trainer(limit_val_batches=limit_val_batches) trainer._data_connector.attach_data(model) with pytest.raises( MisconfigurationException, match=fr""{limit_val_batches} \* {dl_size} < 1. Please increase the `limit_val_batches`"", ): trainer._data_connector._reset_eval_dataloader(RunningStage.VALIDATING, model) @pytest.mark.parametrize( ""val_dl"", [ DataLoader(dataset=RandomDataset(32, 64), shuffle=True), CombinedLoader(DataLoader(dataset=RandomDataset(32, 64), shuffle=True)), CombinedLoader( [DataLoader(dataset=RandomDataset(32, 64)), DataLoader(dataset=RandomDataset(32, 64), shuffle=True)] ), CombinedLoader( { ""dl1"": DataLoader(dataset=RandomDataset(32, 64)), ""dl2"": DataLoader(dataset=RandomDataset(32, 64), shuffle=True), } ), ], ) ``` ###Assistant : Test that an error is raised if there are not enough batches when passed with float value of limit_eval_batches. " 682,"def not_none_device_or_backend_on_jit(backend, device, num_ins): # TODO(yashkatariya): Remove this entire function when backend and device are # removed as arguments on jit. from jax.experimental import sharding if device is not None and backend is not None: raise ValueError(""can't specify both a device and a backend for jit, "" ""got device={} and backend={}"".format(device, backend)) if backend is not None: da = [xb.get_backend(backend).get_default_device_assignment(1)[0]] else: assert device is not None da = [device] assert len(da) == 1 # Set committed to True for this path because it simulates a device_put on # behalf of a user. committed = True # in_shardings will be marked as replicated regardless of whatever the input # had. Given that only a single device is allowed above, this is correct. in_shardings = [sharding.OpShardingSharding.get_replicated(da)] * num_ins return committed, da, in_shardings ","This is to support the backend and device argument on jit. It's a feature that's deprecated but needs to be supported for feature parity and so that we can delete the non-Array paths when Array is switched on. ",38,130,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def not_none_device_or_backend_on_jit(backend, device, num_ins): # TODO(yashkatariya): Remove this entire function when backend and device are # removed as arguments on jit. from jax.experimental import sharding if device is not None and backend is not None: raise ValueError(""can't specify both a device and a backend for jit, "" ""got device={} and backend={}"".format(device, backend)) if backend is not None: da = [xb.get_backend(backend).get_default_device_assignment(1)[0]] else: assert device is not None da = [device] assert len(da) == 1 # Set committed to True for this path because it simulates a device_put on # behalf of a user. committed = True # in_shardings will be marked as replicated regardless of whatever the input # had. Given that only a single device is allowed above, this is correct. in_shardings = [sharding.OpShardingSharding.get_replicated(da)] * num_ins return committed, da, in_shardings ``` ###Assistant : This is to support the backend and device argument on jit. It's a feature that's deprecated but needs to be supported for feature parity and so that we can delete the non-Array paths when Array is switched on. 
" 683,"def is_mouse_scrolling(self, *args): return 'button' in self.profile and 'scroll' in self.button ","Returns True if the touch event is a mousewheel scrolling .. versionadded:: 1.6.0 ",13,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_mouse_scrolling(self, *args): return 'button' in self.profile and 'scroll' in self.button ``` ###Assistant : Returns True if the touch event is a mousewheel scrolling .. versionadded:: 1.6.0 " 684,"def easy_print(*args, size=(None, None), end=None, sep=None, location=(None, None), relative_location=(None, None), font=None, no_titlebar=False, no_button=False, grab_anywhere=False, keep_on_top=None, do_not_reroute_stdout=True, echo_stdout=False, text_color=None, background_color=None, colors=None, c=None, erase_all=False, resizable=True, blocking=None): if _DebugWin.debug_window is None: _DebugWin.debug_window = _DebugWin(size=size, location=location, relative_location=relative_location, font=font, no_titlebar=no_titlebar, no_button=no_button, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, do_not_reroute_stdout=do_not_reroute_stdout, echo_stdout=echo_stdout, resizable=resizable, blocking=blocking) txt_color, bg_color = _parse_colors_parm(c or colors) _DebugWin.debug_window.Print(*args, end=end, sep=sep, text_color=text_color or txt_color, background_color=background_color or bg_color, erase_all=erase_all, font=font, blocking=blocking) "," Works like a ""print"" statement but with windowing options. Routes output to the ""Debug Window"" In addition to the normal text and background colors, you can use a ""colors"" tuple/string The ""colors"" or ""c"" parameter defines both the text and background in a single parm. It can be a tuple or a single single. Both text and background colors need to be specified colors -(str, str) or str. A combined text/background color definition in a single parameter c - (str, str) - Colors tuple has format (foreground, backgrouned) c - str - can also be a string of the format ""foreground on background"" (""white on red"") :param *args: stuff to output :type *args: (Any) :param size: (w,h) w=characters-wide, h=rows-high :type size: (int, int) :param end: end character :type end: (str) :param sep: separator character :type sep: (str) :param location: Location of upper left corner of the window :type location: (int, int) :param relative_location: (x,y) location relative to the default location of the window, in pixels. Normally the window centers. This location is relative to the location the window would be created. Note they can be negative. :type relative_location: (int, int) :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike :type font: (str or (str, int[, str]) or None) :param no_titlebar: If True no titlebar will be shown :type no_titlebar: (bool) :param no_button: don't show button :type no_button: (bool) :param grab_anywhere: If True: can grab anywhere to move the window (Default = False) :type grab_anywhere: (bool) :param background_color: color of background :type background_color: (str) :param text_color: color of the text :type text_color: (str) :param keep_on_top: If True the window will remain above all current windows :type keep_on_top: (bool) :param location: Location of upper left corner of the window :type location: (int, int) :param do_not_reroute_stdout: do not reroute stdout and stderr. 
If False, both stdout and stderr will reroute to here :type do_not_reroute_stdout: (bool) :param echo_stdout: If True stdout is sent to both the console and the debug window :type echo_stdout: (bool) :param colors: Either a tuple or a string that has both the text and background colors :type colors: (str) or (str, str) :param c: Either a tuple or a string that has both the text and background colors :type c: (str) or (str, str) :param resizable: if True, the user can resize the debug window. Default is True :type resizable: (bool) :param erase_all: If True when erase the output before printing :type erase_all: (bool) :param blocking: if True, makes the window block instead of returning immediately. The ""Quit"" button changers to ""More"" :type blocking: (bool | None) :return: :rtype: ",444,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def easy_print(*args, size=(None, None), end=None, sep=None, location=(None, None), relative_location=(None, None), font=None, no_titlebar=False, no_button=False, grab_anywhere=False, keep_on_top=None, do_not_reroute_stdout=True, echo_stdout=False, text_color=None, background_color=None, colors=None, c=None, erase_all=False, resizable=True, blocking=None): if _DebugWin.debug_window is None: _DebugWin.debug_window = _DebugWin(size=size, location=location, relative_location=relative_location, font=font, no_titlebar=no_titlebar, no_button=no_button, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, do_not_reroute_stdout=do_not_reroute_stdout, echo_stdout=echo_stdout, resizable=resizable, blocking=blocking) txt_color, bg_color = _parse_colors_parm(c or colors) _DebugWin.debug_window.Print(*args, end=end, sep=sep, text_color=text_color or txt_color, background_color=background_color or bg_color, erase_all=erase_all, font=font, blocking=blocking) ``` ###Assistant : Works like a ""print"" statement but with windowing options. Routes output to the ""Debug Window"" In addition to the normal text and background colors, you can use a ""colors"" tuple/string The ""colors"" or ""c"" parameter defines both the text and background in a single parm. It can be a tuple or a single single. Both text and background colors need to be specified colors -(str, str) or str. A combined text/background color definition in a single parameter c - (str, str) - Colors tuple has format (foreground, backgrouned) c - str - can also be a string of the format ""foreground on background"" (""white on red"") :param *args: stuff to output :type *args: (Any) :param size: (w,h) w=characters-wide, h=rows-high :type size: (int, int) :param end: end character :type end: (str) :param sep: separator character :type sep: (str) :param location: Location of upper left corner of the window :type location: (int, int) :param relative_location: (x,y) location relative to the default location of the window, in pixels. Normally the window centers. This location is relative to the location the window would be created. Note they can be negative. :type relative_location: (int, int) :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. 
Styles: italic * roman bold normal underline overstrike :type font: (str or (str, int[, str]) or None) :param no_titlebar: If True no titlebar will be shown :type no_titlebar: (bool) :param no_button: don't show button :type no_button: (bool) :param grab_anywhere: If True: can grab anywhere to move the window (Default = False) :type grab_anywhere: (bool) :param background_color: color of background :type background_color: (str) :param text_color: color of the text :type text_color: (str) :param keep_on_top: If True the window will remain above all current windows :type keep_on_top: (bool) :param location: Location of upper left corner of the window :type location: (int, int) :param do_not_reroute_stdout: do not reroute stdout and stderr. If False, both stdout and stderr will reroute to here :type do_not_reroute_stdout: (bool) :param echo_stdout: If True stdout is sent to both the console and the debug window :type echo_stdout: (bool) :param colors: Either a tuple or a string that has both the text and background colors :type colors: (str) or (str, str) :param c: Either a tuple or a string that has both the text and background colors :type c: (str) or (str, str) :param resizable: if True, the user can resize the debug window. Default is True :type resizable: (bool) :param erase_all: If True when erase the output before printing :type erase_all: (bool) :param blocking: if True, makes the window block instead of returning immediately. The ""Quit"" button changers to ""More"" :type blocking: (bool | None) :return: :rtype: " 685,"def wait_start_success(self): _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 if self._wait_for_ready_or_shutdown(_timeout): self._check_failed_to_start() self.logger.debug(__ready_msg__) else: self._fail_start_timeout(_timeout) ","Block until all pods starts successfully. If not success, it will raise an error hoping the outer function to catch it ",21,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wait_start_success(self): _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 if self._wait_for_ready_or_shutdown(_timeout): self._check_failed_to_start() self.logger.debug(__ready_msg__) else: self._fail_start_timeout(_timeout) ``` ###Assistant : Block until all pods starts successfully. If not success, it will raise an error hoping the outer function to catch it " 686,"def _rank_decomposition(M, iszerofunc=_iszero, simplify=False): r F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc, pivots=True) rank = len(pivot_cols) C = M.extract(range(M.rows), pivot_cols) F = F[:rank, :] return C, F ","Returns a pair of matrices (`C`, `F`) with matching rank such that `A = C F`. Parameters ========== iszerofunc : Function, optional A function used for detecting whether an element can act as a pivot. ``lambda x: x.is_zero`` is used by default. simplify : Bool or Function, optional A function used to simplify elements when looking for a pivot. By default SymPy's ``simplify`` is used. Returns ======= (C, F) : Matrices `C` and `F` are full-rank matrices with rank as same as `A`, whose product gives `A`. See Notes for additional mathematical details. Examples ======== >>> from sympy import Matrix >>> A = Matrix([ ... [1, 3, 1, 4], ... [2, 7, 3, 9], ... [1, 5, 3, 1], ... [1, 2, 0, 8] ... 
]) >>> C, F = A.rank_decomposition() >>> C Matrix([ [1, 3, 4], [2, 7, 9], [1, 5, 1], [1, 2, 8]]) >>> F Matrix([ [1, 0, -2, 0], [0, 1, 1, 0], [0, 0, 0, 1]]) >>> C * F == A True Notes ===== Obtaining `F`, an RREF of `A`, is equivalent to creating a product .. math:: E_n E_{n-1} ... E_1 A = F where `E_n, E_{n-1}, \dots, E_1` are the elimination matrices or permutation matrices equivalent to each row-reduction step. The inverse of the same product of elimination matrices gives `C`: .. math:: C = \left(E_n E_{n-1} \dots E_1\right)^{-1} It is not necessary, however, to actually compute the inverse: the columns of `C` are those from the original matrix with the same column indices as the indices of the pivot columns of `F`. References ========== .. [1] https://en.wikipedia.org/wiki/Rank_factorization .. [2] Piziak, R.; Odell, P. L. (1 June 1999). ""Full Rank Factorization of Matrices"". Mathematics Magazine. 72 (3): 193. doi:10.2307/2690882 See Also ======== sympy.matrices.matrices.MatrixReductions.rref ",291,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _rank_decomposition(M, iszerofunc=_iszero, simplify=False): r F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc, pivots=True) rank = len(pivot_cols) C = M.extract(range(M.rows), pivot_cols) F = F[:rank, :] return C, F ``` ###Assistant : Returns a pair of matrices (`C`, `F`) with matching rank such that `A = C F`. Parameters ========== iszerofunc : Function, optional A function used for detecting whether an element can act as a pivot. ``lambda x: x.is_zero`` is used by default. simplify : Bool or Function, optional A function used to simplify elements when looking for a pivot. By default SymPy's ``simplify`` is used. Returns ======= (C, F) : Matrices `C` and `F` are full-rank matrices with rank as same as `A`, whose product gives `A`. See Notes for additional mathematical details. Examples ======== >>> from sympy import Matrix >>> A = Matrix([ ... [1, 3, 1, 4], ... [2, 7, 3, 9], ... [1, 5, 3, 1], ... [1, 2, 0, 8] ... ]) >>> C, F = A.rank_decomposition() >>> C Matrix([ [1, 3, 4], [2, 7, 9], [1, 5, 1], [1, 2, 8]]) >>> F Matrix([ [1, 0, -2, 0], [0, 1, 1, 0], [0, 0, 0, 1]]) >>> C * F == A True Notes ===== Obtaining `F`, an RREF of `A`, is equivalent to creating a product .. math:: E_n E_{n-1} ... E_1 A = F where `E_n, E_{n-1}, \dots, E_1` are the elimination matrices or permutation matrices equivalent to each row-reduction step. The inverse of the same product of elimination matrices gives `C`: .. math:: C = \left(E_n E_{n-1} \dots E_1\right)^{-1} It is not necessary, however, to actually compute the inverse: the columns of `C` are those from the original matrix with the same column indices as the indices of the pivot columns of `F`. References ========== .. [1] https://en.wikipedia.org/wiki/Rank_factorization .. [2] Piziak, R.; Odell, P. L. (1 June 1999). ""Full Rank Factorization of Matrices"". Mathematics Magazine. 72 (3): 193. 
doi:10.2307/2690882 See Also ======== sympy.matrices.matrices.MatrixReductions.rref " 687,"def normalize(X, norm=""l2"", *, axis=1, copy=True, return_norm=False): if norm not in (""l1"", ""l2"", ""max""): raise ValueError(""'%s' is not a supported norm"" % norm) if axis == 0: sparse_format = ""csc"" elif axis == 1: sparse_format = ""csr"" else: raise ValueError(""'%d' is not a supported axis"" % axis) X = check_array( X, accept_sparse=sparse_format, copy=copy, estimator=""the normalize function"", dtype=FLOAT_DTYPES, ) if axis == 0: X = X.T if sparse.issparse(X): if return_norm and norm in (""l1"", ""l2""): raise NotImplementedError( ""return_norm=True is not implemented "" ""for sparse matrices with norm 'l1' "" ""or norm 'l2'"" ) if norm == ""l1"": inplace_csr_row_normalize_l1(X) elif norm == ""l2"": inplace_csr_row_normalize_l2(X) elif norm == ""max"": mins, maxes = min_max_axis(X, 1) norms = np.maximum(abs(mins), maxes) norms_elementwise = norms.repeat(np.diff(X.indptr)) mask = norms_elementwise != 0 X.data[mask] /= norms_elementwise[mask] else: if norm == ""l1"": norms = np.abs(X).sum(axis=1) elif norm == ""l2"": norms = row_norms(X) elif norm == ""max"": norms = np.max(abs(X), axis=1) norms = _handle_zeros_in_scale(norms, copy=False) X /= norms[:, np.newaxis] if axis == 0: X = X.T if return_norm: return X, norms else: return X ","Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide `. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : {'l1', 'l2', 'max'}, default='l2' The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : {0, 1}, default=1 axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : bool, default=True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). return_norm : bool, default=False Whether to return the computed norms. Returns ------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Normalized input X. norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, ) An array of norms along given axis for X. When X is sparse, a NotImplementedError will be raised for norm 'l1' or 'l2'. See Also -------- Normalizer : Performs normalization using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py `. ",220,172,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def normalize(X, norm=""l2"", *, axis=1, copy=True, return_norm=False): if norm not in (""l1"", ""l2"", ""max""): raise ValueError(""'%s' is not a supported norm"" % norm) if axis == 0: sparse_format = ""csc"" elif axis == 1: sparse_format = ""csr"" else: raise ValueError(""'%d' is not a supported axis"" % axis) X = check_array( X, accept_sparse=sparse_format, copy=copy, estimator=""the normalize function"", dtype=FLOAT_DTYPES, ) if axis == 0: X = X.T if sparse.issparse(X): if return_norm and norm in (""l1"", ""l2""): raise NotImplementedError( ""return_norm=True is not implemented "" ""for sparse matrices with norm 'l1' "" ""or norm 'l2'"" ) if norm == ""l1"": inplace_csr_row_normalize_l1(X) elif norm == ""l2"": inplace_csr_row_normalize_l2(X) elif norm == ""max"": mins, maxes = min_max_axis(X, 1) norms = np.maximum(abs(mins), maxes) norms_elementwise = norms.repeat(np.diff(X.indptr)) mask = norms_elementwise != 0 X.data[mask] /= norms_elementwise[mask] else: if norm == ""l1"": norms = np.abs(X).sum(axis=1) elif norm == ""l2"": norms = row_norms(X) elif norm == ""max"": norms = np.max(abs(X), axis=1) norms = _handle_zeros_in_scale(norms, copy=False) X /= norms[:, np.newaxis] if axis == 0: X = X.T if return_norm: return X, norms else: return X ``` ###Assistant : Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide `. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : {'l1', 'l2', 'max'}, default='l2' The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : {0, 1}, default=1 axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : bool, default=True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). return_norm : bool, default=False Whether to return the computed norms. Returns ------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Normalized input X. norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, ) An array of norms along given axis for X. When X is sparse, a NotImplementedError will be raised for norm 'l1' or 'l2'. See Also -------- Normalizer : Performs normalization using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py `. " 688,"def find_version_to_install(self, name): version = Version.parse(name) if version.patch is not None: return name try: best_match = max( ( inst_version for inst_version in self.iter_installable_versions() if inst_version.matches_minor(version) ), key=operator.attrgetter(""cmpkey""), ) except ValueError: raise ValueError( f""no installable version found for {name!r}"", ) return best_match ","Find a version in the installer from the version supplied. A ValueError is raised if a matching version cannot be found. ",21,41,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def find_version_to_install(self, name): version = Version.parse(name) if version.patch is not None: return name try: best_match = max( ( inst_version for inst_version in self.iter_installable_versions() if inst_version.matches_minor(version) ), key=operator.attrgetter(""cmpkey""), ) except ValueError: raise ValueError( f""no installable version found for {name!r}"", ) return best_match ``` ###Assistant : Find a version in the installer from the version supplied. A ValueError is raised if a matching version cannot be found. " 689,"def copy_safe_request(request): meta = { k: request.META[k] for k in HTTP_REQUEST_META_SAFE_COPY if k in request.META and isinstance(request.META[k], str) } return NetBoxFakeRequest({ 'META': meta, 'COOKIES': request.COOKIES, 'POST': request.POST, 'GET': request.GET, 'FILES': request.FILES, 'user': request.user, 'path': request.path, 'id': getattr(request, 'id', None), # UUID assigned by middleware }) "," Copy selected attributes from a request object into a new fake request object. This is needed in places where thread safe pickling of the useful request data is needed. ",29,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def copy_safe_request(request): meta = { k: request.META[k] for k in HTTP_REQUEST_META_SAFE_COPY if k in request.META and isinstance(request.META[k], str) } return NetBoxFakeRequest({ 'META': meta, 'COOKIES': request.COOKIES, 'POST': request.POST, 'GET': request.GET, 'FILES': request.FILES, 'user': request.user, 'path': request.path, 'id': getattr(request, 'id', None), # UUID assigned by middleware }) ``` ###Assistant : Copy selected attributes from a request object into a new fake request object. This is needed in places where thread safe pickling of the useful request data is needed. " 690,"def batchify(self, obs_batch, sort=False): batch = super().batchify(obs_batch, sort=sort) if batch.valid_indices is None: return batch batch.classifier_label = torch.tensor( [ [obs_batch[i].get('classifier_label_idx', -1)] for i in batch.valid_indices ] ) batch.is_ltr = torch.tensor( [[obs_batch[i].get('is_ltr', False)] for i in batch.valid_indices] ) return batch "," This method calls the parent class's batchify method and then add classifier_label and is_ltr property to the the batch. ",19,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def batchify(self, obs_batch, sort=False): batch = super().batchify(obs_batch, sort=sort) if batch.valid_indices is None: return batch batch.classifier_label = torch.tensor( [ [obs_batch[i].get('classifier_label_idx', -1)] for i in batch.valid_indices ] ) batch.is_ltr = torch.tensor( [[obs_batch[i].get('is_ltr', False)] for i in batch.valid_indices] ) return batch ``` ###Assistant : This method calls the parent class's batchify method and then add classifier_label and is_ltr property to the the batch. " 691,"def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs): for _ in range(n_convs): height = (height - kernel_size + 2 * pad) // stride + 1 return height ",Height of spec after n convolutions with fixed kernel/stride/pad.,9,25,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs): for _ in range(n_convs): height = (height - kernel_size + 2 * pad) // stride + 1 return height ``` ###Assistant : Height of spec after n convolutions with fixed kernel/stride/pad. " 692,"async def test_hls_playlist_view(hass, setup_component, hls_stream, stream_worker_sync): stream = create_stream(hass, STREAM_SOURCE, {}, dynamic_stream_settings()) stream_worker_sync.pause() hls = stream.add_provider(HLS_PROVIDER) for i in range(2): segment = Segment(sequence=i, duration=SEGMENT_DURATION) hls.put(segment) await hass.async_block_till_done() hls_client = await hls_stream(stream) resp = await hls_client.get(""/playlist.m3u8"") assert resp.status == HTTPStatus.OK assert await resp.text() == make_playlist( sequence=0, segments=[make_segment(0), make_segment(1)] ) segment = Segment(sequence=2, duration=SEGMENT_DURATION) hls.put(segment) await hass.async_block_till_done() resp = await hls_client.get(""/playlist.m3u8"") assert resp.status == HTTPStatus.OK assert await resp.text() == make_playlist( sequence=0, segments=[make_segment(0), make_segment(1), make_segment(2)] ) stream_worker_sync.resume() await stream.stop() ",Test rendering the hls playlist with 1 and 2 output segments.,11,76,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_hls_playlist_view(hass, setup_component, hls_stream, stream_worker_sync): stream = create_stream(hass, STREAM_SOURCE, {}, dynamic_stream_settings()) stream_worker_sync.pause() hls = stream.add_provider(HLS_PROVIDER) for i in range(2): segment = Segment(sequence=i, duration=SEGMENT_DURATION) hls.put(segment) await hass.async_block_till_done() hls_client = await hls_stream(stream) resp = await hls_client.get(""/playlist.m3u8"") assert resp.status == HTTPStatus.OK assert await resp.text() == make_playlist( sequence=0, segments=[make_segment(0), make_segment(1)] ) segment = Segment(sequence=2, duration=SEGMENT_DURATION) hls.put(segment) await hass.async_block_till_done() resp = await hls_client.get(""/playlist.m3u8"") assert resp.status == HTTPStatus.OK assert await resp.text() == make_playlist( sequence=0, segments=[make_segment(0), make_segment(1), make_segment(2)] ) stream_worker_sync.resume() await stream.stop() ``` ###Assistant : Test rendering the hls playlist with 1 and 2 output segments. " 693,"def _create_local_rank_map(self) -> Dict: rank_mapping = {} ip_dict = defaultdict(int) for world_rank in range(len(self.worker_group)): worker = self.worker_group.workers[world_rank] node_ip = worker.metadata.node_ip rank_mapping[world_rank] = ip_dict[node_ip] ip_dict[node_ip] += 1 return rank_mapping ","Create mapping from worker world_rank to local_rank. Example: Worker 0: 0.0.0.0 Worker 1: 0.0.0.0 Worker 2: 0.0.0.1 Worker 3: 0.0.0.0 Worker 4: 0.0.0.1 Workers 0, 1, 3 are on 0.0.0.0. Workers 2, 4 are on 0.0.0.1. Expected Output: { 0 -> 0, 1 -> 1, 2 -> 0, 3 -> 2, 4 -> 1 } ",55,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_local_rank_map(self) -> Dict: rank_mapping = {} ip_dict = defaultdict(int) for world_rank in range(len(self.worker_group)): worker = self.worker_group.workers[world_rank] node_ip = worker.metadata.node_ip rank_mapping[world_rank] = ip_dict[node_ip] ip_dict[node_ip] += 1 return rank_mapping ``` ###Assistant : Create mapping from worker world_rank to local_rank. 
Example: Worker 0: 0.0.0.0 Worker 1: 0.0.0.0 Worker 2: 0.0.0.1 Worker 3: 0.0.0.0 Worker 4: 0.0.0.1 Workers 0, 1, 3 are on 0.0.0.0. Workers 2, 4 are on 0.0.0.1. Expected Output: { 0 -> 0, 1 -> 1, 2 -> 0, 3 -> 2, 4 -> 1 } " 694,"def _output_groups(self) -> None: is_rename = self._args.sort_method != ""none"" logger.info(""Creating %s group folders in '%s'."", len(self._sorter.binned), self._args.output_dir) bin_names = [f""_{b}"" for b in self._sorter.bin_names] if is_rename: bin_names = [f""{name}_by_{self._args.sort_method}"" for name in bin_names] for name in bin_names: folder = os.path.join(self._args.output_dir, name) if os.path.exists(folder): rmtree(folder) os.makedirs(folder) description = f""{'Copying' if self._args.keep_original else 'Moving'} into groups"" description += "" and renaming"" if is_rename else """" pbar = tqdm(range(len(self._sorter.sorted_filelist)), desc=description, file=sys.stdout, leave=False) idx = 0 for bin_id, bin_ in enumerate(self._sorter.binned): pbar.set_description(f""{description}: Bin {bin_id + 1} of {len(self._sorter.binned)}"") output_path = os.path.join(self._args.output_dir, bin_names[bin_id]) if not bin_: logger.debug(""Removing empty bin: %s"", output_path) os.rmdir(output_path) for source in bin_: basename = os.path.basename(source) dst_name = f""{idx:06d}_{basename}"" if is_rename else basename dest = os.path.join(output_path, dst_name) self._sort_file(source, dest) idx += 1 pbar.update(1) # Output methods"," Move the files to folders. Obtains the bins and original filenames from :attr:`_sorter` and outputs into appropriate bins in the output location ",22,124,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _output_groups(self) -> None: is_rename = self._args.sort_method != ""none"" logger.info(""Creating %s group folders in '%s'."", len(self._sorter.binned), self._args.output_dir) bin_names = [f""_{b}"" for b in self._sorter.bin_names] if is_rename: bin_names = [f""{name}_by_{self._args.sort_method}"" for name in bin_names] for name in bin_names: folder = os.path.join(self._args.output_dir, name) if os.path.exists(folder): rmtree(folder) os.makedirs(folder) description = f""{'Copying' if self._args.keep_original else 'Moving'} into groups"" description += "" and renaming"" if is_rename else """" pbar = tqdm(range(len(self._sorter.sorted_filelist)), desc=description, file=sys.stdout, leave=False) idx = 0 for bin_id, bin_ in enumerate(self._sorter.binned): pbar.set_description(f""{description}: Bin {bin_id + 1} of {len(self._sorter.binned)}"") output_path = os.path.join(self._args.output_dir, bin_names[bin_id]) if not bin_: logger.debug(""Removing empty bin: %s"", output_path) os.rmdir(output_path) for source in bin_: basename = os.path.basename(source) dst_name = f""{idx:06d}_{basename}"" if is_rename else basename dest = os.path.join(output_path, dst_name) self._sort_file(source, dest) idx += 1 pbar.update(1) # Output methods ``` ###Assistant : Move the files to folders. Obtains the bins and original filenames from :attr:`_sorter` and outputs into appropriate bins in the output location " 695,"def _setSharedLibraryRPATHElf(filename, rpath): # TODO: Might write something that makes a shell script replacement # in case no rpath is present, or use patchelf, for now our use # case seems to use rpaths for executables. 
# patchelf --set-rpath ""$ORIGIN/path/to/library"" with withEnvironmentVarOverriden(""LANG"", ""C""): executeToolChecked( logger=postprocessing_logger, command=(""patchelf"", ""--set-rpath"", rpath, filename), stderr_filter=_filterPatchelfErrorOutput, absence_message=, ) ","\ Error, needs 'patchelf' on your system, due to 'RPATH' settings that need to be set.",16,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _setSharedLibraryRPATHElf(filename, rpath): # TODO: Might write something that makes a shell script replacement # in case no rpath is present, or use patchelf, for now our use # case seems to use rpaths for executables. # patchelf --set-rpath ""$ORIGIN/path/to/library"" with withEnvironmentVarOverriden(""LANG"", ""C""): executeToolChecked( logger=postprocessing_logger, command=(""patchelf"", ""--set-rpath"", rpath, filename), stderr_filter=_filterPatchelfErrorOutput, absence_message=, ) ``` ###Assistant : \ Error, needs 'patchelf' on your system, due to 'RPATH' settings that need to be set. " 696,"def _get_columns(self): if self._columns_cache is None: self._columns_cache, column_widths = self._compute_axis_labels_and_lengths( 1 ) if self._column_widths_cache is None: self._column_widths_cache = column_widths return self._columns_cache "," Get the columns from the cache object. Returns ------- pandas.Index An index object containing the column labels. ",17,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_columns(self): if self._columns_cache is None: self._columns_cache, column_widths = self._compute_axis_labels_and_lengths( 1 ) if self._column_widths_cache is None: self._column_widths_cache = column_widths return self._columns_cache ``` ###Assistant : Get the columns from the cache object. Returns ------- pandas.Index An index object containing the column labels. " 697,"async def async_update(self) -> None: # Update values from controller's device dictionary self._connected = self._controller.is_connected self._current_temp = self._controller.get_temperature(self._device_id) self._fan_speed = self._controller.get_fan_speed(self._device_id) self._power = self._controller.is_on(self._device_id) self._min_temp = self._controller.get_min_setpoint(self._device_id) self._max_temp = self._controller.get_max_setpoint(self._device_id) self._rssi = self._controller.get_rssi(self._device_id) self._run_hours = self._controller.get_run_hours(self._device_id) self._target_temp = self._controller.get_setpoint(self._device_id) self._outdoor_temp = self._controller.get_outdoor_temperature(self._device_id) # Operation mode mode = self._controller.get_mode(self._device_id) self._hvac_mode = MAP_IH_TO_HVAC_MODE.get(mode) # Preset mode preset = self._controller.get_preset_mode(self._device_id) self._preset = MAP_IH_TO_PRESET_MODE.get(preset) # Swing mode # Climate module only supports one swing setting. self._vvane = self._controller.get_vertical_swing(self._device_id) self._hvane = self._controller.get_horizontal_swing(self._device_id) # Power usage self._power_consumption_heat = self._controller.get_heat_power_consumption( self._device_id ) self._power_consumption_cool = self._controller.get_cool_power_consumption( self._device_id ) ",Copy values from controller dictionary to climate device.,8,90,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def async_update(self) -> None: # Update values from controller's device dictionary self._connected = self._controller.is_connected self._current_temp = self._controller.get_temperature(self._device_id) self._fan_speed = self._controller.get_fan_speed(self._device_id) self._power = self._controller.is_on(self._device_id) self._min_temp = self._controller.get_min_setpoint(self._device_id) self._max_temp = self._controller.get_max_setpoint(self._device_id) self._rssi = self._controller.get_rssi(self._device_id) self._run_hours = self._controller.get_run_hours(self._device_id) self._target_temp = self._controller.get_setpoint(self._device_id) self._outdoor_temp = self._controller.get_outdoor_temperature(self._device_id) # Operation mode mode = self._controller.get_mode(self._device_id) self._hvac_mode = MAP_IH_TO_HVAC_MODE.get(mode) # Preset mode preset = self._controller.get_preset_mode(self._device_id) self._preset = MAP_IH_TO_PRESET_MODE.get(preset) # Swing mode # Climate module only supports one swing setting. self._vvane = self._controller.get_vertical_swing(self._device_id) self._hvane = self._controller.get_horizontal_swing(self._device_id) # Power usage self._power_consumption_heat = self._controller.get_heat_power_consumption( self._device_id ) self._power_consumption_cool = self._controller.get_cool_power_consumption( self._device_id ) ``` ###Assistant : Copy values from controller dictionary to climate device. " 698,"def test_kb_valid_entities(nlp): mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) # adding entities mykb.add_entity(entity=""Q1"", freq=19, entity_vector=[8, 4, 3]) mykb.add_entity(entity=""Q2"", freq=5, entity_vector=[2, 1, 0]) mykb.add_entity(entity=""Q3"", freq=25, entity_vector=[-1, -6, 5]) # adding aliases mykb.add_alias(alias=""douglas"", entities=[""Q2"", ""Q3""], probabilities=[0.8, 0.2]) mykb.add_alias(alias=""adam"", entities=[""Q2""], probabilities=[0.9]) # test the size of the corresponding KB assert mykb.get_size_entities() == 3 assert mykb.get_size_aliases() == 2 # test retrieval of the entity vectors assert mykb.get_vector(""Q1"") == [8, 4, 3] assert mykb.get_vector(""Q2"") == [2, 1, 0] assert mykb.get_vector(""Q3"") == [-1, -6, 5] # test retrieval of prior probabilities assert_almost_equal(mykb.get_prior_prob(entity=""Q2"", alias=""douglas""), 0.8) assert_almost_equal(mykb.get_prior_prob(entity=""Q3"", alias=""douglas""), 0.2) assert_almost_equal(mykb.get_prior_prob(entity=""Q342"", alias=""douglas""), 0.0) assert_almost_equal(mykb.get_prior_prob(entity=""Q3"", alias=""douglassssss""), 0.0) ",Test the valid construction of a KB with 3 entities and two aliases,13,94,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_kb_valid_entities(nlp): mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) # adding entities mykb.add_entity(entity=""Q1"", freq=19, entity_vector=[8, 4, 3]) mykb.add_entity(entity=""Q2"", freq=5, entity_vector=[2, 1, 0]) mykb.add_entity(entity=""Q3"", freq=25, entity_vector=[-1, -6, 5]) # adding aliases mykb.add_alias(alias=""douglas"", entities=[""Q2"", ""Q3""], probabilities=[0.8, 0.2]) mykb.add_alias(alias=""adam"", entities=[""Q2""], probabilities=[0.9]) # test the size of the corresponding KB assert mykb.get_size_entities() == 3 assert mykb.get_size_aliases() == 2 # test retrieval of the entity vectors assert mykb.get_vector(""Q1"") == [8, 4, 3] assert mykb.get_vector(""Q2"") == [2, 1, 0] assert mykb.get_vector(""Q3"") == [-1, -6, 5] # test retrieval of prior probabilities assert_almost_equal(mykb.get_prior_prob(entity=""Q2"", alias=""douglas""), 0.8) assert_almost_equal(mykb.get_prior_prob(entity=""Q3"", alias=""douglas""), 0.2) assert_almost_equal(mykb.get_prior_prob(entity=""Q342"", alias=""douglas""), 0.0) assert_almost_equal(mykb.get_prior_prob(entity=""Q3"", alias=""douglassssss""), 0.0) ``` ###Assistant : Test the valid construction of a KB with 3 entities and two aliases " 699,"def eye(N, M=None, k=0, dtype=float, order='C', *, like=None): if like is not None: return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like) if M is None: M = N m = zeros((N, M), dtype=dtype, order=order) if k >= M: return m # Ensure M and k are integers, so we don't get any surprise casting # results in the expressions `M-k` and `M+1` used below. This avoids # a problem with inputs with type (for example) np.uint64. M = operator.index(M) k = operator.index(k) if k >= 0: i = k else: i = (-k) * M m[:M-k].flat[i::M+1] = 1 return m _eye_with_like = array_function_dispatch( _eye_dispatcher )(eye) "," Return a 2-D array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. order : {'C', 'F'}, optional Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. .. versionadded:: 1.14.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 Returns ------- I : ndarray of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. See Also -------- identity : (almost) equivalent function diag : diagonal 2-D array from a 1-D array specified by the user. Examples -------- >>> np.eye(2, dtype=int) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) array([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]) ",176,104,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def eye(N, M=None, k=0, dtype=float, order='C', *, like=None): if like is not None: return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like) if M is None: M = N m = zeros((N, M), dtype=dtype, order=order) if k >= M: return m # Ensure M and k are integers, so we don't get any surprise casting # results in the expressions `M-k` and `M+1` used below. This avoids # a problem with inputs with type (for example) np.uint64. 
M = operator.index(M) k = operator.index(k) if k >= 0: i = k else: i = (-k) * M m[:M-k].flat[i::M+1] = 1 return m _eye_with_like = array_function_dispatch( _eye_dispatcher )(eye) ``` ###Assistant : Return a 2-D array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. order : {'C', 'F'}, optional Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. .. versionadded:: 1.14.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 Returns ------- I : ndarray of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. See Also -------- identity : (almost) equivalent function diag : diagonal 2-D array from a 1-D array specified by the user. Examples -------- >>> np.eye(2, dtype=int) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) array([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]) " 700,"def _clean_configuration_value(cls, item_type, new_value): if ( item_type == ConfigurationTypeField.BOOLEAN and new_value and not isinstance(new_value, bool) ): new_value = new_value.lower() == ""true"" if item_type == ConfigurationTypeField.OUTPUT: # OUTPUT field is read only. No need to update it return return new_value ","Clean the value that is saved in plugin configuration. Change the string provided as boolean into the bool value. Return None for Output type, as it's read only field. ",29,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _clean_configuration_value(cls, item_type, new_value): if ( item_type == ConfigurationTypeField.BOOLEAN and new_value and not isinstance(new_value, bool) ): new_value = new_value.lower() == ""true"" if item_type == ConfigurationTypeField.OUTPUT: # OUTPUT field is read only. No need to update it return return new_value ``` ###Assistant : Clean the value that is saved in plugin configuration. Change the string provided as boolean into the bool value. Return None for Output type, as it's read only field. " 701,"def coord_map_from_to(top_from, top_to): # We need to find a common ancestor of top_from and top_to. # We'll assume that all ancestors are equivalent here (otherwise the graph # is an inconsistent state (which we could improve this to check for)). # For now use a brute-force algorithm. "," Determine the coordinate mapping betweeen a top (from) and a top (to). Walk the graph to find a common ancestor while composing the coord maps for from and to until they meet. As a last step the from map is inverted. ",41,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def coord_map_from_to(top_from, top_to): # We need to find a common ancestor of top_from and top_to. # We'll assume that all ancestors are equivalent here (otherwise the graph # is an inconsistent state (which we could improve this to check for)). # For now use a brute-force algorithm. ``` ###Assistant : Determine the coordinate mapping betweeen a top (from) and a top (to). Walk the graph to find a common ancestor while composing the coord maps for from and to until they meet. 
As a last step the from map is inverted. " 702,"def get_conditions_to_validate_future_sle(sl_entries): warehouse_items_map = {} for entry in sl_entries: if entry.warehouse not in warehouse_items_map: warehouse_items_map[entry.warehouse] = set() warehouse_items_map[entry.warehouse].add(entry.item_code) or_conditions = [] for warehouse, items in warehouse_items_map.items(): or_conditions.append( f ) return or_conditions ","warehouse = {frappe.db.escape(warehouse)} and item_code in ({', '.join(frappe.db.escape(item) for item in items)})",12,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_conditions_to_validate_future_sle(sl_entries): warehouse_items_map = {} for entry in sl_entries: if entry.warehouse not in warehouse_items_map: warehouse_items_map[entry.warehouse] = set() warehouse_items_map[entry.warehouse].add(entry.item_code) or_conditions = [] for warehouse, items in warehouse_items_map.items(): or_conditions.append( f ) return or_conditions ``` ###Assistant : warehouse = {frappe.db.escape(warehouse)} and item_code in ({', '.join(frappe.db.escape(item) for item in items)}) " 703,"def split_dataset(items, eval_split_max_size=None, eval_split_size=0.01): speakers = [item[""speaker_name""] for item in items] is_multi_speaker = len(set(speakers)) > 1 if eval_split_size > 1: eval_split_size = int(eval_split_size) else: if eval_split_max_size: eval_split_size = min(eval_split_max_size, int(len(items) * eval_split_size)) else: eval_split_size = int(len(items) * eval_split_size) assert ( eval_split_size > 0 ), "" [!] You do not have enough samples for the evaluation set. You can work around this setting the 'eval_split_size' parameter to a minimum of {}"".format( 1 / len(items) ) np.random.seed(0) np.random.shuffle(items) if is_multi_speaker: items_eval = [] speakers = [item[""speaker_name""] for item in items] speaker_counter = Counter(speakers) while len(items_eval) < eval_split_size: item_idx = np.random.randint(0, len(items)) speaker_to_be_removed = items[item_idx][""speaker_name""] if speaker_counter[speaker_to_be_removed] > 1: items_eval.append(items[item_idx]) speaker_counter[speaker_to_be_removed] -= 1 del items[item_idx] return items_eval, items return items[:eval_split_size], items[eval_split_size:] ","Split a dataset into train and eval. Consider speaker distribution in multi-speaker training. Args: <<<<<<< HEAD items (List[List]): A list of samples. Each sample is a list of `[audio_path, text, speaker_id]`. eval_split_max_size (int): Number maximum of samples to be used for evaluation in proportion split. Defaults to None (Disabled). eval_split_size (float): If between 0.0 and 1.0 represents the proportion of the dataset to include in the evaluation set. If > 1, represents the absolute number of evaluation samples. Defaults to 0.01 (1%). ======= items (List[List]): A list of samples. Each sample is a list of `[text, audio_path, speaker_id]`. >>>>>>> Fix docstring ",101,118,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def split_dataset(items, eval_split_max_size=None, eval_split_size=0.01): speakers = [item[""speaker_name""] for item in items] is_multi_speaker = len(set(speakers)) > 1 if eval_split_size > 1: eval_split_size = int(eval_split_size) else: if eval_split_max_size: eval_split_size = min(eval_split_max_size, int(len(items) * eval_split_size)) else: eval_split_size = int(len(items) * eval_split_size) assert ( eval_split_size > 0 ), "" [!] You do not have enough samples for the evaluation set. You can work around this setting the 'eval_split_size' parameter to a minimum of {}"".format( 1 / len(items) ) np.random.seed(0) np.random.shuffle(items) if is_multi_speaker: items_eval = [] speakers = [item[""speaker_name""] for item in items] speaker_counter = Counter(speakers) while len(items_eval) < eval_split_size: item_idx = np.random.randint(0, len(items)) speaker_to_be_removed = items[item_idx][""speaker_name""] if speaker_counter[speaker_to_be_removed] > 1: items_eval.append(items[item_idx]) speaker_counter[speaker_to_be_removed] -= 1 del items[item_idx] return items_eval, items return items[:eval_split_size], items[eval_split_size:] ``` ###Assistant : Split a dataset into train and eval. Consider speaker distribution in multi-speaker training. Args: <<<<<<< HEAD items (List[List]): A list of samples. Each sample is a list of `[audio_path, text, speaker_id]`. eval_split_max_size (int): Number maximum of samples to be used for evaluation in proportion split. Defaults to None (Disabled). eval_split_size (float): If between 0.0 and 1.0 represents the proportion of the dataset to include in the evaluation set. If > 1, represents the absolute number of evaluation samples. Defaults to 0.01 (1%). ======= items (List[List]): A list of samples. Each sample is a list of `[text, audio_path, speaker_id]`. >>>>>>> Fix docstring " 704,"def test_quarantine_media(self) -> None: media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None self.assertFalse(media_info[""quarantined_by""]) # quarantining channel = self.make_request( ""POST"", self.url % (""quarantine"", self.server_name, self.media_id), access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertFalse(channel.json_body) media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None self.assertTrue(media_info[""quarantined_by""]) # remove from quarantine channel = self.make_request( ""POST"", self.url % (""unquarantine"", self.server_name, self.media_id), access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertFalse(channel.json_body) media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None self.assertFalse(media_info[""quarantined_by""]) "," Tests that quarantining and remove from quarantine a media is successfully ",11,67,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_quarantine_media(self) -> None: media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None self.assertFalse(media_info[""quarantined_by""]) # quarantining channel = self.make_request( ""POST"", self.url % (""quarantine"", self.server_name, self.media_id), access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertFalse(channel.json_body) media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None self.assertTrue(media_info[""quarantined_by""]) # remove from quarantine channel = self.make_request( ""POST"", self.url % (""unquarantine"", self.server_name, self.media_id), access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertFalse(channel.json_body) media_info = self.get_success(self.store.get_local_media(self.media_id)) assert media_info is not None self.assertFalse(media_info[""quarantined_by""]) ``` ###Assistant : Tests that quarantining and remove from quarantine a media is successfully " 705,"def naive_greedy_modularity_communities(G, resolution=1, weight=None): r # First create one community for each node communities = list(frozenset([u]) for u in G.nodes()) # Track merges merges = [] # Greedily merge communities until no improvement is possible old_modularity = None new_modularity = modularity(G, communities, resolution=resolution, weight=weight) while old_modularity is None or new_modularity > old_modularity: # Save modularity for comparison old_modularity = new_modularity # Find best pair to merge trial_communities = list(communities) to_merge = None for i, u in enumerate(communities): for j, v in enumerate(communities): # Skip i==j and empty communities if j <= i or len(u) == 0 or len(v) == 0: continue # Merge communities u and v trial_communities[j] = u | v trial_communities[i] = frozenset([]) trial_modularity = modularity( G, trial_communities, resolution=resolution, weight=weight ) if trial_modularity >= new_modularity: # Check if strictly better or tie if trial_modularity > new_modularity: # Found new best, save modularity and group indexes new_modularity = trial_modularity to_merge = (i, j, new_modularity - old_modularity) elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]): # Break ties by choosing pair with lowest min id new_modularity = trial_modularity to_merge = (i, j, new_modularity - old_modularity) # Un-merge trial_communities[i] = u trial_communities[j] = v if to_merge is not None: # If the best merge improves modularity, use it merges.append(to_merge) i, j, dq = to_merge u, v = communities[i], communities[j] communities[j] = u | v communities[i] = frozenset([]) # Remove empty communities and sort return sorted((c for c in communities if len(c) > 0), key=len, reverse=True) # old name _naive_greedy_modularity_communities = naive_greedy_modularity_communities ","Find communities in G using greedy modularity maximization. This implementation is O(n^4), much slower than alternatives, but it is provided as an easy-to-understand reference implementation. Greedy modularity maximization begins with each node in its own community and joins the pair of communities that most increases modularity until no such pair exists. This function maximizes the generalized modularity, where `resolution` is the resolution parameter, often expressed as $\gamma$. 
See :func:`~networkx.algorithms.community.quality.modularity`. Parameters ---------- G : NetworkX graph resolution : float (default=1) If resolution is less than 1, modularity favors larger communities. Greater than 1 favors smaller communities. weight : string or None, optional (default=None) The name of an edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. Returns ------- list A list of sets of nodes, one for each community. Sorted by length with largest communities first. Examples -------- >>> from networkx.algorithms.community import \ ... naive_greedy_modularity_communities >>> G = nx.karate_club_graph() >>> c = naive_greedy_modularity_communities(G) >>> sorted(c[0]) [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] See Also -------- greedy_modularity_communities modularity ",199,250,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def naive_greedy_modularity_communities(G, resolution=1, weight=None): r # First create one community for each node communities = list(frozenset([u]) for u in G.nodes()) # Track merges merges = [] # Greedily merge communities until no improvement is possible old_modularity = None new_modularity = modularity(G, communities, resolution=resolution, weight=weight) while old_modularity is None or new_modularity > old_modularity: # Save modularity for comparison old_modularity = new_modularity # Find best pair to merge trial_communities = list(communities) to_merge = None for i, u in enumerate(communities): for j, v in enumerate(communities): # Skip i==j and empty communities if j <= i or len(u) == 0 or len(v) == 0: continue # Merge communities u and v trial_communities[j] = u | v trial_communities[i] = frozenset([]) trial_modularity = modularity( G, trial_communities, resolution=resolution, weight=weight ) if trial_modularity >= new_modularity: # Check if strictly better or tie if trial_modularity > new_modularity: # Found new best, save modularity and group indexes new_modularity = trial_modularity to_merge = (i, j, new_modularity - old_modularity) elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]): # Break ties by choosing pair with lowest min id new_modularity = trial_modularity to_merge = (i, j, new_modularity - old_modularity) # Un-merge trial_communities[i] = u trial_communities[j] = v if to_merge is not None: # If the best merge improves modularity, use it merges.append(to_merge) i, j, dq = to_merge u, v = communities[i], communities[j] communities[j] = u | v communities[i] = frozenset([]) # Remove empty communities and sort return sorted((c for c in communities if len(c) > 0), key=len, reverse=True) # old name _naive_greedy_modularity_communities = naive_greedy_modularity_communities ``` ###Assistant : Find communities in G using greedy modularity maximization. This implementation is O(n^4), much slower than alternatives, but it is provided as an easy-to-understand reference implementation. Greedy modularity maximization begins with each node in its own community and joins the pair of communities that most increases modularity until no such pair exists. This function maximizes the generalized modularity, where `resolution` is the resolution parameter, often expressed as $\gamma$. See :func:`~networkx.algorithms.community.quality.modularity`. 
Parameters ---------- G : NetworkX graph resolution : float (default=1) If resolution is less than 1, modularity favors larger communities. Greater than 1 favors smaller communities. weight : string or None, optional (default=None) The name of an edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. Returns ------- list A list of sets of nodes, one for each community. Sorted by length with largest communities first. Examples -------- >>> from networkx.algorithms.community import \ ... naive_greedy_modularity_communities >>> G = nx.karate_club_graph() >>> c = naive_greedy_modularity_communities(G) >>> sorted(c[0]) [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] See Also -------- greedy_modularity_communities modularity " 706,"def invoke(self) -> Generator[PowerShell, None, None]: logger = copy(self.log) logger.setLevel(self._logging_level) local_context = self._conn is None if local_context: self.__enter__() try: assert self._conn is not None ps = PowerShell(self._conn) yield ps ps.begin_invoke() streams = [ ps.output, ps.streams.debug, ps.streams.error, ps.streams.information, ps.streams.progress, ps.streams.verbose, ps.streams.warning, ] offsets = [0 for _ in streams] # We're using polling to make sure output and streams are # handled while the process is running. while ps.state == PSInvocationState.RUNNING: ps.poll_invoke(timeout=self._operation_timeout) for i, stream in enumerate(streams): offset = offsets[i] while len(stream) > offset: record = stream[offset] # Records received on the output stream during job # status polling are handled via an optional callback, # while the other streams are simply logged. if stream is ps.output: if self._on_output_callback is not None: self._on_output_callback(record) else: self._log_record(logger.log, record) offset += 1 offsets[i] = offset # For good measure, we'll make sure the process has # stopped running in any case. ps.end_invoke() self.log.info(""Invocation state: %s"", str(PSInvocationState(ps.state))) if ps.streams.error: raise AirflowException(""Process had one or more errors"") finally: if local_context: self.__exit__(None, None, None) "," Context manager that yields a PowerShell object to which commands can be added. Upon exit, the commands will be invoked. ",20,167,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def invoke(self) -> Generator[PowerShell, None, None]: logger = copy(self.log) logger.setLevel(self._logging_level) local_context = self._conn is None if local_context: self.__enter__() try: assert self._conn is not None ps = PowerShell(self._conn) yield ps ps.begin_invoke() streams = [ ps.output, ps.streams.debug, ps.streams.error, ps.streams.information, ps.streams.progress, ps.streams.verbose, ps.streams.warning, ] offsets = [0 for _ in streams] # We're using polling to make sure output and streams are # handled while the process is running. while ps.state == PSInvocationState.RUNNING: ps.poll_invoke(timeout=self._operation_timeout) for i, stream in enumerate(streams): offset = offsets[i] while len(stream) > offset: record = stream[offset] # Records received on the output stream during job # status polling are handled via an optional callback, # while the other streams are simply logged. 
if stream is ps.output: if self._on_output_callback is not None: self._on_output_callback(record) else: self._log_record(logger.log, record) offset += 1 offsets[i] = offset # For good measure, we'll make sure the process has # stopped running in any case. ps.end_invoke() self.log.info(""Invocation state: %s"", str(PSInvocationState(ps.state))) if ps.streams.error: raise AirflowException(""Process had one or more errors"") finally: if local_context: self.__exit__(None, None, None) ``` ###Assistant : Context manager that yields a PowerShell object to which commands can be added. Upon exit, the commands will be invoked. " 707,"def util_call_before_task_publish_handler(self, headers_to_use, body_to_use): self.assertEqual(PaperlessTask.objects.all().count(), 0) before_task_publish_handler(headers=headers_to_use, body=body_to_use) self.assertEqual(PaperlessTask.objects.all().count(), 1) "," Simple utility to call the pre-run handle and ensure it created a single task instance ",15,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def util_call_before_task_publish_handler(self, headers_to_use, body_to_use): self.assertEqual(PaperlessTask.objects.all().count(), 0) before_task_publish_handler(headers=headers_to_use, body=body_to_use) self.assertEqual(PaperlessTask.objects.all().count(), 1) ``` ###Assistant : Simple utility to call the pre-run handle and ensure it created a single task instance " 708,"def test_form(self): form = self.EventPageForm(instance=self.event_page) self.assertIn(""comments"", form.formsets) comments_formset = form.formsets[""comments""] self.assertEqual(len(comments_formset.forms), 1) self.assertEqual(comments_formset.forms[0].user, self.commenting_user) replies_formset = comments_formset.forms[0].formsets[""replies""] self.assertEqual(len(replies_formset.forms), 2) self.assertEqual(replies_formset.forms[0].user, self.commenting_user) "," Check that the form has the comments/replies formsets, and that the user has been set on each CommentForm/CommentReplyForm subclass ",19,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_form(self): form = self.EventPageForm(instance=self.event_page) self.assertIn(""comments"", form.formsets) comments_formset = form.formsets[""comments""] self.assertEqual(len(comments_formset.forms), 1) self.assertEqual(comments_formset.forms[0].user, self.commenting_user) replies_formset = comments_formset.forms[0].formsets[""replies""] self.assertEqual(len(replies_formset.forms), 2) self.assertEqual(replies_formset.forms[0].user, self.commenting_user) ``` ###Assistant : Check that the form has the comments/replies formsets, and that the user has been set on each CommentForm/CommentReplyForm subclass " 709,"def _doc(self, doc_type, default, lang=""eng""): corpus = self._wordnet_corpus_reader if lang not in corpus.langs(): return None elif lang == ""eng"": return default else: corpus._load_lang_data(lang) of = corpus.ss2of(self) i = corpus.lg_attrs.index(doc_type) if of in corpus._lang_data[lang][i]: return corpus._lang_data[lang][i][of] else: return None ",Helper method for Synset.definition and Synset.examples,6,38,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _doc(self, doc_type, default, lang=""eng""): corpus = self._wordnet_corpus_reader if lang not in corpus.langs(): return None elif lang == ""eng"": return default else: corpus._load_lang_data(lang) of = corpus.ss2of(self) i = corpus.lg_attrs.index(doc_type) if of in corpus._lang_data[lang][i]: return corpus._lang_data[lang][i][of] else: return None ``` ###Assistant : Helper method for Synset.definition and Synset.examples " 710,"def map(self, mapper): new_categories = self.categories.map(mapper) try: return self.from_codes( self._codes.copy(), categories=new_categories, ordered=self.ordered ) except ValueError: # NA values are represented in self._codes with -1 # np.take causes NA values to take final element in new_categories if np.any(self._codes == -1): new_categories = new_categories.insert(len(new_categories), np.nan) return np.take(new_categories, self._codes) __eq__ = _cat_compare_op(operator.eq) __ne__ = _cat_compare_op(operator.ne) __lt__ = _cat_compare_op(operator.lt) __gt__ = _cat_compare_op(operator.gt) __le__ = _cat_compare_op(operator.le) __ge__ = _cat_compare_op(operator.ge) # ------------------------------------------------------------- # Validators; ideally these can be de-duplicated "," Map categories using an input mapping or function. Maps the categories to new categories. If the mapping correspondence is one-to-one the result is a :class:`~pandas.Categorical` which has the same order property as the original, otherwise a :class:`~pandas.Index` is returned. NaN values are unaffected. If a `dict` or :class:`~pandas.Series` is used any unmapped category is mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` will be returned. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. Returns ------- pandas.Categorical or pandas.Index Mapped categorical. See Also -------- CategoricalIndex.map : Apply a mapping correspondence on a :class:`~pandas.CategoricalIndex`. Index.map : Apply a mapping correspondence on an :class:`~pandas.Index`. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Series.apply : Apply more complex functions on a :class:`~pandas.Series`. Examples -------- >>> cat = pd.Categorical(['a', 'b', 'c']) >>> cat ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> cat.map(lambda x: x.upper()) ['A', 'B', 'C'] Categories (3, object): ['A', 'B', 'C'] >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'}) ['first', 'second', 'third'] Categories (3, object): ['first', 'second', 'third'] If the mapping is one-to-one the ordering of the categories is preserved: >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True) >>> cat ['a', 'b', 'c'] Categories (3, object): ['a' < 'b' < 'c'] >>> cat.map({'a': 3, 'b': 2, 'c': 1}) [3, 2, 1] Categories (3, int64): [3 < 2 < 1] If the mapping is not one-to-one an :class:`~pandas.Index` is returned: >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'}) Index(['first', 'second', 'first'], dtype='object') If a `dict` is used, all unmapped categories are mapped to `NaN` and the result is an :class:`~pandas.Index`: >>> cat.map({'a': 'first', 'b': 'second'}) Index(['first', 'second', nan], dtype='object') ",269,73,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def map(self, mapper): new_categories = self.categories.map(mapper) try: return self.from_codes( self._codes.copy(), categories=new_categories, ordered=self.ordered ) except ValueError: # NA values are represented in self._codes with -1 # np.take causes NA values to take final element in new_categories if np.any(self._codes == -1): new_categories = new_categories.insert(len(new_categories), np.nan) return np.take(new_categories, self._codes) __eq__ = _cat_compare_op(operator.eq) __ne__ = _cat_compare_op(operator.ne) __lt__ = _cat_compare_op(operator.lt) __gt__ = _cat_compare_op(operator.gt) __le__ = _cat_compare_op(operator.le) __ge__ = _cat_compare_op(operator.ge) # ------------------------------------------------------------- # Validators; ideally these can be de-duplicated ``` ###Assistant : Map categories using an input mapping or function. Maps the categories to new categories. If the mapping correspondence is one-to-one the result is a :class:`~pandas.Categorical` which has the same order property as the original, otherwise a :class:`~pandas.Index` is returned. NaN values are unaffected. If a `dict` or :class:`~pandas.Series` is used any unmapped category is mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` will be returned. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. Returns ------- pandas.Categorical or pandas.Index Mapped categorical. See Also -------- CategoricalIndex.map : Apply a mapping correspondence on a :class:`~pandas.CategoricalIndex`. Index.map : Apply a mapping correspondence on an :class:`~pandas.Index`. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Series.apply : Apply more complex functions on a :class:`~pandas.Series`. Examples -------- >>> cat = pd.Categorical(['a', 'b', 'c']) >>> cat ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> cat.map(lambda x: x.upper()) ['A', 'B', 'C'] Categories (3, object): ['A', 'B', 'C'] >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'}) ['first', 'second', 'third'] Categories (3, object): ['first', 'second', 'third'] If the mapping is one-to-one the ordering of the categories is preserved: >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True) >>> cat ['a', 'b', 'c'] Categories (3, object): ['a' < 'b' < 'c'] >>> cat.map({'a': 3, 'b': 2, 'c': 1}) [3, 2, 1] Categories (3, int64): [3 < 2 < 1] If the mapping is not one-to-one an :class:`~pandas.Index` is returned: >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'}) Index(['first', 'second', 'first'], dtype='object') If a `dict` is used, all unmapped categories are mapped to `NaN` and the result is an :class:`~pandas.Index`: >>> cat.map({'a': 'first', 'b': 'second'}) Index(['first', 'second', nan], dtype='object') " 711,"def to_pickle_distributed(cls, qc, **kwargs): if not ( isinstance(kwargs[""filepath_or_buffer""], str) and ""*"" in kwargs[""filepath_or_buffer""] ) or not isinstance(qc, PandasQueryCompiler): warnings.warn(""Defaulting to Modin core implementation"") return PandasOnUnidistIO.to_pickle(qc, **kwargs) "," When `*` in the filename all partitions are written to their own separate file. The filenames is determined as follows: - if `*` in the filename then it will be replaced by the increasing sequence 0, 1, 2, … - if `*` is not the filename, then will be used default implementation. 
Examples #1: 4 partitions and input filename=""partition*.pkl.gz"", then filenames will be: `partition0.pkl.gz`, `partition1.pkl.gz`, `partition2.pkl.gz`, `partition3.pkl.gz`. Parameters ---------- qc : BaseQueryCompiler The query compiler of the Modin dataframe that we want to run ``to_pickle_distributed`` on. **kwargs : dict Parameters for ``pandas.to_pickle(**kwargs)``. ",92,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_pickle_distributed(cls, qc, **kwargs): if not ( isinstance(kwargs[""filepath_or_buffer""], str) and ""*"" in kwargs[""filepath_or_buffer""] ) or not isinstance(qc, PandasQueryCompiler): warnings.warn(""Defaulting to Modin core implementation"") return PandasOnUnidistIO.to_pickle(qc, **kwargs) ``` ###Assistant : When `*` in the filename all partitions are written to their own separate file. The filenames is determined as follows: - if `*` in the filename then it will be replaced by the increasing sequence 0, 1, 2, … - if `*` is not the filename, then will be used default implementation. Examples #1: 4 partitions and input filename=""partition*.pkl.gz"", then filenames will be: `partition0.pkl.gz`, `partition1.pkl.gz`, `partition2.pkl.gz`, `partition3.pkl.gz`. Parameters ---------- qc : BaseQueryCompiler The query compiler of the Modin dataframe that we want to run ``to_pickle_distributed`` on. **kwargs : dict Parameters for ``pandas.to_pickle(**kwargs)``. " 712,"def project_state(self, nodes=None, at_end=True): return self.graph.make_state( nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps ) "," Return a ProjectState object representing the most recent state that the loaded migrations represent. See graph.make_state() for the meaning of ""nodes"" and ""at_end"". ",23,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def project_state(self, nodes=None, at_end=True): return self.graph.make_state( nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps ) ``` ###Assistant : Return a ProjectState object representing the most recent state that the loaded migrations represent. See graph.make_state() for the meaning of ""nodes"" and ""at_end"". " 713,"def center(self, frequency=1000): equal_energy_fr = self.__class__(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy()) equal_energy_fr.interpolate() interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1) if type(frequency) in [list, np.ndarray] and len(frequency) > 1: # Use the average of the gain values between the given frequencies as the difference to be subtracted diff = np.mean(equal_energy_fr.raw[np.logical_and( equal_energy_fr.frequency >= frequency[0], equal_energy_fr.frequency <= frequency[1] )]) else: if type(frequency) in [list, np.ndarray]: # List or array with only one element frequency = frequency[0] # Use the gain value at the given frequency as the difference to be subtracted diff = interpolator(np.log10(frequency)) self.raw -= diff if len(self.smoothed): self.smoothed -= diff if len(self.error): self.error += diff if len(self.error_smoothed): self.error_smoothed += diff # Everything but raw, smoothed, errors and target is affected by centering, reset them self.reset(raw=False, smoothed=False, error=False, error_smoothed=False, target=False) return -diff ","Removed bias from frequency response. Args: frequency: Frequency which is set to 0 dB. 
If this is a list with two values then an average between the two frequencies is set to 0 dB. Returns: Gain shifted ",37,125,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def center(self, frequency=1000): equal_energy_fr = self.__class__(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy()) equal_energy_fr.interpolate() interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1) if type(frequency) in [list, np.ndarray] and len(frequency) > 1: # Use the average of the gain values between the given frequencies as the difference to be subtracted diff = np.mean(equal_energy_fr.raw[np.logical_and( equal_energy_fr.frequency >= frequency[0], equal_energy_fr.frequency <= frequency[1] )]) else: if type(frequency) in [list, np.ndarray]: # List or array with only one element frequency = frequency[0] # Use the gain value at the given frequency as the difference to be subtracted diff = interpolator(np.log10(frequency)) self.raw -= diff if len(self.smoothed): self.smoothed -= diff if len(self.error): self.error += diff if len(self.error_smoothed): self.error_smoothed += diff # Everything but raw, smoothed, errors and target is affected by centering, reset them self.reset(raw=False, smoothed=False, error=False, error_smoothed=False, target=False) return -diff ``` ###Assistant : Removed bias from frequency response. Args: frequency: Frequency which is set to 0 dB. If this is a list with two values then an average between the two frequencies is set to 0 dB. Returns: Gain shifted " 714,"def _i18n_cache_key_suffix(request, cache_key): if settings.USE_I18N: # first check if LocaleMiddleware or another middleware added # LANGUAGE_CODE to request, then fall back to the active language # which in turn can also fall back to settings.LANGUAGE_CODE cache_key += "".%s"" % getattr(request, ""LANGUAGE_CODE"", get_language()) if settings.USE_TZ: cache_key += "".%s"" % get_current_timezone_name() return cache_key ","If necessary, add the current locale or time zone to the cache key.",13,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _i18n_cache_key_suffix(request, cache_key): if settings.USE_I18N: # first check if LocaleMiddleware or another middleware added # LANGUAGE_CODE to request, then fall back to the active language # which in turn can also fall back to settings.LANGUAGE_CODE cache_key += "".%s"" % getattr(request, ""LANGUAGE_CODE"", get_language()) if settings.USE_TZ: cache_key += "".%s"" % get_current_timezone_name() return cache_key ``` ###Assistant : If necessary, add the current locale or time zone to the cache key. " 715,"def test_pandas_arff_parser_strip_double_quotes(parser_func): pd = pytest.importorskip(""pandas"") arff_file = BytesIO( textwrap.dedent( ",Check that we properly strip double quotes from the data.,10,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pandas_arff_parser_strip_double_quotes(parser_func): pd = pytest.importorskip(""pandas"") arff_file = BytesIO( textwrap.dedent( ``` ###Assistant : Check that we properly strip double quotes from the data. 
" 716,"def get_tokens_unprocessed(self, text=None, context=None): tokendefs = self._tokens if not context: ctx = LexerContext(text, 0) statetokens = tokendefs['root'] else: ctx = context statetokens = tokendefs[ctx.stack[-1]] text = ctx.text while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, ctx.pos, ctx.end) if m: if action is not None: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: yield from action(self, m, ctx) if not new_state: # altered the state stack? statetokens = tokendefs[ctx.stack[-1]] # CAUTION: callback must set ctx.pos! if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': if len(ctx.stack) > 1: ctx.stack.pop() elif state == '#push': ctx.stack.append(ctx.stack[-1]) else: ctx.stack.append(state) elif isinstance(new_state, int): # see RegexLexer for why this check is made if abs(new_state) >= len(ctx.stack): del ctx.state[1:] else: del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, ""wrong state def: %r"" % new_state statetokens = tokendefs[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to ""root"" ctx.stack = ['root'] statetokens = tokendefs['root'] yield ctx.pos, Text, '\n' ctx.pos += 1 continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break "," Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead. ",15,193,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_tokens_unprocessed(self, text=None, context=None): tokendefs = self._tokens if not context: ctx = LexerContext(text, 0) statetokens = tokendefs['root'] else: ctx = context statetokens = tokendefs[ctx.stack[-1]] text = ctx.text while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, ctx.pos, ctx.end) if m: if action is not None: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: yield from action(self, m, ctx) if not new_state: # altered the state stack? statetokens = tokendefs[ctx.stack[-1]] # CAUTION: callback must set ctx.pos! if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': if len(ctx.stack) > 1: ctx.stack.pop() elif state == '#push': ctx.stack.append(ctx.stack[-1]) else: ctx.stack.append(state) elif isinstance(new_state, int): # see RegexLexer for why this check is made if abs(new_state) >= len(ctx.stack): del ctx.state[1:] else: del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, ""wrong state def: %r"" % new_state statetokens = tokendefs[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to ""root"" ctx.stack = ['root'] statetokens = tokendefs['root'] yield ctx.pos, Text, '\n' ctx.pos += 1 continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break ``` ###Assistant : Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead. 
" 717,"def get_policy_data_from_agent_data(agent_data, policy_map_fn): policy_data = {} for agent_id, data in agent_data.items(): policy_id = policy_map_fn(agent_id) policy_data.setdefault(policy_id, {}) policy_data[policy_id].setdefault(""agent_id"", []) if data[""obs""].ndim == 1: policy_data[policy_id][""agent_id""].append(agent_id) else: policy_data[policy_id][""agent_id""] += [agent_id] * len(data[""obs""]) for k, v in data.items(): policy_data[policy_id].setdefault(k, []) if v.ndim == 1: v = v[None] policy_data[policy_id][k].append(v) for policy_id in policy_data: policy_data[policy_id] = { k: np.concatenate(v) if k != ""agent_id"" else v for k, v in policy_data[policy_id].items() } return policy_data ","Utility function to get policy data from agent data and policy map function. It also keeps track of agent_id for each row so that we can retreive the agent level information after the forward pass. Returns: dict of module_id to module data ",42,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_policy_data_from_agent_data(agent_data, policy_map_fn): policy_data = {} for agent_id, data in agent_data.items(): policy_id = policy_map_fn(agent_id) policy_data.setdefault(policy_id, {}) policy_data[policy_id].setdefault(""agent_id"", []) if data[""obs""].ndim == 1: policy_data[policy_id][""agent_id""].append(agent_id) else: policy_data[policy_id][""agent_id""] += [agent_id] * len(data[""obs""]) for k, v in data.items(): policy_data[policy_id].setdefault(k, []) if v.ndim == 1: v = v[None] policy_data[policy_id][k].append(v) for policy_id in policy_data: policy_data[policy_id] = { k: np.concatenate(v) if k != ""agent_id"" else v for k, v in policy_data[policy_id].items() } return policy_data ``` ###Assistant : Utility function to get policy data from agent data and policy map function. It also keeps track of agent_id for each row so that we can retreive the agent level information after the forward pass. Returns: dict of module_id to module data " 718,"def _update_dimensions(self) -> None: total_width = sum(column.width for column in self.columns) self.virtual_size = Size( total_width, len(self._y_offsets) + (self.header_height if self.show_header else 0), ) ",Called to recalculate the virtual (scrollable) size.,7,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _update_dimensions(self) -> None: total_width = sum(column.width for column in self.columns) self.virtual_size = Size( total_width, len(self._y_offsets) + (self.header_height if self.show_header else 0), ) ``` ###Assistant : Called to recalculate the virtual (scrollable) size. " 719,"def _store(self, messages, response, *args, **kwargs): raise NotImplementedError( ""subclasses of BaseStorage must provide a _store() method"" ) "," Store a list of messages and return a list of any messages which could not be stored. One type of object must be able to be stored, ``Message``. **This method must be implemented by a subclass.** ",36,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _store(self, messages, response, *args, **kwargs): raise NotImplementedError( ""subclasses of BaseStorage must provide a _store() method"" ) ``` ###Assistant : Store a list of messages and return a list of any messages which could not be stored. One type of object must be able to be stored, ``Message``. 
**This method must be implemented by a subclass.** " 720,"def test_from_is_negative(self) -> None: channel = self.make_request( ""GET"", self.url + ""?from=-5"", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body[""errcode""]) "," Testing that a negative from parameter returns a 400 ",9,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_from_is_negative(self) -> None: channel = self.make_request( ""GET"", self.url + ""?from=-5"", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body[""errcode""]) ``` ###Assistant : Testing that a negative from parameter returns a 400 " 721,"async def test_focused_child_widget_no_inherit_empty_bindings_with_movement_bindings_on_screen() -> None: ","A focused child widget, that doesn't inherit bindings and sets BINDINGS empty, with movement bindings in the screen, should trigger screen actions.",22,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_focused_child_widget_no_inherit_empty_bindings_with_movement_bindings_on_screen() -> None: ``` ###Assistant : A focused child widget, that doesn't inherit bindings and sets BINDINGS empty, with movement bindings in the screen, should trigger screen actions. " 722,"def addtoken(self, type, value, context): # Map from token to label ilabel = self.classify(type, value, context) # Loop until the token is shifted; may raise exceptions while True: dfa, state, node = self.stack[-1] states, first = dfa arcs = states[state] # Look for a state with this label for i, newstate in arcs: t, v = self.grammar.labels[i] if ilabel == i: # Look it up in the list of labels assert t < 256 # Shift a token; we're done with it self.shift(type, value, newstate, context) # Pop while we are in an accept-only state state = newstate while states[state] == [(0, state)]: self.pop() if not self.stack: # Done parsing! return True dfa, state, node = self.stack[-1] states, first = dfa # Done with this token return False elif t >= 256: # See if it's a symbol and if we're in its first set itsdfa = self.grammar.dfas[t] itsstates, itsfirst = itsdfa if ilabel in itsfirst: # Push a symbol self.push(t, self.grammar.dfas[t], newstate, context) break # To continue the outer while loop else: if (0, state) in arcs: # An accepting state, pop it and try something else self.pop() if not self.stack: # Done parsing, but another token is input raise ParseError(""too much input"", type, value, context) else: # No success finding a transition raise ParseError(""bad input"", type, value, context) ",Add a token; return True iff this is the end of the program.,13,220,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def addtoken(self, type, value, context): # Map from token to label ilabel = self.classify(type, value, context) # Loop until the token is shifted; may raise exceptions while True: dfa, state, node = self.stack[-1] states, first = dfa arcs = states[state] # Look for a state with this label for i, newstate in arcs: t, v = self.grammar.labels[i] if ilabel == i: # Look it up in the list of labels assert t < 256 # Shift a token; we're done with it self.shift(type, value, newstate, context) # Pop while we are in an accept-only state state = newstate while states[state] == [(0, state)]: self.pop() if not self.stack: # Done parsing! return True dfa, state, node = self.stack[-1] states, first = dfa # Done with this token return False elif t >= 256: # See if it's a symbol and if we're in its first set itsdfa = self.grammar.dfas[t] itsstates, itsfirst = itsdfa if ilabel in itsfirst: # Push a symbol self.push(t, self.grammar.dfas[t], newstate, context) break # To continue the outer while loop else: if (0, state) in arcs: # An accepting state, pop it and try something else self.pop() if not self.stack: # Done parsing, but another token is input raise ParseError(""too much input"", type, value, context) else: # No success finding a transition raise ParseError(""bad input"", type, value, context) ``` ###Assistant : Add a token; return True iff this is the end of the program. " 723,"def import_local_settings(): try: import airflow_local_settings if hasattr(airflow_local_settings, ""__all__""): for i in airflow_local_settings.__all__: globals()[i] = getattr(airflow_local_settings, i) else: for k, v in airflow_local_settings.__dict__.items(): if not k.startswith(""__""): globals()[k] = v # TODO: Remove once deprecated if ""policy"" in globals() and ""task_policy"" not in globals(): warnings.warn( ""Using `policy` in airflow_local_settings.py is deprecated. "" ""Please rename your `policy` to `task_policy`."", DeprecationWarning, stacklevel=2, ) globals()[""task_policy""] = globals()[""policy""] del globals()[""policy""] if not hasattr(task_instance_mutation_hook, 'is_noop'): task_instance_mutation_hook.is_noop = False log.info(""Loaded airflow_local_settings from %s ."", airflow_local_settings.__file__) except ModuleNotFoundError as e: if e.name == ""airflow_local_settings"": log.debug(""No airflow_local_settings to import."", exc_info=True) else: log.critical( ""Failed to import airflow_local_settings due to a transitive module not found error."", exc_info=True, ) raise except ImportError: log.critical(""Failed to import airflow_local_settings."", exc_info=True) raise ",Import airflow_local_settings.py files to allow overriding any configs in settings.py file,11,115,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def import_local_settings(): try: import airflow_local_settings if hasattr(airflow_local_settings, ""__all__""): for i in airflow_local_settings.__all__: globals()[i] = getattr(airflow_local_settings, i) else: for k, v in airflow_local_settings.__dict__.items(): if not k.startswith(""__""): globals()[k] = v # TODO: Remove once deprecated if ""policy"" in globals() and ""task_policy"" not in globals(): warnings.warn( ""Using `policy` in airflow_local_settings.py is deprecated. 
"" ""Please rename your `policy` to `task_policy`."", DeprecationWarning, stacklevel=2, ) globals()[""task_policy""] = globals()[""policy""] del globals()[""policy""] if not hasattr(task_instance_mutation_hook, 'is_noop'): task_instance_mutation_hook.is_noop = False log.info(""Loaded airflow_local_settings from %s ."", airflow_local_settings.__file__) except ModuleNotFoundError as e: if e.name == ""airflow_local_settings"": log.debug(""No airflow_local_settings to import."", exc_info=True) else: log.critical( ""Failed to import airflow_local_settings due to a transitive module not found error."", exc_info=True, ) raise except ImportError: log.critical(""Failed to import airflow_local_settings."", exc_info=True) raise ``` ###Assistant : Import airflow_local_settings.py files to allow overriding any configs in settings.py file " 724,"def unpolarify(eq, subs=None, exponents_only=False): if isinstance(eq, bool): return eq eq = sympify(eq) if subs is not None: return unpolarify(eq.subs(subs)) changed = True pause = False if exponents_only: pause = True while changed: changed = False res = _unpolarify(eq, exponents_only, pause) if res != eq: changed = True eq = res if isinstance(res, bool): return res # Finally, replacing Exp(0) by 1 is always correct. # So is polar_lift(0) -> 0. return res.subs({exp_polar(0): 1, polar_lift(0): 0}) "," If `p` denotes the projection from the Riemann surface of the logarithm to the complex line, return a simplified version `eq'` of `eq` such that `p(eq') = p(eq)`. Also apply the substitution subs in the end. (This is a convenience, since ``unpolarify``, in a certain sense, undoes :func:`polarify`.) Examples ======== >>> from sympy import unpolarify, polar_lift, sin, I >>> unpolarify(polar_lift(I + 2)) 2 + I >>> unpolarify(sin(polar_lift(I + 7))) sin(7 + I) ",72,75,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unpolarify(eq, subs=None, exponents_only=False): if isinstance(eq, bool): return eq eq = sympify(eq) if subs is not None: return unpolarify(eq.subs(subs)) changed = True pause = False if exponents_only: pause = True while changed: changed = False res = _unpolarify(eq, exponents_only, pause) if res != eq: changed = True eq = res if isinstance(res, bool): return res # Finally, replacing Exp(0) by 1 is always correct. # So is polar_lift(0) -> 0. return res.subs({exp_polar(0): 1, polar_lift(0): 0}) ``` ###Assistant : If `p` denotes the projection from the Riemann surface of the logarithm to the complex line, return a simplified version `eq'` of `eq` such that `p(eq') = p(eq)`. Also apply the substitution subs in the end. (This is a convenience, since ``unpolarify``, in a certain sense, undoes :func:`polarify`.) Examples ======== >>> from sympy import unpolarify, polar_lift, sin, I >>> unpolarify(polar_lift(I + 2)) 2 + I >>> unpolarify(sin(polar_lift(I + 7))) sin(7 + I) " 725,"def check_and_raise_error(self) -> None: for thread in self._threads: thread.check_and_raise_error() "," Check all threads for errors Exposed for :mod:`~plugins.extract.pipeline` to check plugin's threads for errors ",14,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def check_and_raise_error(self) -> None: for thread in self._threads: thread.check_and_raise_error() ``` ###Assistant : Check all threads for errors Exposed for :mod:`~plugins.extract.pipeline` to check plugin's threads for errors " 726,"def recorder_or_dbworker(self) -> bool: thread_name = threading.current_thread().name return bool( thread_name == ""Recorder"" or thread_name.startswith(DB_WORKER_PREFIX) ) ",Check if the thread is a recorder or dbworker thread.,10,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def recorder_or_dbworker(self) -> bool: thread_name = threading.current_thread().name return bool( thread_name == ""Recorder"" or thread_name.startswith(DB_WORKER_PREFIX) ) ``` ###Assistant : Check if the thread is a recorder or dbworker thread. " 727,"def renew_resnet_paths(old_list, n_shave_prefix_segments=0): mapping = [] for old_item in old_list: new_item = old_item.replace('in_layers.0', 'norm1') new_item = new_item.replace('in_layers.2', 'conv1') new_item = new_item.replace('out_layers.0', 'norm2') new_item = new_item.replace('out_layers.3', 'conv2') new_item = new_item.replace('emb_layers.1', 'time_emb_proj') new_item = new_item.replace('skip_connection', 'conv_shortcut') new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({'old': old_item, 'new': new_item}) return mapping "," Updates paths inside resnets to the new naming scheme (local renaming) ",11,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def renew_resnet_paths(old_list, n_shave_prefix_segments=0): mapping = [] for old_item in old_list: new_item = old_item.replace('in_layers.0', 'norm1') new_item = new_item.replace('in_layers.2', 'conv1') new_item = new_item.replace('out_layers.0', 'norm2') new_item = new_item.replace('out_layers.3', 'conv2') new_item = new_item.replace('emb_layers.1', 'time_emb_proj') new_item = new_item.replace('skip_connection', 'conv_shortcut') new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({'old': old_item, 'new': new_item}) return mapping ``` ###Assistant : Updates paths inside resnets to the new naming scheme (local renaming) " 728,"def _assert_splits_match(nested_splits_lists): error_msg = ( ""Inputs must have identical ragged splits. "" f""Input received: {nested_splits_lists}"" ) for splits_list in nested_splits_lists: if len(splits_list) != len(nested_splits_lists[0]): raise ValueError(error_msg) return [ tf.debugging.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for (s1, s2) in zip(nested_splits_lists[0], splits_list) ] ","Checks that the given splits lists are identical. Performs static tests to ensure that the given splits lists are identical, and returns a list of control dependency op tensors that check that they are fully identical. Args: nested_splits_lists: A list of nested_splits_lists, where each split_list is a list of `splits` tensors from a `RaggedTensor`, ordered from outermost ragged dimension to innermost ragged dimension. Returns: A list of control dependency op tensors. Raises: ValueError: If the splits are not identical. ",79,42,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _assert_splits_match(nested_splits_lists): error_msg = ( ""Inputs must have identical ragged splits. "" f""Input received: {nested_splits_lists}"" ) for splits_list in nested_splits_lists: if len(splits_list) != len(nested_splits_lists[0]): raise ValueError(error_msg) return [ tf.debugging.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for (s1, s2) in zip(nested_splits_lists[0], splits_list) ] ``` ###Assistant : Checks that the given splits lists are identical. Performs static tests to ensure that the given splits lists are identical, and returns a list of control dependency op tensors that check that they are fully identical. Args: nested_splits_lists: A list of nested_splits_lists, where each split_list is a list of `splits` tensors from a `RaggedTensor`, ordered from outermost ragged dimension to innermost ragged dimension. Returns: A list of control dependency op tensors. Raises: ValueError: If the splits are not identical. " 729,"def _iteration_limit_callback(self, *args) -> None: try: limit = self.vars[""display_iterations""].get() except tk.TclError: # Don't update when there is no value in the variable return logger.debug(""Updating graph iteration limit: (new_value: %s, args: %s)"", limit, args) for graph in self.subnotebook.children.values(): graph.calcs.set_iterations_limit(limit) "," Limit the amount of data displayed in the live graph on a iteration slider variable change. ",16,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _iteration_limit_callback(self, *args) -> None: try: limit = self.vars[""display_iterations""].get() except tk.TclError: # Don't update when there is no value in the variable return logger.debug(""Updating graph iteration limit: (new_value: %s, args: %s)"", limit, args) for graph in self.subnotebook.children.values(): graph.calcs.set_iterations_limit(limit) ``` ###Assistant : Limit the amount of data displayed in the live graph on a iteration slider variable change. " 730,"def test_queued_dagruns_stops_creating_when_max_active_is_reached(self, dag_maker): with dag_maker(max_active_runs=10) as dag: EmptyOperator(task_id='mytask') session = settings.Session() self.scheduler_job = SchedulerJob(subdir=os.devnull) self.scheduler_job.executor = MockExecutor() self.scheduler_job.processor_agent = mock.MagicMock() self.scheduler_job.dagbag = dag_maker.dagbag session = settings.Session() orm_dag = session.query(DagModel).get(dag.dag_id) assert orm_dag is not None for _ in range(20): self.scheduler_job._create_dag_runs([orm_dag], session) drs = session.query(DagRun).all() assert len(drs) == 10 for dr in drs: dr.state = State.RUNNING session.merge(dr) session.commit() assert session.query(DagRun.state).filter(DagRun.state == State.RUNNING).count() == 10 for _ in range(20): self.scheduler_job._create_dag_runs([orm_dag], session) assert session.query(DagRun).count() == 10 assert session.query(DagRun.state).filter(DagRun.state == State.RUNNING).count() == 10 assert session.query(DagRun.state).filter(DagRun.state == State.QUEUED).count() == 0 assert orm_dag.next_dagrun_create_after is None ",This tests that queued dagruns stops creating once max_active_runs is reached,11,88,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_queued_dagruns_stops_creating_when_max_active_is_reached(self, dag_maker): with dag_maker(max_active_runs=10) as dag: EmptyOperator(task_id='mytask') session = settings.Session() self.scheduler_job = SchedulerJob(subdir=os.devnull) self.scheduler_job.executor = MockExecutor() self.scheduler_job.processor_agent = mock.MagicMock() self.scheduler_job.dagbag = dag_maker.dagbag session = settings.Session() orm_dag = session.query(DagModel).get(dag.dag_id) assert orm_dag is not None for _ in range(20): self.scheduler_job._create_dag_runs([orm_dag], session) drs = session.query(DagRun).all() assert len(drs) == 10 for dr in drs: dr.state = State.RUNNING session.merge(dr) session.commit() assert session.query(DagRun.state).filter(DagRun.state == State.RUNNING).count() == 10 for _ in range(20): self.scheduler_job._create_dag_runs([orm_dag], session) assert session.query(DagRun).count() == 10 assert session.query(DagRun.state).filter(DagRun.state == State.RUNNING).count() == 10 assert session.query(DagRun.state).filter(DagRun.state == State.QUEUED).count() == 0 assert orm_dag.next_dagrun_create_after is None ``` ###Assistant : This tests that queued dagruns stops creating once max_active_runs is reached " 731,"def test_escape_sequence_resulting_in_multiple_keypresses(parser): events = list(parser.feed(""\x1b[2;4~"")) assert len(events) == 2 assert events[0].key == ""escape"" assert events[1].key == ""shift+insert"" ",Some sequences are interpreted as more than 1 keypress,9,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_escape_sequence_resulting_in_multiple_keypresses(parser): events = list(parser.feed(""\x1b[2;4~"")) assert len(events) == 2 assert events[0].key == ""escape"" assert events[1].key == ""shift+insert"" ``` ###Assistant : Some sequences are interpreted as more than 1 keypress " 732,"def generate_altered_options(self): models_to_check = self.kept_model_keys.union( self.kept_proxy_keys, self.kept_unmanaged_keys, # unmanaged converted to managed self.old_unmanaged_keys & self.new_model_keys, # managed converted to unmanaged self.old_model_keys & self.new_unmanaged_keys, ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_options = { key: value for key, value in old_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } new_options = { key: value for key, value in new_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } if old_options != new_options: self.add_operation( app_label, operations.AlterModelOptions( name=model_name, options=new_options, ), ) "," Work out if any non-schema-affecting options have changed and make an operation to represent them in state changes (in case Python code in migrations needs them). ",26,85,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def generate_altered_options(self): models_to_check = self.kept_model_keys.union( self.kept_proxy_keys, self.kept_unmanaged_keys, # unmanaged converted to managed self.old_unmanaged_keys & self.new_model_keys, # managed converted to unmanaged self.old_model_keys & self.new_unmanaged_keys, ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_options = { key: value for key, value in old_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } new_options = { key: value for key, value in new_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } if old_options != new_options: self.add_operation( app_label, operations.AlterModelOptions( name=model_name, options=new_options, ), ) ``` ###Assistant : Work out if any non-schema-affecting options have changed and make an operation to represent them in state changes (in case Python code in migrations needs them). " 733,"def _check_readonly_fields(self, obj): if obj.readonly_fields == (): return [] elif not isinstance(obj.readonly_fields, (list, tuple)): return must_be( ""a list or tuple"", option=""readonly_fields"", obj=obj, id=""admin.E034"" ) else: return list( chain.from_iterable( self._check_readonly_fields_item( obj, field_name, ""readonly_fields[%d]"" % index ) for index, field_name in enumerate(obj.readonly_fields) ) ) ",Check that readonly_fields refers to proper attribute or field.,9,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_readonly_fields(self, obj): if obj.readonly_fields == (): return [] elif not isinstance(obj.readonly_fields, (list, tuple)): return must_be( ""a list or tuple"", option=""readonly_fields"", obj=obj, id=""admin.E034"" ) else: return list( chain.from_iterable( self._check_readonly_fields_item( obj, field_name, ""readonly_fields[%d]"" % index ) for index, field_name in enumerate(obj.readonly_fields) ) ) ``` ###Assistant : Check that readonly_fields refers to proper attribute or field. " 734,"def querystring(context, **kwargs): request = context[""request""] querydict = request.GET.copy() # Can't do querydict.update(kwargs), because QueryDict.update() appends to # the list of values, instead of replacing the values. for key, value in kwargs.items(): if value is None: # Remove the key if the value is None querydict.pop(key, None) else: # Set the key otherwise querydict[key] = str(value) return ""?"" + querydict.urlencode() @register.simple_tag(takes_context=True)"," Print out the current querystring. Any keyword arguments to this template tag will be added to the querystring before it is printed out. Will result in something like: ",35,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def querystring(context, **kwargs): request = context[""request""] querydict = request.GET.copy() # Can't do querydict.update(kwargs), because QueryDict.update() appends to # the list of values, instead of replacing the values. 
for key, value in kwargs.items(): if value is None: # Remove the key if the value is None querydict.pop(key, None) else: # Set the key otherwise querydict[key] = str(value) return ""?"" + querydict.urlencode() @register.simple_tag(takes_context=True) ``` ###Assistant : Print out the current querystring. Any keyword arguments to this template tag will be added to the querystring before it is printed out. Will result in something like: " 735,"def _get_fingerprint_of_schema_without_irrelevant_keys(self) -> Text: graph_schema = self._execution_context.graph_schema schema_as_dict = graph_schema.as_dict() for node_name, node_dict in schema_as_dict[""nodes""].items(): config_copy = copy.deepcopy(node_dict[""config""]) config_copy.pop(EPOCHS, None) # ignore default values since they're filled in anyway later and can # end up in configs (or not) in mysterious ways defaults = graph_schema.nodes[node_name].uses.get_default_config() for key, default_value in defaults.items(): if key in config_copy and config_copy[key] == default_value: config_copy.pop(key) node_dict[""config""] = config_copy node_dict.pop(""eager"") node_dict.pop(""constructor_name"") return rasa.shared.utils.io.deep_container_fingerprint(schema_as_dict) ","Returns a fingerprint of the given schema with certain items removed. These items include specifications that do not influence actual training results such as ""eager"" mode. The only configuration (in your config) that is allowed to change is the number of `epochs`. Returns: fingerprint ",44,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_fingerprint_of_schema_without_irrelevant_keys(self) -> Text: graph_schema = self._execution_context.graph_schema schema_as_dict = graph_schema.as_dict() for node_name, node_dict in schema_as_dict[""nodes""].items(): config_copy = copy.deepcopy(node_dict[""config""]) config_copy.pop(EPOCHS, None) # ignore default values since they're filled in anyway later and can # end up in configs (or not) in mysterious ways defaults = graph_schema.nodes[node_name].uses.get_default_config() for key, default_value in defaults.items(): if key in config_copy and config_copy[key] == default_value: config_copy.pop(key) node_dict[""config""] = config_copy node_dict.pop(""eager"") node_dict.pop(""constructor_name"") return rasa.shared.utils.io.deep_container_fingerprint(schema_as_dict) ``` ###Assistant : Returns a fingerprint of the given schema with certain items removed. These items include specifications that do not influence actual training results such as ""eager"" mode. The only configuration (in your config) that is allowed to change is the number of `epochs`. Returns: fingerprint " 736,"def get_gi_typelibs(module, version): module_info = GiModuleInfo(module, version) return module_info.collect_typelib_data() "," Return a tuple of (binaries, datas, hiddenimports) to be used by PyGObject related hooks. Searches for and adds dependencies recursively. :param module: GI module name, as passed to 'gi.require_version()' :param version: GI module version, as passed to 'gi.require_version()' ",38,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_gi_typelibs(module, version): module_info = GiModuleInfo(module, version) return module_info.collect_typelib_data() ``` ###Assistant : Return a tuple of (binaries, datas, hiddenimports) to be used by PyGObject related hooks. Searches for and adds dependencies recursively. 
:param module: GI module name, as passed to 'gi.require_version()' :param version: GI module version, as passed to 'gi.require_version()' " 737,"def _sync_dag_view_permissions(self, dag_id, access_control): dag_resource_name = permissions.resource_name_for_dag(dag_id) "," Set the access policy on the given DAG's ViewModel. :param dag_id: the ID of the DAG whose permissions should be updated :param access_control: a dict where each key is a rolename and each value is a set() of action names (e.g. {'can_read'}) ",42,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _sync_dag_view_permissions(self, dag_id, access_control): dag_resource_name = permissions.resource_name_for_dag(dag_id) ``` ###Assistant : Set the access policy on the given DAG's ViewModel. :param dag_id: the ID of the DAG whose permissions should be updated :param access_control: a dict where each key is a rolename and each value is a set() of action names (e.g. {'can_read'}) " 738,"def test_series_equal_datetime_values_mismatch(rtol): msg = s1 = Series(pd.date_range(""2018-01-01"", periods=3, freq=""D"")) s2 = Series(pd.date_range(""2019-02-02"", periods=3, freq=""D"")) with pytest.raises(AssertionError, match=msg): tm.assert_series_equal(s1, s2, rtol=rtol) ","Series are different Series values are different \\(100.0 %\\) \\[index\\]: \\[0, 1, 2\\] \\[left\\]: \\[1514764800000000000, 1514851200000000000, 1514937600000000000\\] \\[right\\]: \\[1549065600000000000, 1549152000000000000, 1549238400000000000\\]",21,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_series_equal_datetime_values_mismatch(rtol): msg = s1 = Series(pd.date_range(""2018-01-01"", periods=3, freq=""D"")) s2 = Series(pd.date_range(""2019-02-02"", periods=3, freq=""D"")) with pytest.raises(AssertionError, match=msg): tm.assert_series_equal(s1, s2, rtol=rtol) ``` ###Assistant : Series are different Series values are different \\(100.0 %\\) \\[index\\]: \\[0, 1, 2\\] \\[left\\]: \\[1514764800000000000, 1514851200000000000, 1514937600000000000\\] \\[right\\]: \\[1549065600000000000, 1549152000000000000, 1549238400000000000\\] " 739,"def test_decrypt_pillar_invalid_renderer(salt_master, grains, pillar_homedir): opts = salt_master.config.copy() opts[""decrypt_pillar""] = [{""secrets:vault"": ""gpg""}] opts[""decrypt_pillar_default""] = ""foo"" opts[""decrypt_pillar_renderers""] = [""foo"", ""bar""] pillar_obj = salt.pillar.Pillar(opts, grains, ""test"", ""base"") ret = pillar_obj.compile_pillar() expected = copy.deepcopy(GPG_PILLAR_ENCRYPTED) expected[""_errors""] = [ ""Failed to decrypt pillar key 'secrets:vault': 'gpg' is not a valid decryption"" "" renderer. Valid choices are: foo, bar"" ] assert ret[""_errors""] == expected[""_errors""] assert ret[""secrets""][""vault""][""foo""] == expected[""secrets""][""vault""][""foo""] assert ret[""secrets""][""vault""][""bar""] == expected[""secrets""][""vault""][""bar""] assert ret[""secrets""][""vault""][""baz""] == expected[""secrets""][""vault""][""baz""] assert ret[""secrets""][""vault""][""qux""] == expected[""secrets""][""vault""][""qux""] "," Test decryption using a renderer which is not permitted. It should fail, leaving the encrypted keys intact, and add an error to the pillar dictionary. decrypt_pillar_default: foo decrypt_pillar_renderers: - foo - bar decrypt_pillar: - 'secrets:vault': gpg ",36,73,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_decrypt_pillar_invalid_renderer(salt_master, grains, pillar_homedir): opts = salt_master.config.copy() opts[""decrypt_pillar""] = [{""secrets:vault"": ""gpg""}] opts[""decrypt_pillar_default""] = ""foo"" opts[""decrypt_pillar_renderers""] = [""foo"", ""bar""] pillar_obj = salt.pillar.Pillar(opts, grains, ""test"", ""base"") ret = pillar_obj.compile_pillar() expected = copy.deepcopy(GPG_PILLAR_ENCRYPTED) expected[""_errors""] = [ ""Failed to decrypt pillar key 'secrets:vault': 'gpg' is not a valid decryption"" "" renderer. Valid choices are: foo, bar"" ] assert ret[""_errors""] == expected[""_errors""] assert ret[""secrets""][""vault""][""foo""] == expected[""secrets""][""vault""][""foo""] assert ret[""secrets""][""vault""][""bar""] == expected[""secrets""][""vault""][""bar""] assert ret[""secrets""][""vault""][""baz""] == expected[""secrets""][""vault""][""baz""] assert ret[""secrets""][""vault""][""qux""] == expected[""secrets""][""vault""][""qux""] ``` ###Assistant : Test decryption using a renderer which is not permitted. It should fail, leaving the encrypted keys intact, and add an error to the pillar dictionary. decrypt_pillar_default: foo decrypt_pillar_renderers: - foo - bar decrypt_pillar: - 'secrets:vault': gpg " 740,"def get_trial_name(): warnings.warn( _deprecation_msg, DeprecationWarning, ) _session = get_session() if _session: return _session.trial_name @DeveloperAPI","Trial name for the corresponding trial. For function API use only. ",11,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_trial_name(): warnings.warn( _deprecation_msg, DeprecationWarning, ) _session = get_session() if _session: return _session.trial_name @DeveloperAPI ``` ###Assistant : Trial name for the corresponding trial. For function API use only. " 741,"def get_scheduler_lock(collection=None, scheduler=None): from dask import multiprocessing from dask.base import get_scheduler actual_get = get_scheduler(collections=[collection], scheduler=scheduler) if actual_get == multiprocessing.get: return multiprocessing.get_context().Manager().Lock() return SerializableLock() ","Get an instance of the appropriate lock for a certain situation based on scheduler used.",15,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_scheduler_lock(collection=None, scheduler=None): from dask import multiprocessing from dask.base import get_scheduler actual_get = get_scheduler(collections=[collection], scheduler=scheduler) if actual_get == multiprocessing.get: return multiprocessing.get_context().Manager().Lock() return SerializableLock() ``` ###Assistant : Get an instance of the appropriate lock for a certain situation based on scheduler used. " 742,"def verify_dataset_shuffled(x): assert isinstance(x, tf.data.Dataset) graph_def = get_dataset_graph_def(x) for node in graph_def.node: if node.op.startswith(""ShuffleDataset""): return True # Also check graph_def.library.function for ds.interleave or ds.flat_map for function in graph_def.library.function: for node in function.node_def: if node.op.startswith(""ShuffleDataset""): return True logging.warning( ""Expected a shuffled dataset but input dataset `x` is "" ""not shuffled. Please invoke `shuffle()` on input dataset."" ) return False ","Verifies that the dataset is shuffled. Args: x: Dataset passed as an input to the model. 
Returns: boolean, whether the input dataset is shuffled or not. ",26,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def verify_dataset_shuffled(x): assert isinstance(x, tf.data.Dataset) graph_def = get_dataset_graph_def(x) for node in graph_def.node: if node.op.startswith(""ShuffleDataset""): return True # Also check graph_def.library.function for ds.interleave or ds.flat_map for function in graph_def.library.function: for node in function.node_def: if node.op.startswith(""ShuffleDataset""): return True logging.warning( ""Expected a shuffled dataset but input dataset `x` is "" ""not shuffled. Please invoke `shuffle()` on input dataset."" ) return False ``` ###Assistant : Verifies that the dataset is shuffled. Args: x: Dataset passed as an input to the model. Returns: boolean, whether the input dataset is shuffled or not. " 743,"def get_binance_available_quotes_for_each_coin() -> dict: trading_pairs = _get_trading_pairs() results = defaultdict(list) for pair in trading_pairs: results[pair[""baseAsset""]].append(pair[""quoteAsset""]) return results @log_start_end(log=logger)","Helper methods that for every coin available on Binance add all quote assets. [Source: Binance] Returns ------- dict: All quote assets for given coin {'ETH' : ['BTC', 'USDT' ...], 'UNI' : ['ETH', 'BTC','BUSD', ...] ",34,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_binance_available_quotes_for_each_coin() -> dict: trading_pairs = _get_trading_pairs() results = defaultdict(list) for pair in trading_pairs: results[pair[""baseAsset""]].append(pair[""quoteAsset""]) return results @log_start_end(log=logger) ``` ###Assistant : Helper methods that for every coin available on Binance add all quote assets. [Source: Binance] Returns ------- dict: All quote assets for given coin {'ETH' : ['BTC', 'USDT' ...], 'UNI' : ['ETH', 'BTC','BUSD', ...] " 744,"def update_qty_in_future_sle(args, allow_negative_stock=False): datetime_limit_condition = """" qty_shift = args.actual_qty # find difference/shift in qty caused by stock reconciliation if args.voucher_type == ""Stock Reconciliation"": qty_shift = get_stock_reco_qty_shift(args) # find the next nearest stock reco so that we only recalculate SLEs till that point next_stock_reco_detail = get_next_stock_reco(args) if next_stock_reco_detail: detail = next_stock_reco_detail[0] # add condition to update SLEs before this date & time datetime_limit_condition = get_datetime_limit_condition(detail) frappe.db.sql( .format( qty_shift=qty_shift, datetime_limit_condition=datetime_limit_condition ), args, ) validate_negative_qty_in_future_sle(args, allow_negative_stock) ","Recalculate Qty after Transaction in future SLEs based on current SLE. update `tabStock Ledger Entry` set qty_after_transaction = qty_after_transaction + {qty_shift} where item_code = %(item_code)s and warehouse = %(warehouse)s and voucher_no != %(voucher_no)s and is_cancelled = 0 and (timestamp(posting_date, posting_time) > timestamp(%(posting_date)s, %(posting_time)s) or ( timestamp(posting_date, posting_time) = timestamp(%(posting_date)s, %(posting_time)s) and creation > %(creation)s ) ) {datetime_limit_condition} ",57,73,Python," ###User : Below is a Python method which does a task. 
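A minimal sketch of the defaultdict grouping used by get_binance_available_quotes_for_each_coin above; the sample trading pairs are hypothetical stand-ins for the exchange response:
```Python
from collections import defaultdict

def quotes_per_base(trading_pairs):
    # Group every quote asset under its base asset, mirroring the helper above.
    results = defaultdict(list)
    for pair in trading_pairs:
        results[pair['baseAsset']].append(pair['quoteAsset'])
    return results

# Hypothetical sample data standing in for the API response.
pairs = [
    {'baseAsset': 'ETH', 'quoteAsset': 'BTC'},
    {'baseAsset': 'ETH', 'quoteAsset': 'USDT'},
    {'baseAsset': 'UNI', 'quoteAsset': 'ETH'},
]
assert quotes_per_base(pairs)['ETH'] == ['BTC', 'USDT']
```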
Create a documentation for the below code : ```Python def update_qty_in_future_sle(args, allow_negative_stock=False): datetime_limit_condition = """" qty_shift = args.actual_qty # find difference/shift in qty caused by stock reconciliation if args.voucher_type == ""Stock Reconciliation"": qty_shift = get_stock_reco_qty_shift(args) # find the next nearest stock reco so that we only recalculate SLEs till that point next_stock_reco_detail = get_next_stock_reco(args) if next_stock_reco_detail: detail = next_stock_reco_detail[0] # add condition to update SLEs before this date & time datetime_limit_condition = get_datetime_limit_condition(detail) frappe.db.sql( .format( qty_shift=qty_shift, datetime_limit_condition=datetime_limit_condition ), args, ) validate_negative_qty_in_future_sle(args, allow_negative_stock) ``` ###Assistant : Recalculate Qty after Transaction in future SLEs based on current SLE. update `tabStock Ledger Entry` set qty_after_transaction = qty_after_transaction + {qty_shift} where item_code = %(item_code)s and warehouse = %(warehouse)s and voucher_no != %(voucher_no)s and is_cancelled = 0 and (timestamp(posting_date, posting_time) > timestamp(%(posting_date)s, %(posting_time)s) or ( timestamp(posting_date, posting_time) = timestamp(%(posting_date)s, %(posting_time)s) and creation > %(creation)s ) ) {datetime_limit_condition} " 745,"def get_current_timezone_tag(parser, token): # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError( ""'get_current_timezone' requires 'as variable' (got %r)"" % args ) return GetCurrentTimezoneNode(args[2]) "," Store the name of the current time zone in the context. Usage:: {% get_current_timezone as TIME_ZONE %} This will fetch the currently active time zone and put its name into the ``TIME_ZONE`` context variable. ",34,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_current_timezone_tag(parser, token): # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError( ""'get_current_timezone' requires 'as variable' (got %r)"" % args ) return GetCurrentTimezoneNode(args[2]) ``` ###Assistant : Store the name of the current time zone in the context. Usage:: {% get_current_timezone as TIME_ZONE %} This will fetch the currently active time zone and put its name into the ``TIME_ZONE`` context variable. " 746,"def test_save_multiple_world_logs_mutator(self): with testing_utils.tempdir() as tmpdir: log_report = os.path.join(tmpdir, 'world_logs.jsonl') multitask = 'integration_tests:mutators=flatten,integration_tests:ReverseTeacher:mutator=reverse' valid, test = testing_utils.train_model( { 'task': multitask, 'validation_max_exs': 10, 'model': 'repeat_label', 'short_final_eval': True, 'num_epochs': 1.0, 'world_logs': log_report, } ) for task in multitask.split(','): task_log_report = get_task_world_logs( task, log_report, is_multitask=True ) with PathManager.open(task_log_report) as f: json_lines = f.readlines() assert len(json_lines) == 5 @register_agent(""fake_report"")"," Test that we can save multiple world_logs from train model on multiple tasks with mutators present. ",16,55,Python," ###User : Below is a Python method which does a task. 
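A minimal sketch of the 'as variable' parsing rule that get_current_timezone_tag above relies on, stripped of Django's template Node machinery (the local exception class stands in for Django's TemplateSyntaxError):
```Python
class TemplateSyntaxError(Exception):
    pass

def parse_as_variable(token_contents: str) -> str:
    # Expect exactly "<tagname> as <varname>"; anything else is a syntax error.
    args = token_contents.split()
    if len(args) != 3 or args[1] != 'as':
        raise TemplateSyntaxError(
            "'get_current_timezone' requires 'as variable' (got %r)" % args
        )
    return args[2]

assert parse_as_variable('get_current_timezone as TIME_ZONE') == 'TIME_ZONE'
```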
Create a documentation for the below code : ```Python def test_save_multiple_world_logs_mutator(self): with testing_utils.tempdir() as tmpdir: log_report = os.path.join(tmpdir, 'world_logs.jsonl') multitask = 'integration_tests:mutators=flatten,integration_tests:ReverseTeacher:mutator=reverse' valid, test = testing_utils.train_model( { 'task': multitask, 'validation_max_exs': 10, 'model': 'repeat_label', 'short_final_eval': True, 'num_epochs': 1.0, 'world_logs': log_report, } ) for task in multitask.split(','): task_log_report = get_task_world_logs( task, log_report, is_multitask=True ) with PathManager.open(task_log_report) as f: json_lines = f.readlines() assert len(json_lines) == 5 @register_agent(""fake_report"") ``` ###Assistant : Test that we can save multiple world_logs from train model on multiple tasks with mutators present. " 747,"def testDotsInLogdir(self): local_dir_path = Path(""/tmp/test_rel_dots"") local_dir = str(local_dir_path) if local_dir_path.exists(): local_dir = tempfile.mkdtemp(prefix=str(local_dir_path) + ""_"") trial = Trial(trainable_name=""rel_logdir"", local_dir=local_dir) with self.assertRaises(ValueError): trial.logdir = ""/tmp/test_rel/../dots"" with self.assertRaises(ValueError): trial.logdir = local_dir + ""/../"" if shutil.rmtree.avoids_symlink_attacks: if local_dir_path.exists(): shutil.rmtree(local_dir) ",This should result in errors as dots in paths are not allowed.,12,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def testDotsInLogdir(self): local_dir_path = Path(""/tmp/test_rel_dots"") local_dir = str(local_dir_path) if local_dir_path.exists(): local_dir = tempfile.mkdtemp(prefix=str(local_dir_path) + ""_"") trial = Trial(trainable_name=""rel_logdir"", local_dir=local_dir) with self.assertRaises(ValueError): trial.logdir = ""/tmp/test_rel/../dots"" with self.assertRaises(ValueError): trial.logdir = local_dir + ""/../"" if shutil.rmtree.avoids_symlink_attacks: if local_dir_path.exists(): shutil.rmtree(local_dir) ``` ###Assistant : This should result in errors as dots in paths are not allowed. " 748,"def test_delete_post(self): # Send request response = self.client.post( reverse(""wagtailimages:delete_multiple"", args=(self.image.id,)) ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response[""Content-Type""], ""application/json"") # Make sure the image is deleted self.assertFalse(Image.objects.filter(id=self.image.id).exists()) # Check JSON response_json = json.loads(response.content.decode()) self.assertIn(""image_id"", response_json) self.assertIn(""success"", response_json) self.assertEqual(response_json[""image_id""], self.image.id) self.assertTrue(response_json[""success""]) @override_settings(WAGTAILIMAGES_IMAGE_MODEL=""tests.CustomImage"")"," This tests that a POST request to the delete view deletes the image ",13,40,Python," ###User : Below is a Python method which does a task. 
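A minimal sketch of the validation exercised by testDotsInLogdir above: rejecting relative '..' segments in a user-supplied directory. The helper name is hypothetical and the check is deliberately simpler than the real one:
```Python
def validate_logdir(path: str) -> str:
    # Reject any path containing a relative '..' segment, since dots would
    # let the logdir escape its intended parent directory.
    if '..' in path.replace('\\', '/').split('/'):
        raise ValueError(f"Relative '..' segments are not allowed: {path!r}")
    return path

validate_logdir('/tmp/test_rel_dots/run1')      # accepted
try:
    validate_logdir('/tmp/test_rel/../dots')     # rejected
except ValueError:
    pass
```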
Create a documentation for the below code : ```Python def test_delete_post(self): # Send request response = self.client.post( reverse(""wagtailimages:delete_multiple"", args=(self.image.id,)) ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response[""Content-Type""], ""application/json"") # Make sure the image is deleted self.assertFalse(Image.objects.filter(id=self.image.id).exists()) # Check JSON response_json = json.loads(response.content.decode()) self.assertIn(""image_id"", response_json) self.assertIn(""success"", response_json) self.assertEqual(response_json[""image_id""], self.image.id) self.assertTrue(response_json[""success""]) @override_settings(WAGTAILIMAGES_IMAGE_MODEL=""tests.CustomImage"") ``` ###Assistant : This tests that a POST request to the delete view deletes the image " 749,"def _show_diff_helper(self, frame_data, expected_frame_data): import matplotlib.gridspec as gridspec # type: ignore import matplotlib.pyplot as plt gs = gridspec.GridSpec(2, 2) fig = plt.figure() fig.suptitle(f""Test for {str(self.scene).replace('Test', '')}"", fontsize=16) ax = fig.add_subplot(gs[0, 0]) ax.imshow(frame_data) ax.set_title(""Generated :"") ax = fig.add_subplot(gs[0, 1]) ax.imshow(expected_frame_data) ax.set_title(""Expected :"") ax = fig.add_subplot(gs[1, :]) diff_im = expected_frame_data.copy() diff_im = np.where( frame_data != np.array([0, 0, 0, 255]), np.array([0, 255, 0, 255], dtype=""uint8""), np.array([0, 0, 0, 255], dtype=""uint8""), ) # Set any non-black pixels to green np.putmask( diff_im, expected_frame_data != frame_data, np.array([255, 0, 0, 255], dtype=""uint8""), ) # Set any different pixels to red ax.imshow(diff_im, interpolation=""nearest"") ax.set_title(""Differences summary : (green = same, red = different)"") plt.show() plt.savefig(f""{self.scene}.png"") ",Will visually display with matplotlib differences between frame generated and the one expected.,13,106,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _show_diff_helper(self, frame_data, expected_frame_data): import matplotlib.gridspec as gridspec # type: ignore import matplotlib.pyplot as plt gs = gridspec.GridSpec(2, 2) fig = plt.figure() fig.suptitle(f""Test for {str(self.scene).replace('Test', '')}"", fontsize=16) ax = fig.add_subplot(gs[0, 0]) ax.imshow(frame_data) ax.set_title(""Generated :"") ax = fig.add_subplot(gs[0, 1]) ax.imshow(expected_frame_data) ax.set_title(""Expected :"") ax = fig.add_subplot(gs[1, :]) diff_im = expected_frame_data.copy() diff_im = np.where( frame_data != np.array([0, 0, 0, 255]), np.array([0, 255, 0, 255], dtype=""uint8""), np.array([0, 0, 0, 255], dtype=""uint8""), ) # Set any non-black pixels to green np.putmask( diff_im, expected_frame_data != frame_data, np.array([255, 0, 0, 255], dtype=""uint8""), ) # Set any different pixels to red ax.imshow(diff_im, interpolation=""nearest"") ax.set_title(""Differences summary : (green = same, red = different)"") plt.show() plt.savefig(f""{self.scene}.png"") ``` ###Assistant : Will visually display with matplotlib differences between frame generated and the one expected. " 750,"def consume_capacity(self, task): if self.is_container_group: self.container_group_jobs += 1 self.container_group_consumed_forks += task.task_impact else: raise RuntimeError(""We only track capacity for container groups at the instance group level. 
Otherwise, consume capacity on instances."") ",We only consume capacity on an instance group level if it is a container group. Otherwise we consume capacity on an instance level.,23,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def consume_capacity(self, task): if self.is_container_group: self.container_group_jobs += 1 self.container_group_consumed_forks += task.task_impact else: raise RuntimeError(""We only track capacity for container groups at the instance group level. Otherwise, consume capacity on instances."") ``` ###Assistant : We only consume capacity on an instance group level if it is a container group. Otherwise we consume capacity on an instance level. " 751,"def get_avail_mem_per_ray_worker_node(spark, object_store_memory_per_node): num_cpus_per_spark_task = int( spark.sparkContext.getConf().get(""spark.task.cpus"", ""1"") ) "," Return the available heap memory and object store memory for each ray worker. NB: We have one ray node per spark task. ",22,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_avail_mem_per_ray_worker_node(spark, object_store_memory_per_node): num_cpus_per_spark_task = int( spark.sparkContext.getConf().get(""spark.task.cpus"", ""1"") ) ``` ###Assistant : Return the available heap memory and object store memory for each ray worker. NB: We have one ray node per spark task. " 752,"def _iter_egg_info_dependencies(self) -> Iterable[str]: for entry in self._iter_requires_txt_entries(): if entry.extra and entry.marker: marker = f'({entry.marker}) and extra == ""{safe_extra(entry.extra)}""' elif entry.extra: marker = f'extra == ""{safe_extra(entry.extra)}""' elif entry.marker: marker = entry.marker else: marker = """" if marker: yield f""{entry.requirement} ; {marker}"" else: yield entry.requirement ","Get distribution dependencies from the egg-info directory. To ease parsing, this converts a legacy dependency entry into a PEP 508 requirement string. Like ``_iter_requires_txt_entries()``, there is code in ``importlib.metadata`` that does mostly the same, but not do exactly what we need. Namely, ``importlib.metadata`` does not normalize the extra name before putting it into the requirement string, which causes marker comparison to fail because the dist-info format do normalize. This is consistent in all currently available PEP 517 backends, although not standardized. ",81,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _iter_egg_info_dependencies(self) -> Iterable[str]: for entry in self._iter_requires_txt_entries(): if entry.extra and entry.marker: marker = f'({entry.marker}) and extra == ""{safe_extra(entry.extra)}""' elif entry.extra: marker = f'extra == ""{safe_extra(entry.extra)}""' elif entry.marker: marker = entry.marker else: marker = """" if marker: yield f""{entry.requirement} ; {marker}"" else: yield entry.requirement ``` ###Assistant : Get distribution dependencies from the egg-info directory. To ease parsing, this converts a legacy dependency entry into a PEP 508 requirement string. Like ``_iter_requires_txt_entries()``, there is code in ``importlib.metadata`` that does mostly the same, but not do exactly what we need. Namely, ``importlib.metadata`` does not normalize the extra name before putting it into the requirement string, which causes marker comparison to fail because the dist-info format do normalize. 
This is consistent in all currently available PEP 517 backends, although not standardized. " 753,"def cache_from_source(path, debug_override=None): with warnings.catch_warnings(): warnings.simplefilter('ignore') return util.cache_from_source(path, debug_override) ","**DEPRECATED** Given the path to a .py file, return the path to its .pyc file. The .py file does not need to exist; this simply returns the path to the .pyc file calculated as if the .py file were imported. If debug_override is not None, then it must be a boolean and is used in place of sys.flags.optimize. If sys.implementation.cache_tag is None then NotImplementedError is raised. ",66,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cache_from_source(path, debug_override=None): with warnings.catch_warnings(): warnings.simplefilter('ignore') return util.cache_from_source(path, debug_override) ``` ###Assistant : **DEPRECATED** Given the path to a .py file, return the path to its .pyc file. The .py file does not need to exist; this simply returns the path to the .pyc file calculated as if the .py file were imported. If debug_override is not None, then it must be a boolean and is used in place of sys.flags.optimize. If sys.implementation.cache_tag is None then NotImplementedError is raised. " 754,"def _url_collapse_path(path): # Query component should not be involved. path, _, query = path.partition('?') path = urllib.parse.unquote(path) # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. path_parts = path.split('/') head_parts = [] for part in path_parts[:-1]: if part == '..': head_parts.pop() # IndexError if more '..' than prior parts elif part and part != '.': head_parts.append( part ) if path_parts: tail_part = path_parts.pop() if tail_part: if tail_part == '..': head_parts.pop() tail_part = '' elif tail_part == '.': tail_part = '' else: tail_part = '' if query: tail_part = '?'.join((tail_part, query)) splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = ""/"".join(splitpath) return collapsed_path nobody = None "," Given a URL path, remove extra '/'s and '.' path elements and collapse any '..' references and returns a collapsed path. Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. The utility of this function is limited to is_cgi method and helps preventing some security attacks. Returns: The reconstituted URL, which will always start with a '/'. Raises: IndexError if too many '..' occur within the path. ",70,112,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _url_collapse_path(path): # Query component should not be involved. path, _, query = path.partition('?') path = urllib.parse.unquote(path) # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. path_parts = path.split('/') head_parts = [] for part in path_parts[:-1]: if part == '..': head_parts.pop() # IndexError if more '..' 
than prior parts elif part and part != '.': head_parts.append( part ) if path_parts: tail_part = path_parts.pop() if tail_part: if tail_part == '..': head_parts.pop() tail_part = '' elif tail_part == '.': tail_part = '' else: tail_part = '' if query: tail_part = '?'.join((tail_part, query)) splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = ""/"".join(splitpath) return collapsed_path nobody = None ``` ###Assistant : Given a URL path, remove extra '/'s and '.' path elements and collapse any '..' references and returns a collapsed path. Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. The utility of this function is limited to is_cgi method and helps preventing some security attacks. Returns: The reconstituted URL, which will always start with a '/'. Raises: IndexError if too many '..' occur within the path. " 755,"def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False): from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo A, rows, columns = sparse_series_to_coo( self._parent, row_levels, column_levels, sort_labels=sort_labels ) return A, rows, columns "," Create a scipy.sparse.coo_matrix from a Series with MultiIndex. Use row_levels and column_levels to determine the row and column coordinates respectively. row_levels and column_levels are the names (labels) or numbers of the levels. {row_levels, column_levels} must be a partition of the MultiIndex level names (or numbers). Parameters ---------- row_levels : tuple/list column_levels : tuple/list sort_labels : bool, default False Sort the row and column labels before forming the sparse matrix. When `row_levels` and/or `column_levels` refer to a single level, set to `True` for a faster execution. Returns ------- y : scipy.sparse.coo_matrix rows : list (row labels) columns : list (column labels) Examples -------- >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan]) >>> s.index = pd.MultiIndex.from_tuples( ... [ ... (1, 2, ""a"", 0), ... (1, 2, ""a"", 1), ... (1, 1, ""b"", 0), ... (1, 1, ""b"", 1), ... (2, 1, ""b"", 0), ... (2, 1, ""b"", 1) ... ], ... names=[""A"", ""B"", ""C"", ""D""], ... ) >>> s A B C D 1 2 a 0 3.0 1 NaN 1 b 0 1.0 1 3.0 2 1 b 0 NaN 1 NaN dtype: float64 >>> ss = s.astype(""Sparse"") >>> ss A B C D 1 2 a 0 3.0 1 NaN 1 b 0 1.0 1 3.0 2 1 b 0 NaN 1 NaN dtype: Sparse[float64, nan] >>> A, rows, columns = ss.sparse.to_coo( ... row_levels=[""A"", ""B""], column_levels=[""C"", ""D""], sort_labels=True ... ) >>> A <3x4 sparse matrix of type '' with 3 stored elements in COOrdinate format> >>> A.todense() matrix([[0., 0., 1., 3.], [3., 0., 0., 0.], [0., 0., 0., 0.]]) >>> rows [(1, 1), (1, 2), (2, 1)] >>> columns [('a', 0), ('a', 1), ('b', 0), ('b', 1)] ",279,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False): from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo A, rows, columns = sparse_series_to_coo( self._parent, row_levels, column_levels, sort_labels=sort_labels ) return A, rows, columns ``` ###Assistant : Create a scipy.sparse.coo_matrix from a Series with MultiIndex. Use row_levels and column_levels to determine the row and column coordinates respectively. row_levels and column_levels are the names (labels) or numbers of the levels. {row_levels, column_levels} must be a partition of the MultiIndex level names (or numbers). 
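A minimal sketch of the '..'/'.' collapsing behaviour implemented by _url_collapse_path above, approximated with posixpath.normpath on the unquoted path (query handling and the IndexError on excess '..' are omitted here):
```Python
import posixpath
from urllib.parse import unquote

def collapse(path: str) -> str:
    # normpath removes '.' segments and resolves '..' against prior segments,
    # which is the core of the helper above; unlike the original it does not
    # raise on too many '..' and it ignores any query string.
    return posixpath.normpath(unquote(path))

assert collapse('/a/b/../c/./d') == '/a/c/d'
```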
Parameters ---------- row_levels : tuple/list column_levels : tuple/list sort_labels : bool, default False Sort the row and column labels before forming the sparse matrix. When `row_levels` and/or `column_levels` refer to a single level, set to `True` for a faster execution. Returns ------- y : scipy.sparse.coo_matrix rows : list (row labels) columns : list (column labels) Examples -------- >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan]) >>> s.index = pd.MultiIndex.from_tuples( ... [ ... (1, 2, ""a"", 0), ... (1, 2, ""a"", 1), ... (1, 1, ""b"", 0), ... (1, 1, ""b"", 1), ... (2, 1, ""b"", 0), ... (2, 1, ""b"", 1) ... ], ... names=[""A"", ""B"", ""C"", ""D""], ... ) >>> s A B C D 1 2 a 0 3.0 1 NaN 1 b 0 1.0 1 3.0 2 1 b 0 NaN 1 NaN dtype: float64 >>> ss = s.astype(""Sparse"") >>> ss A B C D 1 2 a 0 3.0 1 NaN 1 b 0 1.0 1 3.0 2 1 b 0 NaN 1 NaN dtype: Sparse[float64, nan] >>> A, rows, columns = ss.sparse.to_coo( ... row_levels=[""A"", ""B""], column_levels=[""C"", ""D""], sort_labels=True ... ) >>> A <3x4 sparse matrix of type '' with 3 stored elements in COOrdinate format> >>> A.todense() matrix([[0., 0., 1., 3.], [3., 0., 0., 0.], [0., 0., 0., 0.]]) >>> rows [(1, 1), (1, 2), (2, 1)] >>> columns [('a', 0), ('a', 1), ('b', 0), ('b', 1)] " 756,"def _get_memory_heuristic_values(self) -> Dict[str, Union[str, float, bool]]: return { 'ignore_in_session_memories': self.opt.get( 'ignore_in_session_memories_mkm', False ), 'memory_overlap_threshold': self.opt.get('memory_overlap_threshold', 0.0), 'memory_hard_block_for_n_turns': self.opt.get( 'memory_hard_block_for_n_turns', 0 ), 'memory_soft_block_decay_factor': self.opt.get( 'memory_soft_block_decay_factor', 0.0 ), } "," Extract heuristics from self.opt. ",4,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_memory_heuristic_values(self) -> Dict[str, Union[str, float, bool]]: return { 'ignore_in_session_memories': self.opt.get( 'ignore_in_session_memories_mkm', False ), 'memory_overlap_threshold': self.opt.get('memory_overlap_threshold', 0.0), 'memory_hard_block_for_n_turns': self.opt.get( 'memory_hard_block_for_n_turns', 0 ), 'memory_soft_block_decay_factor': self.opt.get( 'memory_soft_block_decay_factor', 0.0 ), } ``` ###Assistant : Extract heuristics from self.opt. " 757,"def plugin_list_buttons(context, model): return _get_registered_content(model, 'list_buttons', context) "," Render all list buttons registered by plugins ",7,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def plugin_list_buttons(context, model): return _get_registered_content(model, 'list_buttons', context) ``` ###Assistant : Render all list buttons registered by plugins " 758,"def drop_path(self, inputs): # if prob is 0 or eval mode, return original input if self.drop_prob == 0. 
or not self.training: return inputs keep_prob = 1 - self.drop_prob keep_prob = paddle.to_tensor(keep_prob, dtype='float32') shape = (inputs.shape[0], ) + (1, ) * (inputs.ndim - 1) # shape=(N, 1, 1, 1) random_tensor = keep_prob + paddle.rand(shape, dtype=inputs.dtype) random_tensor = random_tensor.floor() # mask output = inputs.divide(keep_prob) * random_tensor #divide is to keep same output expectation return output ","drop path op Args: input: tensor with arbitrary shape drop_prob: float number of drop path probability, default: 0.0 training: bool, if current mode is training, default: False Returns: output: output tensor after drop path ",34,73,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def drop_path(self, inputs): # if prob is 0 or eval mode, return original input if self.drop_prob == 0. or not self.training: return inputs keep_prob = 1 - self.drop_prob keep_prob = paddle.to_tensor(keep_prob, dtype='float32') shape = (inputs.shape[0], ) + (1, ) * (inputs.ndim - 1) # shape=(N, 1, 1, 1) random_tensor = keep_prob + paddle.rand(shape, dtype=inputs.dtype) random_tensor = random_tensor.floor() # mask output = inputs.divide(keep_prob) * random_tensor #divide is to keep same output expectation return output ``` ###Assistant : drop path op Args: input: tensor with arbitrary shape drop_prob: float number of drop path probability, default: 0.0 training: bool, if current mode is training, default: False Returns: output: output tensor after drop path " 759,"def set_style(style=None, rc=None): style_object = axes_style(style, rc) mpl.rcParams.update(style_object) "," Set the parameters that control the general style of the plots. The style parameters control properties like the color of the background and whether a grid is enabled by default. This is accomplished using the matplotlib rcParams system. The options are illustrated in the :doc:`aesthetics tutorial <../tutorial/aesthetics>`. See :func:`axes_style` to get the parameter values. Parameters ---------- style : dict, or one of {darkgrid, whitegrid, dark, white, ticks} A dictionary of parameters or the name of a preconfigured style. rc : dict, optional Parameter mappings to override the values in the preset seaborn style dictionaries. This only updates parameters that are considered part of the style definition. Examples -------- .. include:: ../docstrings/set_style.rst ",111,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_style(style=None, rc=None): style_object = axes_style(style, rc) mpl.rcParams.update(style_object) ``` ###Assistant : Set the parameters that control the general style of the plots. The style parameters control properties like the color of the background and whether a grid is enabled by default. This is accomplished using the matplotlib rcParams system. The options are illustrated in the :doc:`aesthetics tutorial <../tutorial/aesthetics>`. See :func:`axes_style` to get the parameter values. Parameters ---------- style : dict, or one of {darkgrid, whitegrid, dark, white, ticks} A dictionary of parameters or the name of a preconfigured style. rc : dict, optional Parameter mappings to override the values in the preset seaborn style dictionaries. This only updates parameters that are considered part of the style definition. Examples -------- .. 
include:: ../docstrings/set_style.rst " 760,"def _resize(self, image, shorter=800, longer=1333, size_divisor=32, resample=Image.BICUBIC): if not isinstance(image, Image.Image): image = self.to_pil_image(image) w, h = image.size min_size = shorter max_size = longer scale = min_size / min(w, h) if h < w: newh, neww = min_size, scale * w else: newh, neww = scale * h, min_size if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) newh, neww = newh // size_divisor * size_divisor, neww // size_divisor * size_divisor return self.resize(image, size=(neww, newh), resample=resample) "," Resizes the shorter edge of `image` to `shorter` and limits the longer edge to under `longer`, while preserving the aspect ratio. Also makes sure that both the height and width can be divided by `size_divisor`. Based on original implementation: https://github.com/dandelin/ViLT/blob/3db8b5035464afee84d951bf6322e1b27f1d072d/vilt/transforms/utils.py#L5 Args: image (`PIL.Image`): The image to resize. shorter (`int`, *optional*, defaults to `800`): The size to which to resize the shorter side of the image. longer (`int`, *optional*, defaults to `1333`): The size by which to limit the longer side of the image, while preserving the aspect ratio. size_divisor (`int`, *optional*, defaults to `32`): The size by which both the height and the width must be divisible. resample (`int`, *optional*, defaults to `PIL.Image.BICUBIC`): An optional resampling filter. ",117,97,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _resize(self, image, shorter=800, longer=1333, size_divisor=32, resample=Image.BICUBIC): if not isinstance(image, Image.Image): image = self.to_pil_image(image) w, h = image.size min_size = shorter max_size = longer scale = min_size / min(w, h) if h < w: newh, neww = min_size, scale * w else: newh, neww = scale * h, min_size if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) newh, neww = newh // size_divisor * size_divisor, neww // size_divisor * size_divisor return self.resize(image, size=(neww, newh), resample=resample) ``` ###Assistant : Resizes the shorter edge of `image` to `shorter` and limits the longer edge to under `longer`, while preserving the aspect ratio. Also makes sure that both the height and width can be divided by `size_divisor`. Based on original implementation: https://github.com/dandelin/ViLT/blob/3db8b5035464afee84d951bf6322e1b27f1d072d/vilt/transforms/utils.py#L5 Args: image (`PIL.Image`): The image to resize. shorter (`int`, *optional*, defaults to `800`): The size to which to resize the shorter side of the image. longer (`int`, *optional*, defaults to `1333`): The size by which to limit the longer side of the image, while preserving the aspect ratio. size_divisor (`int`, *optional*, defaults to `32`): The size by which both the height and the width must be divisible. resample (`int`, *optional*, defaults to `PIL.Image.BICUBIC`): An optional resampling filter. " 761,"def extra_action_out_fn(self) -> Dict[str, TensorType]: extra_action_fetches = super().extra_action_out_fn() extra_action_fetches.update(self._policy_extra_action_fetches) return extra_action_fetches ","Extra values to fetch and return from compute_actions(). 
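A minimal sketch of the sizing rule used by _resize above: scale the shorter edge to `shorter`, cap the longer edge at `longer`, then round both sides down to a multiple of `size_divisor`; pure arithmetic, no PIL involved:
```Python
def target_size(w: int, h: int, shorter: int = 800, longer: int = 1333, size_divisor: int = 32):
    # Scale so the shorter side hits `shorter`, preserving aspect ratio.
    scale = shorter / min(w, h)
    neww, newh = w * scale, h * scale
    # If the longer side overshoots `longer`, rescale to fit under it.
    if max(neww, newh) > longer:
        rescale = longer / max(neww, newh)
        neww, newh = neww * rescale, newh * rescale
    # Round to the nearest integer, then down to a multiple of size_divisor.
    neww, newh = int(neww + 0.5), int(newh + 0.5)
    return (neww // size_divisor * size_divisor, newh // size_divisor * size_divisor)

# e.g. a 1920x1080 image becomes 1312x736 with the defaults above.
assert target_size(1920, 1080) == (1312, 736)
```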
Returns: Dict[str, TensorType]: An extra fetch-dict to be passed to and returned from the compute_actions() call. ",24,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extra_action_out_fn(self) -> Dict[str, TensorType]: extra_action_fetches = super().extra_action_out_fn() extra_action_fetches.update(self._policy_extra_action_fetches) return extra_action_fetches ``` ###Assistant : Extra values to fetch and return from compute_actions(). Returns: Dict[str, TensorType]: An extra fetch-dict to be passed to and returned from the compute_actions() call. " 762,"def get_all_exported_dataset_infos(cls) -> dict: dset_infos_file_path = os.path.join(cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME) if os.path.exists(dset_infos_file_path): return DatasetInfosDict.from_directory(cls.get_imported_module_dir()) return {} ","Empty dict if doesn't exist Example: ```py >>> from datasets import load_dataset_builder >>> ds_builder = load_dataset_builder('rotten_tomatoes') >>> ds_builder.get_all_exported_dataset_infos() {'default': DatasetInfo(description=""Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n"", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)} ``` ",140,14,Python," ###User : Below is a Python method which does a task. 
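A minimal sketch of the "parse the infos file if it exists, otherwise return an empty dict" pattern that get_all_exported_dataset_infos above follows; the file name and plain-json parsing are simplifying assumptions standing in for the datasets-library internals:
```Python
import json
import os

def load_infos(module_dir: str, filename: str = 'dataset_infos.json') -> dict:
    # Mirror the pattern above: parse the infos file when present,
    # otherwise fall back to an empty dict.
    path = os.path.join(module_dir, filename)
    if os.path.exists(path):
        with open(path, encoding='utf-8') as f:
            return json.load(f)
    return {}
```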
Create a documentation for the below code : ```Python def get_all_exported_dataset_infos(cls) -> dict: dset_infos_file_path = os.path.join(cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME) if os.path.exists(dset_infos_file_path): return DatasetInfosDict.from_directory(cls.get_imported_module_dir()) return {} ``` ###Assistant : Empty dict if doesn't exist Example: ```py >>> from datasets import load_dataset_builder >>> ds_builder = load_dataset_builder('rotten_tomatoes') >>> ds_builder.get_all_exported_dataset_infos() {'default': DatasetInfo(description=""Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n"", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)} ``` " 763,"def to_sanitized_dict(self) -> Dict[str, Any]: d = self.to_dict() d = { ** d, ** { ""train_batch_size"": self.train_batch_size, ""eval_batch_size"": self.eval_batch_size } } valid_types = [bool, int, float, str] valid_types.append(paddle.Tensor) return { k: v if type(v) in valid_types else str(v) for k, v in d.items() } "," Sanitized serialization to use with TensorBoard’s hparams ",7,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_sanitized_dict(self) -> Dict[str, Any]: d = self.to_dict() d = { ** d, ** { ""train_batch_size"": self.train_batch_size, ""eval_batch_size"": self.eval_batch_size } } valid_types = [bool, int, float, str] valid_types.append(paddle.Tensor) return { k: v if type(v) in valid_types else str(v) for k, v in d.items() } ``` ###Assistant : Sanitized serialization to use with TensorBoard’s hparams " 764,"def readinto(self, b): self._check_can_read() return self._buffer.readinto(b) ","Read bytes into b. Returns the number of bytes read (0 for EOF). ",13,6,Python," ###User : Below is a Python method which does a task. 
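A minimal sketch of the sanitisation rule used by to_sanitized_dict above: keep plain bool/int/float/str values and stringify everything else so the mapping can be logged as hparams:
```Python
def sanitize(d: dict) -> dict:
    valid_types = (bool, int, float, str)
    # Anything that is not a plain scalar is converted to its string form.
    return {k: v if isinstance(v, valid_types) else str(v) for k, v in d.items()}

assert sanitize({'lr': 1e-3, 'schedule': [1, 2]}) == {'lr': 0.001, 'schedule': '[1, 2]'}
```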
Create a documentation for the below code : ```Python def readinto(self, b): self._check_can_read() return self._buffer.readinto(b) ``` ###Assistant : Read bytes into b. Returns the number of bytes read (0 for EOF). " 765,"def _output_type_handler(cursor, name, defaultType, length, precision, scale): if defaultType == Database.NUMBER: if scale == -127: if precision == 0: # NUMBER column: decimal-precision floating point. # This will normally be an integer from a sequence, # but it could be a decimal value. outconverter = FormatStylePlaceholderCursor._output_number_converter else: # FLOAT column: binary-precision floating point. # This comes from FloatField columns. outconverter = float elif precision > 0: # NUMBER(p,s) column: decimal-precision fixed point. # This comes from IntegerField and DecimalField columns. outconverter = FormatStylePlaceholderCursor._get_decimal_converter( precision, scale ) else: # No type information. This normally comes from a # mathematical expression in the SELECT list. Guess int # or Decimal based on whether it has a decimal point. outconverter = FormatStylePlaceholderCursor._output_number_converter return cursor.var( Database.STRING, size=255, arraysize=cursor.arraysize, outconverter=outconverter, ) "," Called for each db column fetched from cursors. Return numbers as the appropriate Python type. ",15,126,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _output_type_handler(cursor, name, defaultType, length, precision, scale): if defaultType == Database.NUMBER: if scale == -127: if precision == 0: # NUMBER column: decimal-precision floating point. # This will normally be an integer from a sequence, # but it could be a decimal value. outconverter = FormatStylePlaceholderCursor._output_number_converter else: # FLOAT column: binary-precision floating point. # This comes from FloatField columns. outconverter = float elif precision > 0: # NUMBER(p,s) column: decimal-precision fixed point. # This comes from IntegerField and DecimalField columns. outconverter = FormatStylePlaceholderCursor._get_decimal_converter( precision, scale ) else: # No type information. This normally comes from a # mathematical expression in the SELECT list. Guess int # or Decimal based on whether it has a decimal point. outconverter = FormatStylePlaceholderCursor._output_number_converter return cursor.var( Database.STRING, size=255, arraysize=cursor.arraysize, outconverter=outconverter, ) ``` ###Assistant : Called for each db column fetched from cursors. Return numbers as the appropriate Python type. 
" 766,"def decode_nested_example(schema, obj): # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return { k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj) } elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature} else: return decode_nested_example([schema.feature], obj) # Object with special decoding: elif isinstance(schema, (Audio, Image)): return schema.decode_example(obj) if obj is not None else None return obj ","Decode a nested example. This is used since some features (in particular Audio and Image) have some logic during decoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded. If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same. ",73,121,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def decode_nested_example(schema, obj): # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return { k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj) } elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature} else: return decode_nested_example([schema.feature], obj) # Object with special decoding: elif isinstance(schema, (Audio, Image)): return schema.decode_example(obj) if obj is not None else None return obj ``` ###Assistant : Decode a nested example. This is used since some features (in particular Audio and Image) have some logic during decoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded. If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same. " 767,"def running_under_virtualenv() -> bool: return _running_under_venv() or _running_under_legacy_virtualenv() ","True if we're running inside a virtual environment, False otherwise.",10,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def running_under_virtualenv() -> bool: return _running_under_venv() or _running_under_legacy_virtualenv() ``` ###Assistant : True if we're running inside a virtual environment, False otherwise. 
" 768,"def forward_train(self, x, data_samples, proposal_cfg=None, **kwargs): img_metas = [data_sample['meta'] for data_sample in data_samples] outs = self(x) gt_bboxes = [ data_sample.gt_instances.bboxes for data_sample in data_samples ] if hasattr(data_samples[0].gt_instances, 'labels'): gt_labels = [ data_sample.gt_instances.labels for data_sample in data_samples ] else: # RPN gt_labels = None if hasattr(data_samples[0], 'instances_ignore'): gt_bboxes_ignore = [ data_sample.ignored_instances.bboxes for data_sample in data_samples ] else: gt_bboxes_ignore = None if gt_labels is None: loss_inputs = outs + (gt_bboxes, img_metas) else: loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) if proposal_cfg is None: return losses else: results_list = self.get_results( *outs, img_metas=img_metas, cfg=proposal_cfg) return losses, results_list "," Args: x (list[Tensor]): Features from FPN. data_samples (list[:obj:`GeneralData`]): Each item contains the meta information of each image and corresponding annotations. proposal_cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used Returns: tuple or Tensor: When `proposal_cfg` is None, the detector is a \ normal one-stage detector, The return value is the losses. - losses: (dict[str, Tensor]): A dictionary of loss components. When the `proposal_cfg` is not None, the head is used as a `rpn_head`, the return value is a tuple contains: - losses: (dict[str, Tensor]): A dictionary of loss components. - results_list (list[:obj:`InstanceData`]): Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance,) - labels (Tensor): Labels of bboxes, has a shape (num_instances,). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). ",147,97,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward_train(self, x, data_samples, proposal_cfg=None, **kwargs): img_metas = [data_sample['meta'] for data_sample in data_samples] outs = self(x) gt_bboxes = [ data_sample.gt_instances.bboxes for data_sample in data_samples ] if hasattr(data_samples[0].gt_instances, 'labels'): gt_labels = [ data_sample.gt_instances.labels for data_sample in data_samples ] else: # RPN gt_labels = None if hasattr(data_samples[0], 'instances_ignore'): gt_bboxes_ignore = [ data_sample.ignored_instances.bboxes for data_sample in data_samples ] else: gt_bboxes_ignore = None if gt_labels is None: loss_inputs = outs + (gt_bboxes, img_metas) else: loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) if proposal_cfg is None: return losses else: results_list = self.get_results( *outs, img_metas=img_metas, cfg=proposal_cfg) return losses, results_list ``` ###Assistant : Args: x (list[Tensor]): Features from FPN. data_samples (list[:obj:`GeneralData`]): Each item contains the meta information of each image and corresponding annotations. proposal_cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used Returns: tuple or Tensor: When `proposal_cfg` is None, the detector is a \ normal one-stage detector, The return value is the losses. - losses: (dict[str, Tensor]): A dictionary of loss components. 
When the `proposal_cfg` is not None, the head is used as a `rpn_head`, the return value is a tuple contains: - losses: (dict[str, Tensor]): A dictionary of loss components. - results_list (list[:obj:`InstanceData`]): Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance,) - labels (Tensor): Labels of bboxes, has a shape (num_instances,). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). " 769,"def cf(self): return { cf.name: cf.deserialize(self.custom_field_data.get(cf.name)) for cf in self.custom_fields } "," Return a dictionary mapping each custom field for this instance to its deserialized value. ```python >>> tenant = Tenant.objects.first() >>> tenant.cf {'primary_site': , 'cust_id': 'DMI01', 'is_active': True} ``` ",29,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cf(self): return { cf.name: cf.deserialize(self.custom_field_data.get(cf.name)) for cf in self.custom_fields } ``` ###Assistant : Return a dictionary mapping each custom field for this instance to its deserialized value. ```python >>> tenant = Tenant.objects.first() >>> tenant.cf {'primary_site': , 'cust_id': 'DMI01', 'is_active': True} ``` " 770,"def test_arf_layout_negative_a_check(self): G = self.Gs pytest.raises(ValueError, nx.arf_layout, G=G, a=-1) "," Checks input parameters correctly raises errors. For example, `a` should be larger than 1 ",14,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_arf_layout_negative_a_check(self): G = self.Gs pytest.raises(ValueError, nx.arf_layout, G=G, a=-1) ``` ###Assistant : Checks input parameters correctly raises errors. For example, `a` should be larger than 1 " 771,"async def test_connected_device_registered(hass): registry = mock_registry(hass) dispatches = [] ",Test dispatch on connected device being registered.,7,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_connected_device_registered(hass): registry = mock_registry(hass) dispatches = [] ``` ###Assistant : Test dispatch on connected device being registered. " 772,"def test_ddp_sharded_strategy_checkpoint_multi_gpu(tmpdir): model = BoringModel() trainer = Trainer(gpus=2, strategy=""ddp_sharded_spawn"", fast_dev_run=True) trainer.fit(model) checkpoint_path = os.path.join(tmpdir, ""model.pt"") trainer.save_checkpoint(checkpoint_path) saved_model = BoringModel.load_from_checkpoint(checkpoint_path) # Assert model parameters are identical after loading for ddp_param, shard_param in zip(model.parameters(), saved_model.parameters()): assert torch.equal(ddp_param.to(""cpu""), shard_param) @RunIf(min_gpus=2, skip_windows=True, fairscale=True)",Test to ensure that checkpoint is saved correctly when using multiple GPUs.,12,39,Python," ###User : Below is a Python method which does a task. 
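A minimal sketch of the save/load/compare pattern behind test_ddp_sharded_strategy_checkpoint_multi_gpu above, reduced to plain PyTorch (no Lightning, no GPUs) to show what "parameters identical after loading" means; assumes PyTorch is installed:
```Python
import os
import tempfile

import torch

model = torch.nn.Linear(4, 2)
ckpt = os.path.join(tempfile.mkdtemp(), 'model.pt')
torch.save(model.state_dict(), ckpt)

restored = torch.nn.Linear(4, 2)
restored.load_state_dict(torch.load(ckpt))

# Every parameter tensor must match exactly after the round trip.
for p, q in zip(model.parameters(), restored.parameters()):
    assert torch.equal(p, q)
```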
Create a documentation for the below code : ```Python def test_ddp_sharded_strategy_checkpoint_multi_gpu(tmpdir): model = BoringModel() trainer = Trainer(gpus=2, strategy=""ddp_sharded_spawn"", fast_dev_run=True) trainer.fit(model) checkpoint_path = os.path.join(tmpdir, ""model.pt"") trainer.save_checkpoint(checkpoint_path) saved_model = BoringModel.load_from_checkpoint(checkpoint_path) # Assert model parameters are identical after loading for ddp_param, shard_param in zip(model.parameters(), saved_model.parameters()): assert torch.equal(ddp_param.to(""cpu""), shard_param) @RunIf(min_gpus=2, skip_windows=True, fairscale=True) ``` ###Assistant : Test to ensure that checkpoint is saved correctly when using multiple GPUs. " 773,"def autoscale(self) -> None: for deployment_name, ( deployment_info, route_prefix, ) in self.list_deployments().items(): deployment_config = deployment_info.deployment_config autoscaling_policy = deployment_info.autoscaling_policy if autoscaling_policy is None: continue replicas = self.deployment_state_manager._deployment_states[ deployment_name ]._replicas running_replicas = replicas.get([ReplicaState.RUNNING]) current_num_ongoing_requests = [] for replica in running_replicas: replica_tag = replica.replica_tag num_ongoing_requests = self.autoscaling_metrics_store.window_average( replica_tag, time.time() - autoscaling_policy.config.look_back_period_s, ) if num_ongoing_requests is not None: current_num_ongoing_requests.append(num_ongoing_requests) if len(current_num_ongoing_requests) == 0: continue new_deployment_config = deployment_config.copy() decision_num_replicas = autoscaling_policy.get_decision_num_replicas( current_num_ongoing_requests=current_num_ongoing_requests, curr_target_num_replicas=deployment_config.num_replicas, ) new_deployment_config.num_replicas = decision_num_replicas new_deployment_info = copy(deployment_info) new_deployment_info.deployment_config = new_deployment_config goal_id, updating = self.deployment_state_manager.deploy( deployment_name, new_deployment_info ) ",Updates autoscaling deployments with calculated num_replicas.,6,85,Python," ###User : Below is a Python method which does a task. 
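A minimal sketch of one simple replica-count decision of the kind the autoscale loop above delegates to its policy object; the formula here (total ongoing requests divided by a per-replica target, then clamped) is an illustrative assumption, not Ray Serve's actual policy:
```Python
import math

def decide_num_replicas(current_num_ongoing_requests, target_per_replica=2,
                        min_replicas=1, max_replicas=10):
    if not current_num_ongoing_requests:
        return min_replicas
    total = sum(current_num_ongoing_requests)
    # Enough replicas so each one handles roughly target_per_replica requests.
    desired = math.ceil(total / target_per_replica)
    return max(min_replicas, min(max_replicas, desired))

# Three replicas reporting 4, 5 and 3 in-flight requests -> 6 replicas desired.
assert decide_num_replicas([4, 5, 3]) == 6
```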
Create a documentation for the below code : ```Python def autoscale(self) -> None: for deployment_name, ( deployment_info, route_prefix, ) in self.list_deployments().items(): deployment_config = deployment_info.deployment_config autoscaling_policy = deployment_info.autoscaling_policy if autoscaling_policy is None: continue replicas = self.deployment_state_manager._deployment_states[ deployment_name ]._replicas running_replicas = replicas.get([ReplicaState.RUNNING]) current_num_ongoing_requests = [] for replica in running_replicas: replica_tag = replica.replica_tag num_ongoing_requests = self.autoscaling_metrics_store.window_average( replica_tag, time.time() - autoscaling_policy.config.look_back_period_s, ) if num_ongoing_requests is not None: current_num_ongoing_requests.append(num_ongoing_requests) if len(current_num_ongoing_requests) == 0: continue new_deployment_config = deployment_config.copy() decision_num_replicas = autoscaling_policy.get_decision_num_replicas( current_num_ongoing_requests=current_num_ongoing_requests, curr_target_num_replicas=deployment_config.num_replicas, ) new_deployment_config.num_replicas = decision_num_replicas new_deployment_info = copy(deployment_info) new_deployment_info.deployment_config = new_deployment_config goal_id, updating = self.deployment_state_manager.deploy( deployment_name, new_deployment_info ) ``` ###Assistant : Updates autoscaling deployments with calculated num_replicas. " 774,"def match_state_dict(model_state_dict, weight_state_dict): model_keys = sorted(model_state_dict.keys()) weight_keys = sorted(weight_state_dict.keys()) "," Match between the model state dict and pretrained weight state dict. Return the matched state dict. The method supposes that all the names in the pretrained weight state dict are substrings of the names in the model, if the prefix 'backbone.' in the pretrained weight keys is stripped. And we could get the candidates for each model key. Then we select the name with the longest matched size as the final match result. For example, the model state dict has the name of 'backbone.res2.res2a.branch2a.conv.weight' and the pretrained weight has the names 'res2.res2a.branch2a.conv.weight' and 'branch2a.conv.weight'. We match the 'res2.res2a.branch2a.conv.weight' to the model key. ",99,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def match_state_dict(model_state_dict, weight_state_dict): model_keys = sorted(model_state_dict.keys()) weight_keys = sorted(weight_state_dict.keys()) ``` ###Assistant : Match between the model state dict and pretrained weight state dict. Return the matched state dict. The method supposes that all the names in the pretrained weight state dict are substrings of the names in the model, if the prefix 'backbone.' in the pretrained weight keys is stripped. And we could get the candidates for each model key. Then we select the name with the longest matched size as the final match result. For example, the model state dict has the name of 'backbone.res2.res2a.branch2a.conv.weight' and the pretrained weight has the names 'res2.res2a.branch2a.conv.weight' and 'branch2a.conv.weight'. We match the 'res2.res2a.branch2a.conv.weight' to the model key. 
" 775,"def local_node_connectivity(G, source, target, cutoff=None): if target == source: raise nx.NetworkXError(""source and target have to be different nodes."") # Maximum possible node independent paths if G.is_directed(): possible = min(G.out_degree(source), G.in_degree(target)) else: possible = min(G.degree(source), G.degree(target)) K = 0 if not possible: return K if cutoff is None: cutoff = float(""inf"") exclude = set() for i in range(min(possible, cutoff)): try: path = _bidirectional_shortest_path(G, source, target, exclude) exclude.update(set(path)) K += 1 except nx.NetworkXNoPath: break return K ","Compute node connectivity between source and target. Pairwise or local node connectivity between two distinct and nonadjacent nodes is the minimum number of nodes that must be removed (minimum separating cutset) to disconnect them. By Menger's theorem, this is equal to the number of node independent paths (paths that share no nodes other than source and target). Which is what we compute in this function. This algorithm is a fast approximation that gives an strict lower bound on the actual number of node independent paths between two nodes [1]_. It works for both directed and undirected graphs. Parameters ---------- G : NetworkX graph source : node Starting node for node connectivity target : node Ending node for node connectivity cutoff : integer Maximum node connectivity to consider. If None, the minimum degree of source or target is used as a cutoff. Default value None. Returns ------- k: integer pairwise node connectivity Examples -------- >>> # Platonic octahedral graph has node connectivity 4 >>> # for each non adjacent node pair >>> from networkx.algorithms import approximation as approx >>> G = nx.octahedral_graph() >>> approx.local_node_connectivity(G, 0, 5) 4 Notes ----- This algorithm [1]_ finds node independents paths between two nodes by computing their shortest path using BFS, marking the nodes of the path found as 'used' and then searching other shortest paths excluding the nodes marked as used until no more paths exist. It is not exact because a shortest path could use nodes that, if the path were longer, may belong to two different node independent paths. Thus it only guarantees an strict lower bound on node connectivity. Note that the authors propose a further refinement, losing accuracy and gaining speed, which is not implemented yet. See also -------- all_pairs_node_connectivity node_connectivity References ---------- .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 http://eclectic.ss.uci.edu/~drwhite/working.pdf ",314,74,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def local_node_connectivity(G, source, target, cutoff=None): if target == source: raise nx.NetworkXError(""source and target have to be different nodes."") # Maximum possible node independent paths if G.is_directed(): possible = min(G.out_degree(source), G.in_degree(target)) else: possible = min(G.degree(source), G.degree(target)) K = 0 if not possible: return K if cutoff is None: cutoff = float(""inf"") exclude = set() for i in range(min(possible, cutoff)): try: path = _bidirectional_shortest_path(G, source, target, exclude) exclude.update(set(path)) K += 1 except nx.NetworkXNoPath: break return K ``` ###Assistant : Compute node connectivity between source and target. 
Pairwise or local node connectivity between two distinct and nonadjacent nodes is the minimum number of nodes that must be removed (minimum separating cutset) to disconnect them. By Menger's theorem, this is equal to the number of node independent paths (paths that share no nodes other than source and target). Which is what we compute in this function. This algorithm is a fast approximation that gives an strict lower bound on the actual number of node independent paths between two nodes [1]_. It works for both directed and undirected graphs. Parameters ---------- G : NetworkX graph source : node Starting node for node connectivity target : node Ending node for node connectivity cutoff : integer Maximum node connectivity to consider. If None, the minimum degree of source or target is used as a cutoff. Default value None. Returns ------- k: integer pairwise node connectivity Examples -------- >>> # Platonic octahedral graph has node connectivity 4 >>> # for each non adjacent node pair >>> from networkx.algorithms import approximation as approx >>> G = nx.octahedral_graph() >>> approx.local_node_connectivity(G, 0, 5) 4 Notes ----- This algorithm [1]_ finds node independents paths between two nodes by computing their shortest path using BFS, marking the nodes of the path found as 'used' and then searching other shortest paths excluding the nodes marked as used until no more paths exist. It is not exact because a shortest path could use nodes that, if the path were longer, may belong to two different node independent paths. Thus it only guarantees an strict lower bound on node connectivity. Note that the authors propose a further refinement, losing accuracy and gaining speed, which is not implemented yet. See also -------- all_pairs_node_connectivity node_connectivity References ---------- .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 http://eclectic.ss.uci.edu/~drwhite/working.pdf " 776,"def _copartition(self, axis, other, how, sort, force_repartition=False): if isinstance(other, type(self)): other = [other] self_index = self.axes[axis] others_index = [o.axes[axis] for o in other] joined_index, make_reindexer = self._join_index_objects( axis, [self_index] + others_index, how, sort ) frames = [self] + other non_empty_frames_idx = [ i for i, o in enumerate(frames) if o._partitions.size != 0 ] # If all frames are empty if len(non_empty_frames_idx) == 0: return ( self._partitions, [o._partitions for o in other], joined_index, # There are no partition sizes because the resulting dataframe # has no partitions. [], ) base_frame_idx = non_empty_frames_idx[0] other_frames = frames[base_frame_idx + 1 :] # Picking first non-empty frame base_frame = frames[non_empty_frames_idx[0]] base_index = base_frame.axes[axis] # define conditions for reindexing and repartitioning `self` frame do_reindex_base = not base_index.equals(joined_index) do_repartition_base = force_repartition or do_reindex_base # Perform repartitioning and reindexing for `base_frame` if needed. # Also define length of base and frames. We will need to know the # lengths for alignment. 
if do_repartition_base: reindexed_base = base_frame._partition_mgr_cls.map_axis_partitions( axis, base_frame._partitions, make_reindexer(do_reindex_base, base_frame_idx), ) if axis: base_lengths = [obj.width() for obj in reindexed_base[0]] else: base_lengths = [obj.length() for obj in reindexed_base.T[0]] else: reindexed_base = base_frame._partitions base_lengths = self._column_widths if axis else self._row_lengths others_lengths = [o._axes_lengths[axis] for o in other_frames] # define conditions for reindexing and repartitioning `other` frames do_reindex_others = [ not o.axes[axis].equals(joined_index) for o in other_frames ] do_repartition_others = [None] * len(other_frames) for i in range(len(other_frames)): do_repartition_others[i] = ( force_repartition or do_reindex_others[i] or others_lengths[i] != base_lengths ) # perform repartitioning and reindexing for `other_frames` if needed reindexed_other_list = [None] * len(other_frames) for i in range(len(other_frames)): if do_repartition_others[i]: # indices of others frame start from `base_frame_idx` + 1 reindexed_other_list[i] = other_frames[ i ]._partition_mgr_cls.map_axis_partitions( axis, other_frames[i]._partitions, make_reindexer(do_repartition_others[i], base_frame_idx + 1 + i), lengths=base_lengths, ) else: reindexed_other_list[i] = other_frames[i]._partitions reindexed_frames = ( [frames[i]._partitions for i in range(base_frame_idx)] + [reindexed_base] + reindexed_other_list ) return (reindexed_frames[0], reindexed_frames[1:], joined_index, base_lengths) "," Copartition two Modin DataFrames. Perform aligning of partitions, index and partition blocks. Parameters ---------- axis : {0, 1} Axis to copartition along (0 - rows, 1 - columns). other : PandasDataframe Other Modin DataFrame(s) to copartition against. how : str How to manage joining the index object (""left"", ""right"", etc.). sort : bool Whether sort the joined index or not. force_repartition : bool, default: False Whether force the repartitioning or not. By default, this method will skip repartitioning if it is possible. This is because reindexing is extremely inefficient. Because this method is used to `join` or `append`, it is vital that the internal indices match. Returns ------- tuple Tuple containing: 1) 2-d NumPy array of aligned left partitions 2) list of 2-d NumPy arrays of aligned right partitions 3) joined index along ``axis`` 4) List with sizes of partitions along axis that partitioning was done on. This list will be empty if and only if all the frames are empty. ",161,304,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _copartition(self, axis, other, how, sort, force_repartition=False): if isinstance(other, type(self)): other = [other] self_index = self.axes[axis] others_index = [o.axes[axis] for o in other] joined_index, make_reindexer = self._join_index_objects( axis, [self_index] + others_index, how, sort ) frames = [self] + other non_empty_frames_idx = [ i for i, o in enumerate(frames) if o._partitions.size != 0 ] # If all frames are empty if len(non_empty_frames_idx) == 0: return ( self._partitions, [o._partitions for o in other], joined_index, # There are no partition sizes because the resulting dataframe # has no partitions. 
[], ) base_frame_idx = non_empty_frames_idx[0] other_frames = frames[base_frame_idx + 1 :] # Picking first non-empty frame base_frame = frames[non_empty_frames_idx[0]] base_index = base_frame.axes[axis] # define conditions for reindexing and repartitioning `self` frame do_reindex_base = not base_index.equals(joined_index) do_repartition_base = force_repartition or do_reindex_base # Perform repartitioning and reindexing for `base_frame` if needed. # Also define length of base and frames. We will need to know the # lengths for alignment. if do_repartition_base: reindexed_base = base_frame._partition_mgr_cls.map_axis_partitions( axis, base_frame._partitions, make_reindexer(do_reindex_base, base_frame_idx), ) if axis: base_lengths = [obj.width() for obj in reindexed_base[0]] else: base_lengths = [obj.length() for obj in reindexed_base.T[0]] else: reindexed_base = base_frame._partitions base_lengths = self._column_widths if axis else self._row_lengths others_lengths = [o._axes_lengths[axis] for o in other_frames] # define conditions for reindexing and repartitioning `other` frames do_reindex_others = [ not o.axes[axis].equals(joined_index) for o in other_frames ] do_repartition_others = [None] * len(other_frames) for i in range(len(other_frames)): do_repartition_others[i] = ( force_repartition or do_reindex_others[i] or others_lengths[i] != base_lengths ) # perform repartitioning and reindexing for `other_frames` if needed reindexed_other_list = [None] * len(other_frames) for i in range(len(other_frames)): if do_repartition_others[i]: # indices of others frame start from `base_frame_idx` + 1 reindexed_other_list[i] = other_frames[ i ]._partition_mgr_cls.map_axis_partitions( axis, other_frames[i]._partitions, make_reindexer(do_repartition_others[i], base_frame_idx + 1 + i), lengths=base_lengths, ) else: reindexed_other_list[i] = other_frames[i]._partitions reindexed_frames = ( [frames[i]._partitions for i in range(base_frame_idx)] + [reindexed_base] + reindexed_other_list ) return (reindexed_frames[0], reindexed_frames[1:], joined_index, base_lengths) ``` ###Assistant : Copartition two Modin DataFrames. Perform aligning of partitions, index and partition blocks. Parameters ---------- axis : {0, 1} Axis to copartition along (0 - rows, 1 - columns). other : PandasDataframe Other Modin DataFrame(s) to copartition against. how : str How to manage joining the index object (""left"", ""right"", etc.). sort : bool Whether sort the joined index or not. force_repartition : bool, default: False Whether force the repartitioning or not. By default, this method will skip repartitioning if it is possible. This is because reindexing is extremely inefficient. Because this method is used to `join` or `append`, it is vital that the internal indices match. Returns ------- tuple Tuple containing: 1) 2-d NumPy array of aligned left partitions 2) list of 2-d NumPy arrays of aligned right partitions 3) joined index along ``axis`` 4) List with sizes of partitions along axis that partitioning was done on. This list will be empty if and only if all the frames are empty. " 777,"def size(self) -> int: # override Index.size to avoid materializing _values return len(self) # -------------------------------------------------------------------- # Levels Methods "," Return the number of elements in the underlying data. ",9,18,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def size(self) -> int: # override Index.size to avoid materializing _values return len(self) # -------------------------------------------------------------------- # Levels Methods ``` ###Assistant : Return the number of elements in the underlying data. " 778,"def groupby_agg(self, by, axis, agg, groupby_args, **kwargs): # Currently we only expect 'by' to be a projection of the same frame. # If 'by' holds a list of columns/series, then we create such projection # to re-use code. if not isinstance(by, DFAlgQueryCompiler): if is_list_like(by): by_cols = [] by_frames = [] for obj in by: if isinstance(obj, str): by_cols.append(obj) elif hasattr(obj, ""_modin_frame""): by_frames.append(obj._modin_frame) else: raise NotImplementedError(""unsupported groupby args"") by_cols = Index.__new__(Index, data=by_cols, dtype=self.columns.dtype) by_frame = self.mask(col_labels=by_cols) if by_frames: by_frame = by_frame.concat( axis=1, other_modin_frames=by_frames, ignore_index=True ) else: raise NotImplementedError(""unsupported groupby args"") else: by_frame = by._modin_frame if axis != 0: raise NotImplementedError(""groupby is supported for axis = 0 only"") base = by_frame._find_common_projections_base(self) if base is None: raise NotImplementedError(""unsupported groupby args"") if groupby_args[""level""] is not None: raise NotImplementedError(""levels are not supported for groupby"") drop = kwargs.get(""drop"", True) as_index = groupby_args.get(""as_index"", True) groupby_cols = by_frame.columns if isinstance(agg, dict): agg_cols = agg.keys() elif not drop: # If 'by' data came from a different frame then 'self-aggregation' # columns are more prioritized. agg_cols = self.columns else: agg_cols = [col for col in self.columns if col not in groupby_cols] # Mimic pandas behaviour: pandas does not allow for aggregation to be empty # in case of multi-column 'by'. if not as_index and len(agg_cols) == 0 and len(groupby_cols) > 1: agg_cols = self.columns # Create new base where all required columns are computed. We don't allow # complex expressions to be a group key or an aggeregate operand. allowed_nodes = (FrameNode, TransformNode) if not isinstance(by_frame._op, allowed_nodes): raise NotImplementedError( ""OmniSci doesn't allow complex expression to be a group key. "" f""The only allowed frame nodes are: {tuple(o.__name__ for o in allowed_nodes)}, "" f""met '{type(by_frame._op).__name__}'."" ) col_to_delete_template = ""__delete_me_{name}"" "," Groupby with aggregation operation. Parameters ---------- by : DFAlgQueryCompiler or list-like of str Grouping keys. axis : {0, 1} Only rows groupby is supported, so should be 0. agg : str or dict Aggregates to compute. groupby_args : dict Additional groupby args. **kwargs : dict Keyword args. Currently ignored. Returns ------- OmnisciOnNativeDataframe The new frame. ",55,278,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def groupby_agg(self, by, axis, agg, groupby_args, **kwargs): # Currently we only expect 'by' to be a projection of the same frame. # If 'by' holds a list of columns/series, then we create such projection # to re-use code. 
if not isinstance(by, DFAlgQueryCompiler): if is_list_like(by): by_cols = [] by_frames = [] for obj in by: if isinstance(obj, str): by_cols.append(obj) elif hasattr(obj, ""_modin_frame""): by_frames.append(obj._modin_frame) else: raise NotImplementedError(""unsupported groupby args"") by_cols = Index.__new__(Index, data=by_cols, dtype=self.columns.dtype) by_frame = self.mask(col_labels=by_cols) if by_frames: by_frame = by_frame.concat( axis=1, other_modin_frames=by_frames, ignore_index=True ) else: raise NotImplementedError(""unsupported groupby args"") else: by_frame = by._modin_frame if axis != 0: raise NotImplementedError(""groupby is supported for axis = 0 only"") base = by_frame._find_common_projections_base(self) if base is None: raise NotImplementedError(""unsupported groupby args"") if groupby_args[""level""] is not None: raise NotImplementedError(""levels are not supported for groupby"") drop = kwargs.get(""drop"", True) as_index = groupby_args.get(""as_index"", True) groupby_cols = by_frame.columns if isinstance(agg, dict): agg_cols = agg.keys() elif not drop: # If 'by' data came from a different frame then 'self-aggregation' # columns are more prioritized. agg_cols = self.columns else: agg_cols = [col for col in self.columns if col not in groupby_cols] # Mimic pandas behaviour: pandas does not allow for aggregation to be empty # in case of multi-column 'by'. if not as_index and len(agg_cols) == 0 and len(groupby_cols) > 1: agg_cols = self.columns # Create new base where all required columns are computed. We don't allow # complex expressions to be a group key or an aggeregate operand. allowed_nodes = (FrameNode, TransformNode) if not isinstance(by_frame._op, allowed_nodes): raise NotImplementedError( ""OmniSci doesn't allow complex expression to be a group key. "" f""The only allowed frame nodes are: {tuple(o.__name__ for o in allowed_nodes)}, "" f""met '{type(by_frame._op).__name__}'."" ) col_to_delete_template = ""__delete_me_{name}"" ``` ###Assistant : Groupby with aggregation operation. Parameters ---------- by : DFAlgQueryCompiler or list-like of str Grouping keys. axis : {0, 1} Only rows groupby is supported, so should be 0. agg : str or dict Aggregates to compute. groupby_args : dict Additional groupby args. **kwargs : dict Keyword args. Currently ignored. Returns ------- OmnisciOnNativeDataframe The new frame. " 779,"def delete_subscription_from_snuba(query_subscription_id, **kwargs): try: subscription = QuerySubscription.objects.get(id=query_subscription_id) except QuerySubscription.DoesNotExist: metrics.incr(""snuba.subscriptions.delete.subscription_does_not_exist"") return if subscription.status not in [ QuerySubscription.Status.DELETING.value, QuerySubscription.Status.DISABLED.value, ]: metrics.incr(""snuba.subscriptions.delete.incorrect_status"") return if subscription.subscription_id is not None: query_dataset = QueryDatasets(subscription.snuba_query.dataset) entity_key = get_entity_key_from_snuba_query( subscription.snuba_query, subscription.project.organization_id, subscription.project_id ) _delete_from_snuba( query_dataset, subscription.subscription_id, entity_key, ) if subscription.status == QuerySubscription.Status.DELETING.value: subscription.delete() else: subscription.update(subscription_id=None) "," Task to delete a corresponding subscription in Snuba from a `QuerySubscription` in Sentry. If the local subscription is marked for deletion (as opposed to disabled), then we delete the local subscription once we've successfully removed from Snuba. 
",37,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delete_subscription_from_snuba(query_subscription_id, **kwargs): try: subscription = QuerySubscription.objects.get(id=query_subscription_id) except QuerySubscription.DoesNotExist: metrics.incr(""snuba.subscriptions.delete.subscription_does_not_exist"") return if subscription.status not in [ QuerySubscription.Status.DELETING.value, QuerySubscription.Status.DISABLED.value, ]: metrics.incr(""snuba.subscriptions.delete.incorrect_status"") return if subscription.subscription_id is not None: query_dataset = QueryDatasets(subscription.snuba_query.dataset) entity_key = get_entity_key_from_snuba_query( subscription.snuba_query, subscription.project.organization_id, subscription.project_id ) _delete_from_snuba( query_dataset, subscription.subscription_id, entity_key, ) if subscription.status == QuerySubscription.Status.DELETING.value: subscription.delete() else: subscription.update(subscription_id=None) ``` ###Assistant : Task to delete a corresponding subscription in Snuba from a `QuerySubscription` in Sentry. If the local subscription is marked for deletion (as opposed to disabled), then we delete the local subscription once we've successfully removed from Snuba. " 780,"def test_changing_timer_with_messages_shown(qtbot, view, config_stub): config_stub.val.messages.timeout = 900000 # 15s view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test')) with qtbot.wait_signal(view._clear_timer.timeout): config_stub.val.messages.timeout = 100 @pytest.mark.parametrize('count, expected', [(1, 100), (3, 300), (5, 500), (7, 500)])","When we change messages.timeout, the timer should be restarted.",9,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_changing_timer_with_messages_shown(qtbot, view, config_stub): config_stub.val.messages.timeout = 900000 # 15s view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test')) with qtbot.wait_signal(view._clear_timer.timeout): config_stub.val.messages.timeout = 100 @pytest.mark.parametrize('count, expected', [(1, 100), (3, 300), (5, 500), (7, 500)]) ``` ###Assistant : When we change messages.timeout, the timer should be restarted. " 781,"def get_events(start, end, filters=None): from frappe.desk.calendar import get_event_conditions events = [] event_color = { ""Pending"": ""#fff4f0"", ""Under Review"": ""#d3e8fc"", ""Cleared"": ""#eaf5ed"", ""Rejected"": ""#fce7e7"", } conditions = get_event_conditions(""Interview"", filters) interviews = frappe.db.sql( .format( conditions=conditions ), {""start"": start, ""end"": end}, as_dict=True, update={""allDay"": 0}, ) for d in interviews: subject_data = [] for field in [""name"", ""job_applicant"", ""interview_round""]: if not d.get(field): continue subject_data.append(d.get(field)) color = event_color.get(d.status) interview_data = { ""from"": get_datetime(""%s %s"" % (d.scheduled_on, d.from_time or ""00:00:00"")), ""to"": get_datetime(""%s %s"" % (d.scheduled_on, d.to_time or ""00:00:00"")), ""name"": d.name, ""subject"": ""\n"".join(subject_data), ""color"": color if color else ""#89bcde"", } events.append(interview_data) return events ","Returns events for Gantt / Calendar view rendering. :param start: Start date-time. :param end: End date-time. :param filters: Filters (JSON). 
SELECT DISTINCT `tabInterview`.name, `tabInterview`.job_applicant, `tabInterview`.interview_round, `tabInterview`.scheduled_on, `tabInterview`.status, `tabInterview`.from_time as from_time, `tabInterview`.to_time as to_time from `tabInterview` where (`tabInterview`.scheduled_on between %(start)s and %(end)s) and docstatus != 2 {conditions} ",46,96,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_events(start, end, filters=None): from frappe.desk.calendar import get_event_conditions events = [] event_color = { ""Pending"": ""#fff4f0"", ""Under Review"": ""#d3e8fc"", ""Cleared"": ""#eaf5ed"", ""Rejected"": ""#fce7e7"", } conditions = get_event_conditions(""Interview"", filters) interviews = frappe.db.sql( .format( conditions=conditions ), {""start"": start, ""end"": end}, as_dict=True, update={""allDay"": 0}, ) for d in interviews: subject_data = [] for field in [""name"", ""job_applicant"", ""interview_round""]: if not d.get(field): continue subject_data.append(d.get(field)) color = event_color.get(d.status) interview_data = { ""from"": get_datetime(""%s %s"" % (d.scheduled_on, d.from_time or ""00:00:00"")), ""to"": get_datetime(""%s %s"" % (d.scheduled_on, d.to_time or ""00:00:00"")), ""name"": d.name, ""subject"": ""\n"".join(subject_data), ""color"": color if color else ""#89bcde"", } events.append(interview_data) return events ``` ###Assistant : Returns events for Gantt / Calendar view rendering. :param start: Start date-time. :param end: End date-time. :param filters: Filters (JSON). SELECT DISTINCT `tabInterview`.name, `tabInterview`.job_applicant, `tabInterview`.interview_round, `tabInterview`.scheduled_on, `tabInterview`.status, `tabInterview`.from_time as from_time, `tabInterview`.to_time as to_time from `tabInterview` where (`tabInterview`.scheduled_on between %(start)s and %(end)s) and docstatus != 2 {conditions} " 782,"def testFuncTrainableCheckpointConfigValidation(self): with self.assertRaises(ValueError): Experiment( name=""foo"", run=""f1"", # Will point to a wrapped function trainable checkpoint_config=CheckpointConfig(checkpoint_at_end=True), ) with self.assertRaises(ValueError): Experiment( name=""foo"", run=""f1"", checkpoint_config=CheckpointConfig(checkpoint_frequency=1), ) with self.assertRaises(ValueError): Experiment( name=""foo"", run=lambda config: 1, checkpoint_config=CheckpointConfig(checkpoint_at_end=True), ) ","Raise an error when trying to specify checkpoint_at_end/checkpoint_frequency with a function trainable.",12,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def testFuncTrainableCheckpointConfigValidation(self): with self.assertRaises(ValueError): Experiment( name=""foo"", run=""f1"", # Will point to a wrapped function trainable checkpoint_config=CheckpointConfig(checkpoint_at_end=True), ) with self.assertRaises(ValueError): Experiment( name=""foo"", run=""f1"", checkpoint_config=CheckpointConfig(checkpoint_frequency=1), ) with self.assertRaises(ValueError): Experiment( name=""foo"", run=lambda config: 1, checkpoint_config=CheckpointConfig(checkpoint_at_end=True), ) ``` ###Assistant : Raise an error when trying to specify checkpoint_at_end/checkpoint_frequency with a function trainable. 
" 783,"def create_perspective_transform(src, dst, round=False, splat_args=False): try: transform_matrix = create_perspective_transform_matrix(src, dst) error = None except np.linalg.LinAlgError as e: transform_matrix = np.identity(3, dtype=np.float) error = ""invalid input quads (%s and %s): %s"" %(src, dst, e) error = error.replace(""\n"", """") to_eval = ""def perspective_transform(%s):\n"" %( splat_args and ""*pt"" or ""pt"", ) to_eval += "" res = np.dot(transform_matrix, ((pt[0], ), (pt[1], ), (1, )))\n"" to_eval += "" res = res / res[2]\n"" if round: to_eval += "" return (int(round(res[0][0])), int(round(res[1][0])))\n"" else: to_eval += "" return (res[0][0], res[1][0])\n"" locals = { ""transform_matrix"": transform_matrix, } locals.update(globals()) exec(to_eval,locals,locals) res = locals[""perspective_transform""] res.matrix = transform_matrix res.error = error return res "," Returns a function which will transform points in quadrilateral ``src`` to the corresponding points on quadrilateral ``dst``:: >>> transform = create_perspective_transform( ... [(0, 0), (10, 0), (10, 10), (0, 10)], ... [(50, 50), (100, 50), (100, 100), (50, 100)], ... ) >>> transform((5, 5)) (74.99999999999639, 74.999999999999957) If ``round`` is ``True`` then points will be rounded to the nearest integer and integer values will be returned. >>> transform = create_perspective_transform( ... [(0, 0), (10, 0), (10, 10), (0, 10)], ... [(50, 50), (100, 50), (100, 100), (50, 100)], ... round=True, ... ) >>> transform((5, 5)) (75, 75) If ``splat_args`` is ``True`` the function will accept two arguments instead of a tuple. >>> transform = create_perspective_transform( ... [(0, 0), (10, 0), (10, 10), (0, 10)], ... [(50, 50), (100, 50), (100, 100), (50, 100)], ... splat_args=True, ... ) >>> transform(5, 5) (74.99999999999639, 74.999999999999957) If the input values yield an invalid transformation matrix an identity function will be returned and the ``error`` attribute will be set to a description of the error:: >>> tranform = create_perspective_transform( ... np.zeros((4, 2)), ... np.zeros((4, 2)), ... ) >>> transform((5, 5)) (5.0, 5.0) >>> transform.error 'invalid input quads (...): Singular matrix ",194,102,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_perspective_transform(src, dst, round=False, splat_args=False): try: transform_matrix = create_perspective_transform_matrix(src, dst) error = None except np.linalg.LinAlgError as e: transform_matrix = np.identity(3, dtype=np.float) error = ""invalid input quads (%s and %s): %s"" %(src, dst, e) error = error.replace(""\n"", """") to_eval = ""def perspective_transform(%s):\n"" %( splat_args and ""*pt"" or ""pt"", ) to_eval += "" res = np.dot(transform_matrix, ((pt[0], ), (pt[1], ), (1, )))\n"" to_eval += "" res = res / res[2]\n"" if round: to_eval += "" return (int(round(res[0][0])), int(round(res[1][0])))\n"" else: to_eval += "" return (res[0][0], res[1][0])\n"" locals = { ""transform_matrix"": transform_matrix, } locals.update(globals()) exec(to_eval,locals,locals) res = locals[""perspective_transform""] res.matrix = transform_matrix res.error = error return res ``` ###Assistant : Returns a function which will transform points in quadrilateral ``src`` to the corresponding points on quadrilateral ``dst``:: >>> transform = create_perspective_transform( ... [(0, 0), (10, 0), (10, 10), (0, 10)], ... [(50, 50), (100, 50), (100, 100), (50, 100)], ... 
) >>> transform((5, 5)) (74.99999999999639, 74.999999999999957) If ``round`` is ``True`` then points will be rounded to the nearest integer and integer values will be returned. >>> transform = create_perspective_transform( ... [(0, 0), (10, 0), (10, 10), (0, 10)], ... [(50, 50), (100, 50), (100, 100), (50, 100)], ... round=True, ... ) >>> transform((5, 5)) (75, 75) If ``splat_args`` is ``True`` the function will accept two arguments instead of a tuple. >>> transform = create_perspective_transform( ... [(0, 0), (10, 0), (10, 10), (0, 10)], ... [(50, 50), (100, 50), (100, 100), (50, 100)], ... splat_args=True, ... ) >>> transform(5, 5) (74.99999999999639, 74.999999999999957) If the input values yield an invalid transformation matrix an identity function will be returned and the ``error`` attribute will be set to a description of the error:: >>> tranform = create_perspective_transform( ... np.zeros((4, 2)), ... np.zeros((4, 2)), ... ) >>> transform((5, 5)) (5.0, 5.0) >>> transform.error 'invalid input quads (...): Singular matrix " 784,"def get(self, url, cache=True, **kwargs): if not url.isValid(): urlutils.invalid_url_error(url, ""start download"") return None req = QNetworkRequest(url) user_agent = websettings.user_agent(url) req.setHeader(QNetworkRequest.KnownHeaders.UserAgentHeader, user_agent) if not cache: req.setAttribute(QNetworkRequest.Attribute.CacheSaveControlAttribute, False) return self.get_request(req, **kwargs) ","Start a download with a link URL. Args: url: The URL to get, as QUrl cache: If set to False, don't cache the response. **kwargs: passed to get_request(). Return: The created DownloadItem. ",32,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get(self, url, cache=True, **kwargs): if not url.isValid(): urlutils.invalid_url_error(url, ""start download"") return None req = QNetworkRequest(url) user_agent = websettings.user_agent(url) req.setHeader(QNetworkRequest.KnownHeaders.UserAgentHeader, user_agent) if not cache: req.setAttribute(QNetworkRequest.Attribute.CacheSaveControlAttribute, False) return self.get_request(req, **kwargs) ``` ###Assistant : Start a download with a link URL. Args: url: The URL to get, as QUrl cache: If set to False, don't cache the response. **kwargs: passed to get_request(). Return: The created DownloadItem. 
" 785,"def test_send_join_partial_state(self): joining_user = ""@misspiggy:"" + self.OTHER_SERVER_NAME join_result = self._make_join(joining_user) join_event_dict = join_result[""event""] add_hashes_and_signatures( KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], join_event_dict, signature_name=self.OTHER_SERVER_NAME, signing_key=self.OTHER_SERVER_SIGNATURE_KEY, ) channel = self.make_signed_federation_request( ""PUT"", f""/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true"", content=join_event_dict, ) self.assertEquals(channel.code, 200, channel.json_body) # expect a reduced room state returned_state = [ (ev[""type""], ev[""state_key""]) for ev in channel.json_body[""state""] ] self.assertCountEqual( returned_state, [ (""m.room.create"", """"), (""m.room.power_levels"", """"), (""m.room.join_rules"", """"), (""m.room.history_visibility"", """"), ], ) # the auth chain should not include anything already in ""state"" returned_auth_chain_events = [ (ev[""type""], ev[""state_key""]) for ev in channel.json_body[""auth_chain""] ] self.assertCountEqual( returned_auth_chain_events, [ (""m.room.member"", ""@kermit:test""), ], ) # the room should show that the new user is a member r = self.get_success( self.hs.get_state_handler().get_current_state(self._room_id) ) self.assertEqual(r[(""m.room.member"", joining_user)].membership, ""join"") ","When MSC3706 support is enabled, /send_join should return partial state",10,106,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_send_join_partial_state(self): joining_user = ""@misspiggy:"" + self.OTHER_SERVER_NAME join_result = self._make_join(joining_user) join_event_dict = join_result[""event""] add_hashes_and_signatures( KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], join_event_dict, signature_name=self.OTHER_SERVER_NAME, signing_key=self.OTHER_SERVER_SIGNATURE_KEY, ) channel = self.make_signed_federation_request( ""PUT"", f""/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true"", content=join_event_dict, ) self.assertEquals(channel.code, 200, channel.json_body) # expect a reduced room state returned_state = [ (ev[""type""], ev[""state_key""]) for ev in channel.json_body[""state""] ] self.assertCountEqual( returned_state, [ (""m.room.create"", """"), (""m.room.power_levels"", """"), (""m.room.join_rules"", """"), (""m.room.history_visibility"", """"), ], ) # the auth chain should not include anything already in ""state"" returned_auth_chain_events = [ (ev[""type""], ev[""state_key""]) for ev in channel.json_body[""auth_chain""] ] self.assertCountEqual( returned_auth_chain_events, [ (""m.room.member"", ""@kermit:test""), ], ) # the room should show that the new user is a member r = self.get_success( self.hs.get_state_handler().get_current_state(self._room_id) ) self.assertEqual(r[(""m.room.member"", joining_user)].membership, ""join"") ``` ###Assistant : When MSC3706 support is enabled, /send_join should return partial state " 786,"async def drain(self): if self._reader is not None: exc = self._reader.exception() if exc is not None: raise exc if self._transport.is_closing(): # Wait for protocol.connection_lost() call # Raise connection closing error if any, # ConnectionResetError otherwise # Yield to the event loop so connection_lost() may be # called. 
Without this, _drain_helper() would return # immediately, and code that calls # write(...); await drain() # in a loop would never call connection_lost(), so it # would not see an error when the socket is closed. await sleep(0) await self._protocol._drain_helper() ","Flush the write buffer. The intended use is to write w.write(data) await w.drain() ",13,87,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def drain(self): if self._reader is not None: exc = self._reader.exception() if exc is not None: raise exc if self._transport.is_closing(): # Wait for protocol.connection_lost() call # Raise connection closing error if any, # ConnectionResetError otherwise # Yield to the event loop so connection_lost() may be # called. Without this, _drain_helper() would return # immediately, and code that calls # write(...); await drain() # in a loop would never call connection_lost(), so it # would not see an error when the socket is closed. await sleep(0) await self._protocol._drain_helper() ``` ###Assistant : Flush the write buffer. The intended use is to write w.write(data) await w.drain() " 787,"def on_chord_header_start(self, chord, **header) -> dict: if not isinstance(chord.tasks, group): chord.tasks = group(chord.tasks) return self.on_group_start(chord.tasks, **header) ","Method that is called on chord header stamping start. Arguments: chord (chord): chord that is stamped. headers (Dict): Partial headers that could be merged with existing headers. Returns: Dict: headers to update. ",32,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def on_chord_header_start(self, chord, **header) -> dict: if not isinstance(chord.tasks, group): chord.tasks = group(chord.tasks) return self.on_group_start(chord.tasks, **header) ``` ###Assistant : Method that is called on chord header stamping start. Arguments: chord (chord): chord that is stamped. headers (Dict): Partial headers that could be merged with existing headers. Returns: Dict: headers to update. " 788,"def load_historic_predictions_from_disk(self): exists = self.historic_predictions_path.is_file() if exists: try: with open(self.historic_predictions_path, ""rb"") as fp: self.historic_predictions = cloudpickle.load(fp) logger.info( f""Found existing historic predictions at {self.full_path}, but beware "" ""that statistics may be inaccurate if the bot has been offline for "" ""an extended period of time."" ) except EOFError: logger.warning( 'Historical prediction file was corrupted. Trying to load backup file.') with open(self.historic_predictions_bkp_path, ""rb"") as fp: self.historic_predictions = cloudpickle.load(fp) logger.warning('FreqAI successfully loaded the backup historical predictions file.') elif not self.follow_mode: logger.info(""Could not find existing historic_predictions, starting from scratch"") else: logger.warning( f""Follower could not find historic predictions at {self.full_path} "" ""sending null values back to strategy"" ) return exists "," Locate and load a previously saved historic predictions. :return: bool - whether or not the drawer was located ",18,105,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def load_historic_predictions_from_disk(self): exists = self.historic_predictions_path.is_file() if exists: try: with open(self.historic_predictions_path, ""rb"") as fp: self.historic_predictions = cloudpickle.load(fp) logger.info( f""Found existing historic predictions at {self.full_path}, but beware "" ""that statistics may be inaccurate if the bot has been offline for "" ""an extended period of time."" ) except EOFError: logger.warning( 'Historical prediction file was corrupted. Trying to load backup file.') with open(self.historic_predictions_bkp_path, ""rb"") as fp: self.historic_predictions = cloudpickle.load(fp) logger.warning('FreqAI successfully loaded the backup historical predictions file.') elif not self.follow_mode: logger.info(""Could not find existing historic_predictions, starting from scratch"") else: logger.warning( f""Follower could not find historic predictions at {self.full_path} "" ""sending null values back to strategy"" ) return exists ``` ###Assistant : Locate and load a previously saved historic predictions. :return: bool - whether or not the drawer was located " 789,"def test_background(self): css = stylesheet = Stylesheet() stylesheet.parse(css) styles = stylesheet.rules[0].styles assert styles.text_background == Color(""red"", type=ColorType.STANDARD, number=1) ","#some-widget { text: on red; } ",6,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_background(self): css = stylesheet = Stylesheet() stylesheet.parse(css) styles = stylesheet.rules[0].styles assert styles.text_background == Color(""red"", type=ColorType.STANDARD, number=1) ``` ###Assistant : #some-widget { text: on red; } " 790,"def update(self) -> None: try: response = requests.get(self._url, timeout=5) except (requests.exceptions.RequestException, ValueError): _LOGGER.warning( ""Could not update status for DTE Energy Bridge (%s)"", self._attr_name ) return if response.status_code != HTTPStatus.OK: _LOGGER.warning( ""Invalid status_code from DTE Energy Bridge: %s (%s)"", response.status_code, self._attr_name, ) return response_split = response.text.split() if len(response_split) != 2: _LOGGER.warning( 'Invalid response from DTE Energy Bridge: ""%s"" (%s)', response.text, self._attr_name, ) return val = float(response_split[0]) # A workaround for a bug in the DTE energy bridge. # The returned value can randomly be in W or kW. Checking for a # a decimal seems to be a reliable way to determine the units. # Limiting to version 1 because version 2 apparently always returns # values in the format 000000.000 kW, but the scaling is Watts # NOT kWatts if self._version == 1 and ""."" in response_split[0]: self._attr_native_value = val else: self._attr_native_value = val / 1000 ",Get the energy usage data from the DTE energy bridge.,10,146,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def update(self) -> None: try: response = requests.get(self._url, timeout=5) except (requests.exceptions.RequestException, ValueError): _LOGGER.warning( ""Could not update status for DTE Energy Bridge (%s)"", self._attr_name ) return if response.status_code != HTTPStatus.OK: _LOGGER.warning( ""Invalid status_code from DTE Energy Bridge: %s (%s)"", response.status_code, self._attr_name, ) return response_split = response.text.split() if len(response_split) != 2: _LOGGER.warning( 'Invalid response from DTE Energy Bridge: ""%s"" (%s)', response.text, self._attr_name, ) return val = float(response_split[0]) # A workaround for a bug in the DTE energy bridge. # The returned value can randomly be in W or kW. Checking for a # a decimal seems to be a reliable way to determine the units. # Limiting to version 1 because version 2 apparently always returns # values in the format 000000.000 kW, but the scaling is Watts # NOT kWatts if self._version == 1 and ""."" in response_split[0]: self._attr_native_value = val else: self._attr_native_value = val / 1000 ``` ###Assistant : Get the energy usage data from the DTE energy bridge. " 791,"def ndependencies(dependencies, dependents): num_needed = {} result = {} for k, v in dependencies.items(): num_needed[k] = len(v) if not v: result[k] = 1 num_dependencies = num_needed.copy() current = [] current_pop = current.pop current_append = current.append for key in result: for parent in dependents[key]: num_needed[parent] -= 1 if not num_needed[parent]: current_append(parent) while current: key = current_pop() result[key] = 1 + sum(result[child] for child in dependencies[key]) for parent in dependents[key]: num_needed[parent] -= 1 if not num_needed[parent]: current_append(parent) return num_dependencies, result ","Number of total data elements on which this key depends For each key we return the number of tasks that must be run for us to run this task. Examples -------- >>> inc = lambda x: x + 1 >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')} >>> dependencies, dependents = get_deps(dsk) >>> num_dependencies, total_dependencies = ndependencies(dependencies, dependents) >>> sorted(total_dependencies.items()) [('a', 1), ('b', 2), ('c', 3)] Returns ------- num_dependencies: Dict[key, int] total_dependencies: Dict[key, int] ",77,78,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ndependencies(dependencies, dependents): num_needed = {} result = {} for k, v in dependencies.items(): num_needed[k] = len(v) if not v: result[k] = 1 num_dependencies = num_needed.copy() current = [] current_pop = current.pop current_append = current.append for key in result: for parent in dependents[key]: num_needed[parent] -= 1 if not num_needed[parent]: current_append(parent) while current: key = current_pop() result[key] = 1 + sum(result[child] for child in dependencies[key]) for parent in dependents[key]: num_needed[parent] -= 1 if not num_needed[parent]: current_append(parent) return num_dependencies, result ``` ###Assistant : Number of total data elements on which this key depends For each key we return the number of tasks that must be run for us to run this task. 
Examples -------- >>> inc = lambda x: x + 1 >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')} >>> dependencies, dependents = get_deps(dsk) >>> num_dependencies, total_dependencies = ndependencies(dependencies, dependents) >>> sorted(total_dependencies.items()) [('a', 1), ('b', 2), ('c', 3)] Returns ------- num_dependencies: Dict[key, int] total_dependencies: Dict[key, int] " 792,"def validate_attr(self, append) -> None: if append: existing_fields = getattr(self.attrs, self.kind_attr, None) if existing_fields is not None and existing_fields != list(self.values): raise ValueError(""appended items do not match existing items in table!"") existing_dtype = getattr(self.attrs, self.dtype_attr, None) if existing_dtype is not None and existing_dtype != self.dtype: raise ValueError( ""appended items dtype do not match existing items dtype in table!"" ) ",validate that we have the same order as the existing & same dtype,13,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_attr(self, append) -> None: if append: existing_fields = getattr(self.attrs, self.kind_attr, None) if existing_fields is not None and existing_fields != list(self.values): raise ValueError(""appended items do not match existing items in table!"") existing_dtype = getattr(self.attrs, self.dtype_attr, None) if existing_dtype is not None and existing_dtype != self.dtype: raise ValueError( ""appended items dtype do not match existing items dtype in table!"" ) ``` ###Assistant : validate that we have the same order as the existing & same dtype " 793,"def __getitem__(self, parameters): item = typing._type_check(parameters, f'{self._name} accepts only single type') return typing._GenericAlias(self, (item,)) Final = _FinalForm('Final', doc=","A special typing construct to indicate that a name cannot be re-assigned or overridden in a subclass. For example: MAX_SIZE: Final = 9000 MAX_SIZE += 1 # Error reported by type checker",32,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __getitem__(self, parameters): item = typing._type_check(parameters, f'{self._name} accepts only single type') return typing._GenericAlias(self, (item,)) Final = _FinalForm('Final', doc= ``` ###Assistant : A special typing construct to indicate that a name cannot be re-assigned or overridden in a subclass. For example: MAX_SIZE: Final = 9000 MAX_SIZE += 1 # Error reported by type checker " 794,"def type_spec_from_value(value): if is_extension_type(value): return value._type_spec # pylint: disable=protected-access # Get a TensorSpec for array-like data without # converting the data to a Tensor if hasattr(value, ""shape"") and hasattr(value, ""dtype""): return tf.TensorSpec(value.shape, value.dtype) else: return tf.type_spec_from_value(value) ",Grab type_spec without converting array-likes to tensors.,7,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def type_spec_from_value(value): if is_extension_type(value): return value._type_spec # pylint: disable=protected-access # Get a TensorSpec for array-like data without # converting the data to a Tensor if hasattr(value, ""shape"") and hasattr(value, ""dtype""): return tf.TensorSpec(value.shape, value.dtype) else: return tf.type_spec_from_value(value) ``` ###Assistant : Grab type_spec without converting array-likes to tensors. 
" 795,"def make_window(): sg.theme(settings.get('-theme-', 'DarkBlue2')) # set the theme layout = [[sg.Text('Settings Window')], [sg.Input(settings.get('-input-', ''), k='-IN-')], [sg.Listbox(sg.theme_list(), default_values=[settings['-theme-'],], size=(15, 10), k='-LISTBOX-')], [sg.CB('Option 1', settings.get('-option1-', True), k='-CB1-')], [sg.CB('Option 2', settings.get('-option2-', False), k='-CB2-')], [sg.T('Settings file = ' + settings.get_filename())], [sg.Button('Save'), sg.Button('Settings Dictionary'), sg.Button('Exit without saving', k='Exit')]] window = sg.Window('A Settings Window', layout) "," Creates a new window. The default values for some elements are pulled directly from the ""User Settings"" without the use of temp variables. Some get_entry calls don't have a default value, such as theme, because there was an initial call that would have set the default value if the setting wasn't present. Could still put the default value if you wanted but it would be 2 places to change if you wanted a different default value. Use of a lookup table to map between element keys and user settings could be aded. This demo is intentionally done without one to show how to use the settings APIs in the most basic, straightforward way. If your application allows changing the theme, then a make_window function is good to have so that you can close and re-create a window easily. :return: (sg.Window) The window that was created ",145,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_window(): sg.theme(settings.get('-theme-', 'DarkBlue2')) # set the theme layout = [[sg.Text('Settings Window')], [sg.Input(settings.get('-input-', ''), k='-IN-')], [sg.Listbox(sg.theme_list(), default_values=[settings['-theme-'],], size=(15, 10), k='-LISTBOX-')], [sg.CB('Option 1', settings.get('-option1-', True), k='-CB1-')], [sg.CB('Option 2', settings.get('-option2-', False), k='-CB2-')], [sg.T('Settings file = ' + settings.get_filename())], [sg.Button('Save'), sg.Button('Settings Dictionary'), sg.Button('Exit without saving', k='Exit')]] window = sg.Window('A Settings Window', layout) ``` ###Assistant : Creates a new window. The default values for some elements are pulled directly from the ""User Settings"" without the use of temp variables. Some get_entry calls don't have a default value, such as theme, because there was an initial call that would have set the default value if the setting wasn't present. Could still put the default value if you wanted but it would be 2 places to change if you wanted a different default value. Use of a lookup table to map between element keys and user settings could be aded. This demo is intentionally done without one to show how to use the settings APIs in the most basic, straightforward way. If your application allows changing the theme, then a make_window function is good to have so that you can close and re-create a window easily. :return: (sg.Window) The window that was created " 796,"async def _collect(self) -> CommonUsageMetrics: dau_count = await self._store.count_daily_users() return CommonUsageMetrics( daily_active_users=dau_count, ) ","Collect the common metrics and either create the CommonUsageMetrics object to use if it doesn't exist yet, or update it. ",20,13,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def _collect(self) -> CommonUsageMetrics: dau_count = await self._store.count_daily_users() return CommonUsageMetrics( daily_active_users=dau_count, ) ``` ###Assistant : Collect the common metrics and either create the CommonUsageMetrics object to use if it doesn't exist yet, or update it. " 797,"def getPreprocessorSymbols(cls): if cls.preprocessor_symbols is None: cls.preprocessor_symbols = OrderedDict() for plugin in getActivePlugins(): value = plugin.getPreprocessorSymbols() if value is not None: assert type(value) is dict, value # We order per plugin, but from the plugins, lets just take a dict # and achieve determinism by ordering the defines by name. for key, value in sorted(value.items()): # False alarm, pylint: disable=I0021,unsupported-assignment-operation cls.preprocessor_symbols[key] = value return cls.preprocessor_symbols ","Let plugins provide C defines to be used in compilation. Notes: The plugins can each contribute, but are hopefully using a namespace for their defines. Returns: OrderedDict(), where None value indicates no define value, i.e. ""-Dkey=value"" vs. ""-Dkey"" ",38,65,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getPreprocessorSymbols(cls): if cls.preprocessor_symbols is None: cls.preprocessor_symbols = OrderedDict() for plugin in getActivePlugins(): value = plugin.getPreprocessorSymbols() if value is not None: assert type(value) is dict, value # We order per plugin, but from the plugins, lets just take a dict # and achieve determinism by ordering the defines by name. for key, value in sorted(value.items()): # False alarm, pylint: disable=I0021,unsupported-assignment-operation cls.preprocessor_symbols[key] = value return cls.preprocessor_symbols ``` ###Assistant : Let plugins provide C defines to be used in compilation. Notes: The plugins can each contribute, but are hopefully using a namespace for their defines. Returns: OrderedDict(), where None value indicates no define value, i.e. ""-Dkey=value"" vs. ""-Dkey"" " 798,"def _check_xy(self, renderer=None): if renderer is None: renderer = self.figure._get_renderer() b = self.get_annotation_clip() if b or (b is None and self.xycoords == ""data""): # check if self.xy is inside the axes. xy_pixel = self._get_position_xy(renderer) return self.axes.contains_point(xy_pixel) return True ",Check whether the annotation at *xy_pixel* should be drawn.,9,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_xy(self, renderer=None): if renderer is None: renderer = self.figure._get_renderer() b = self.get_annotation_clip() if b or (b is None and self.xycoords == ""data""): # check if self.xy is inside the axes. xy_pixel = self._get_position_xy(renderer) return self.axes.contains_point(xy_pixel) return True ``` ###Assistant : Check whether the annotation at *xy_pixel* should be drawn. " 799,"def _split_ssh_args(argstring): # In Python3, shlex.split doesn't work on a byte string. return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()] "," Takes a string like '-o Foo=1 -o Bar=""foo bar""' and returns a list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to the argument list. The list will not contain any empty elements. ",34,20,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _split_ssh_args(argstring): # In Python3, shlex.split doesn't work on a byte string. return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()] ``` ###Assistant : Takes a string like '-o Foo=1 -o Bar=""foo bar""' and returns a list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to the argument list. The list will not contain any empty elements. " 800,"def is_empty_indexer(indexer) -> bool: if is_list_like(indexer) and not len(indexer): return True if not isinstance(indexer, tuple): indexer = (indexer,) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) # ----------------------------------------------------------- # Indexer Validation "," Check if we have an empty indexer. Parameters ---------- indexer : object Returns ------- bool ",15,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_empty_indexer(indexer) -> bool: if is_list_like(indexer) and not len(indexer): return True if not isinstance(indexer, tuple): indexer = (indexer,) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) # ----------------------------------------------------------- # Indexer Validation ``` ###Assistant : Check if we have an empty indexer. Parameters ---------- indexer : object Returns ------- bool " 801,"def make_layoutgrids_gs(layoutgrids, gs): if gs in layoutgrids or gs.figure is None: return layoutgrids # in order to do constrained_layout there has to be at least *one* # gridspec in the tree: layoutgrids['hasgrids'] = True if not hasattr(gs, '_subplot_spec'): # normal gridspec parent = layoutgrids[gs.figure] layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=parent, parent_inner=True, name='gridspec', ncols=gs._ncols, nrows=gs._nrows, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) else: # this is a gridspecfromsubplotspec: subplot_spec = gs._subplot_spec parentgs = subplot_spec.get_gridspec() # if a nested gridspec it is possible the parent is not in there yet: if parentgs not in layoutgrids: layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs) subspeclb = layoutgrids[parentgs] # get a unique representation: rep = object.__repr__(gs) + 'top' # gridspecfromsubplotspec need an outer container: if rep not in layoutgrids: layoutgrids[rep] = mlayoutgrid.LayoutGrid( parent=subspeclb, name='top', nrows=1, ncols=1, parent_pos=(subplot_spec.rowspan, subplot_spec.colspan)) layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=layoutgrids[rep], name='gridspec', nrows=gs._nrows, ncols=gs._ncols, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) return layoutgrids "," Make the layoutgrid for a gridspec (and anything nested in the gridspec) ",12,134,Python," ###User : Below is a Python method which does a task. 
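Illustrative calls for the is_empty_indexer helper shown a little earlier, assuming it is in scope together with pandas' is_list_like; the inputs are made up for demonstration:
```Python
import numpy as np

print(is_empty_indexer([]))                                  # True: zero-length list
print(is_empty_indexer(np.array([], dtype=np.intp)))         # True: zero-length ndarray
print(is_empty_indexer((np.array([], dtype=np.intp), [0])))  # True: tuple holding an empty ndarray
print(is_empty_indexer([0, 1]))                              # False
```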
Create a documentation for the below code : ```Python def make_layoutgrids_gs(layoutgrids, gs): if gs in layoutgrids or gs.figure is None: return layoutgrids # in order to do constrained_layout there has to be at least *one* # gridspec in the tree: layoutgrids['hasgrids'] = True if not hasattr(gs, '_subplot_spec'): # normal gridspec parent = layoutgrids[gs.figure] layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=parent, parent_inner=True, name='gridspec', ncols=gs._ncols, nrows=gs._nrows, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) else: # this is a gridspecfromsubplotspec: subplot_spec = gs._subplot_spec parentgs = subplot_spec.get_gridspec() # if a nested gridspec it is possible the parent is not in there yet: if parentgs not in layoutgrids: layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs) subspeclb = layoutgrids[parentgs] # get a unique representation: rep = object.__repr__(gs) + 'top' # gridspecfromsubplotspec need an outer container: if rep not in layoutgrids: layoutgrids[rep] = mlayoutgrid.LayoutGrid( parent=subspeclb, name='top', nrows=1, ncols=1, parent_pos=(subplot_spec.rowspan, subplot_spec.colspan)) layoutgrids[gs] = mlayoutgrid.LayoutGrid( parent=layoutgrids[rep], name='gridspec', nrows=gs._nrows, ncols=gs._ncols, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios()) return layoutgrids ``` ###Assistant : Make the layoutgrid for a gridspec (and anything nested in the gridspec) " 802,"def test_sitemap_published_titles(self): sitemap = CMSSitemap() locations = [] urlset = sitemap.get_urls() for item in urlset: locations.append(item['location']) for title in Title.objects.public(): page = title.page.get_public_object() if title.path: url = f'http://example.com/{title.language}/{title.path}/' else: url = f'http://example.com/{title.language}/{title.path}' if page.is_published('en') and not page.publisher_is_draft: self.assertTrue(url in locations) else: self.assertFalse(url in locations) "," Check that published titles are in the urls ",8,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_sitemap_published_titles(self): sitemap = CMSSitemap() locations = [] urlset = sitemap.get_urls() for item in urlset: locations.append(item['location']) for title in Title.objects.public(): page = title.page.get_public_object() if title.path: url = f'http://example.com/{title.language}/{title.path}/' else: url = f'http://example.com/{title.language}/{title.path}' if page.is_published('en') and not page.publisher_is_draft: self.assertTrue(url in locations) else: self.assertFalse(url in locations) ``` ###Assistant : Check that published titles are in the urls " 803,"def eye(N, chunks=""auto"", M=None, k=0, dtype=float): eye = {} if M is None: M = N if dtype is None: dtype = float if not isinstance(chunks, (int, str)): raise ValueError(""chunks must be an int or string"") vchunks, hchunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype) chunks = vchunks[0] token = tokenize(N, chunks, M, k, dtype) name_eye = ""eye-"" + token for i, vchunk in enumerate(vchunks): for j, hchunk in enumerate(hchunks): if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks: eye[name_eye, i, j] = ( np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype, ) else: eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype) return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype) @derived_from(np)"," Return a 2-D Array with ones on the diagonal and zeros elsewhere. 
Parameters ---------- N : int Number of rows in the output. chunks : int, str How to chunk the array. Must be one of the following forms: - A blocksize like 1000. - A size in bytes, like ""100 MiB"" which will choose a uniform block-like shape - The word ""auto"" which acts like the above, but uses a configuration value ``array.chunk-size`` for the chunk size M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. Returns ------- I : Array of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. ",162,121,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def eye(N, chunks=""auto"", M=None, k=0, dtype=float): eye = {} if M is None: M = N if dtype is None: dtype = float if not isinstance(chunks, (int, str)): raise ValueError(""chunks must be an int or string"") vchunks, hchunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype) chunks = vchunks[0] token = tokenize(N, chunks, M, k, dtype) name_eye = ""eye-"" + token for i, vchunk in enumerate(vchunks): for j, hchunk in enumerate(hchunks): if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks: eye[name_eye, i, j] = ( np.eye, vchunk, hchunk, k - (j - i) * chunks, dtype, ) else: eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype) return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype) @derived_from(np) ``` ###Assistant : Return a 2-D Array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. chunks : int, str How to chunk the array. Must be one of the following forms: - A blocksize like 1000. - A size in bytes, like ""100 MiB"" which will choose a uniform block-like shape - The word ""auto"" which acts like the above, but uses a configuration value ``array.chunk-size`` for the chunk size M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. Returns ------- I : Array of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. " 804,"def is_composite_or_composite_value(tensor): # TODO(b/125094323): This should be isinstance(CompositeTensor) or # isinstance(CompositeTensorValue) once we support that. return isinstance( tensor, ( tf.__internal__.CompositeTensor, tf.compat.v1.SparseTensorValue, tf.compat.v1.ragged.RaggedTensorValue, ), ) ",Returns true if 'tensor' is a CompositeTensor or a CT Value object.,12,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_composite_or_composite_value(tensor): # TODO(b/125094323): This should be isinstance(CompositeTensor) or # isinstance(CompositeTensorValue) once we support that. return isinstance( tensor, ( tf.__internal__.CompositeTensor, tf.compat.v1.SparseTensorValue, tf.compat.v1.ragged.RaggedTensorValue, ), ) ``` ###Assistant : Returns true if 'tensor' is a CompositeTensor or a CT Value object. 
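Usage sketch for the eye wrapper documented a little earlier (it matches the public dask.array.eye signature):
```Python
import dask.array as da

x = da.eye(5, chunks=2, k=1)   # ones on the first superdiagonal, 2x2 chunk grid
print(x.chunks)                # ((2, 2, 1), (2, 2, 1))
print(x.compute())             # 5x5 array with ones just above the main diagonal
```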
" 805,"def create_partition(tblname, start=None, end=None, partition_label=None, minutely=False): current_time = now() if not start: if minutely: start = current_time.replace(microsecond=0, second=0) else: start = current_time.replace(microsecond=0, second=0, minute=0) if not end: if minutely: end = start.replace(microsecond=0, second=0) + timedelta(minutes=1) else: end = start.replace(microsecond=0, second=0, minute=0) + timedelta(hours=1) start_timestamp = str(start) end_timestamp = str(end) if not partition_label: if minutely: partition_label = start.strftime('%Y%m%d_%H%M') else: partition_label = start.strftime('%Y%m%d_%H') try: with transaction.atomic(): with connection.cursor() as cursor: cursor.execute( f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} ' f'PARTITION OF {tblname} ' f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');' ) except ProgrammingError as e: logger.debug(f'Caught known error due to existing partition: {e}') ","Creates new partition table for events. - start defaults to beginning of current hour - end defaults to end of current hour - partition_label defaults to YYYYMMDD_HH - minutely will create partitions that span _a single minute_ for testing purposes ",40,99,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_partition(tblname, start=None, end=None, partition_label=None, minutely=False): current_time = now() if not start: if minutely: start = current_time.replace(microsecond=0, second=0) else: start = current_time.replace(microsecond=0, second=0, minute=0) if not end: if minutely: end = start.replace(microsecond=0, second=0) + timedelta(minutes=1) else: end = start.replace(microsecond=0, second=0, minute=0) + timedelta(hours=1) start_timestamp = str(start) end_timestamp = str(end) if not partition_label: if minutely: partition_label = start.strftime('%Y%m%d_%H%M') else: partition_label = start.strftime('%Y%m%d_%H') try: with transaction.atomic(): with connection.cursor() as cursor: cursor.execute( f'CREATE TABLE IF NOT EXISTS {tblname}_{partition_label} ' f'PARTITION OF {tblname} ' f'FOR VALUES FROM (\'{start_timestamp}\') to (\'{end_timestamp}\');' ) except ProgrammingError as e: logger.debug(f'Caught known error due to existing partition: {e}') ``` ###Assistant : Creates new partition table for events. - start defaults to beginning of current hour - end defaults to end of current hour - partition_label defaults to YYYYMMDD_HH - minutely will create partitions that span _a single minute_ for testing purposes " 806,"def rbf_kernel(X, Y=None, gamma=None): X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = euclidean_distances(X, Y, squared=True) K *= -gamma np.exp(K, K) # exponentiate K in-place return K ","Compute the rbf (gaussian) kernel between X and Y. K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Read more in the :ref:`User Guide `. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) A feature array. Y : ndarray of shape (n_samples_Y, n_features), default=None An optional second feature array. If `None`, uses `Y=X`. gamma : float, default=None If None, defaults to 1.0 / n_features. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) The RBF kernel. ",85,34,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def rbf_kernel(X, Y=None, gamma=None): X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = euclidean_distances(X, Y, squared=True) K *= -gamma np.exp(K, K) # exponentiate K in-place return K ``` ###Assistant : Compute the rbf (gaussian) kernel between X and Y. K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Read more in the :ref:`User Guide `. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) A feature array. Y : ndarray of shape (n_samples_Y, n_features), default=None An optional second feature array. If `None`, uses `Y=X`. gamma : float, default=None If None, defaults to 1.0 / n_features. Returns ------- kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y) The RBF kernel. " 807,"def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None): if errors is None: errors = AnsibleValidationErrorMultiple() for param, spec in argument_spec.items(): choices = spec.get('choices') if choices is None: continue if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)): if param in parameters: # Allow one or more when type='list' param with choices if isinstance(parameters[param], list): diff_list = [item for item in parameters[param] if item not in choices] if diff_list: choices_str = "", "".join([to_native(c) for c in choices]) diff_str = "", "".join(diff_list) msg = ""value of %s must be one or more of: %s. Got no match for: %s"" % (param, choices_str, diff_str) if options_context: msg = ""{0} found in {1}"".format(msg, "" -> "".join(options_context)) errors.append(ArgumentValueError(msg)) elif parameters[param] not in choices: # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking # the value. If we can't figure this out, module author is responsible. if parameters[param] == 'False': overlap = BOOLEANS_FALSE.intersection(choices) if len(overlap) == 1: # Extract from a set (parameters[param],) = overlap if parameters[param] == 'True': overlap = BOOLEANS_TRUE.intersection(choices) if len(overlap) == 1: (parameters[param],) = overlap if parameters[param] not in choices: choices_str = "", "".join([to_native(c) for c in choices]) msg = ""value of %s must be one of: %s, got: %s"" % (param, choices_str, parameters[param]) if options_context: msg = ""{0} found in {1}"".format(msg, "" -> "".join(options_context)) errors.append(ArgumentValueError(msg)) else: msg = ""internal error: choices for argument %s are not iterable: %s"" % (param, choices) if options_context: msg = ""{0} found in {1}"".format(msg, "" -> "".join(options_context)) errors.append(ArgumentTypeError(msg)) ","Ensure all arguments have the requested values, and there are no stray arguments",13,248,Python," ###User : Below is a Python method which does a task. 
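To motivate the 'PyYaml converts certain strings to bools' comment in the validator above, a small illustration with plain PyYAML (independent of Ansible, shown only as an assumption about typical YAML input):
```Python
import yaml

print(yaml.safe_load('flag: false'))     # {'flag': False} - a bool, not the string 'false'
print(yaml.safe_load('flag: ""false""'))   # {'flag': 'false'} - quoting keeps it a string
```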
Create a documentation for the below code : ```Python def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None): if errors is None: errors = AnsibleValidationErrorMultiple() for param, spec in argument_spec.items(): choices = spec.get('choices') if choices is None: continue if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)): if param in parameters: # Allow one or more when type='list' param with choices if isinstance(parameters[param], list): diff_list = [item for item in parameters[param] if item not in choices] if diff_list: choices_str = "", "".join([to_native(c) for c in choices]) diff_str = "", "".join(diff_list) msg = ""value of %s must be one or more of: %s. Got no match for: %s"" % (param, choices_str, diff_str) if options_context: msg = ""{0} found in {1}"".format(msg, "" -> "".join(options_context)) errors.append(ArgumentValueError(msg)) elif parameters[param] not in choices: # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking # the value. If we can't figure this out, module author is responsible. if parameters[param] == 'False': overlap = BOOLEANS_FALSE.intersection(choices) if len(overlap) == 1: # Extract from a set (parameters[param],) = overlap if parameters[param] == 'True': overlap = BOOLEANS_TRUE.intersection(choices) if len(overlap) == 1: (parameters[param],) = overlap if parameters[param] not in choices: choices_str = "", "".join([to_native(c) for c in choices]) msg = ""value of %s must be one of: %s, got: %s"" % (param, choices_str, parameters[param]) if options_context: msg = ""{0} found in {1}"".format(msg, "" -> "".join(options_context)) errors.append(ArgumentValueError(msg)) else: msg = ""internal error: choices for argument %s are not iterable: %s"" % (param, choices) if options_context: msg = ""{0} found in {1}"".format(msg, "" -> "".join(options_context)) errors.append(ArgumentTypeError(msg)) ``` ###Assistant : Ensure all arguments have the requested values, and there are no stray arguments " 808,"def content(self): if self._content is None: self._load() return self._content "," The content of the artifact (representation varies) ",7,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def content(self): if self._content is None: self._load() return self._content ``` ###Assistant : The content of the artifact (representation varies) " 809,"def get_product_filter_data(query_args=None): if isinstance(query_args, str): query_args = json.loads(query_args) query_args = frappe._dict(query_args) if query_args: search = query_args.get(""search"") field_filters = query_args.get(""field_filters"", {}) attribute_filters = query_args.get(""attribute_filters"", {}) start = cint(query_args.start) if query_args.get(""start"") else 0 item_group = query_args.get(""item_group"") from_filters = query_args.get(""from_filters"") else: search, attribute_filters, item_group, from_filters = None, None, None, None field_filters = {} start = 0 # if new filter is checked, reset start to show filtered items from page 1 if from_filters: start = 0 sub_categories = [] if item_group: sub_categories = get_child_groups_for_website(item_group, immediate=True) engine = ProductQuery() try: result = engine.query( attribute_filters, field_filters, search_term=search, start=start, item_group=item_group ) except Exception: traceback = frappe.get_traceback() frappe.log_error(traceback, frappe._(""Product Engine Error"")) return {""exc"": ""Something went wrong!""} # discount filter data filters = {} discounts = result[""discounts""] if discounts: filter_engine = ProductFiltersBuilder() filters[""discount_filters""] = filter_engine.get_discount_filters(discounts) return { ""items"": result[""items""] or [], ""filters"": filters, ""settings"": engine.settings, ""sub_categories"": sub_categories, ""items_count"": result[""items_count""] } @frappe.whitelist(allow_guest=True)"," Returns filtered products and discount filters. :param query_args (dict): contains filters to get products list Query Args filters: search (str): Search Term. field_filters (dict): Keys include item_group, brand, etc. attribute_filters(dict): Keys include Color, Size, etc. start (int): Offset items by item_group (str): Valid Item Group from_filters (bool): Set as True to jump to page 1 ",55,143,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_product_filter_data(query_args=None): if isinstance(query_args, str): query_args = json.loads(query_args) query_args = frappe._dict(query_args) if query_args: search = query_args.get(""search"") field_filters = query_args.get(""field_filters"", {}) attribute_filters = query_args.get(""attribute_filters"", {}) start = cint(query_args.start) if query_args.get(""start"") else 0 item_group = query_args.get(""item_group"") from_filters = query_args.get(""from_filters"") else: search, attribute_filters, item_group, from_filters = None, None, None, None field_filters = {} start = 0 # if new filter is checked, reset start to show filtered items from page 1 if from_filters: start = 0 sub_categories = [] if item_group: sub_categories = get_child_groups_for_website(item_group, immediate=True) engine = ProductQuery() try: result = engine.query( attribute_filters, field_filters, search_term=search, start=start, item_group=item_group ) except Exception: traceback = frappe.get_traceback() frappe.log_error(traceback, frappe._(""Product Engine Error"")) return {""exc"": ""Something went wrong!""} # discount filter data filters = {} discounts = result[""discounts""] if discounts: filter_engine = ProductFiltersBuilder() filters[""discount_filters""] = filter_engine.get_discount_filters(discounts) return { ""items"": result[""items""] or [], ""filters"": filters, ""settings"": engine.settings, ""sub_categories"": sub_categories, ""items_count"": result[""items_count""] } @frappe.whitelist(allow_guest=True) ``` ###Assistant : Returns filtered products and discount filters. :param query_args (dict): contains filters to get products list Query Args filters: search (str): Search Term. field_filters (dict): Keys include item_group, brand, etc. attribute_filters(dict): Keys include Color, Size, etc. start (int): Offset items by item_group (str): Valid Item Group from_filters (bool): Set as True to jump to page 1 " 810,"async def async_turn_on(self, **kwargs): # noqa: C901 should_update = False on_command_type = self._config[CONF_ON_COMMAND_TYPE] ","Turn the device on. This method is a coroutine. ",9,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_turn_on(self, **kwargs): # noqa: C901 should_update = False on_command_type = self._config[CONF_ON_COMMAND_TYPE] ``` ###Assistant : Turn the device on. This method is a coroutine. " 811,"def test_fake_mac(self, modifiers, expected): seq = keyutils.KeySequence() info = keyutils.KeyInfo(key=Qt.Key.Key_A, modifiers=modifiers) new = seq.append_event(info.to_event()) assert new[0] == keyutils.KeyInfo(Qt.Key.Key_A, expected) ",Make sure Control/Meta are swapped with a simulated Mac.,9,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_fake_mac(self, modifiers, expected): seq = keyutils.KeySequence() info = keyutils.KeyInfo(key=Qt.Key.Key_A, modifiers=modifiers) new = seq.append_event(info.to_event()) assert new[0] == keyutils.KeyInfo(Qt.Key.Key_A, expected) ``` ###Assistant : Make sure Control/Meta are swapped with a simulated Mac. " 812,"def default_params(self) -> dict: return {""order"": ""asc"", ""sort"": self.sort_key, ""limit"": self.limit} "," Returns the parameters to be sent together with the API call to Recurly ",13,11,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def default_params(self) -> dict: return {""order"": ""asc"", ""sort"": self.sort_key, ""limit"": self.limit} ``` ###Assistant : Returns the parameters to be sent together with the API call to Recurly " 813,"def _should_start_new_health_check(self) -> bool: if self._health_check_ref is not None: # There's already an active health check. return False # If there's no active health check, kick off another and reset # the timer if it's been long enough since the last health # check. Add some randomness to avoid synchronizing across all # replicas. time_since_last = time.time() - self._last_health_check_time randomized_period = self._health_check_period_s * random.uniform(0.9, 1.1) return time_since_last > randomized_period ","Determines if a new health check should be kicked off. A health check will be started if: 1) There is not already an active health check. 2) It has been more than self._health_check_period_s since the previous health check was *started*. This assumes that self._health_check_ref is reset to `None` when an active health check succeeds or fails (due to returning or timeout). ",61,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _should_start_new_health_check(self) -> bool: if self._health_check_ref is not None: # There's already an active health check. return False # If there's no active health check, kick off another and reset # the timer if it's been long enough since the last health # check. Add some randomness to avoid synchronizing across all # replicas. time_since_last = time.time() - self._last_health_check_time randomized_period = self._health_check_period_s * random.uniform(0.9, 1.1) return time_since_last > randomized_period ``` ###Assistant : Determines if a new health check should be kicked off. A health check will be started if: 1) There is not already an active health check. 2) It has been more than self._health_check_period_s since the previous health check was *started*. This assumes that self._health_check_ref is reset to `None` when an active health check succeeds or fails (due to returning or timeout). " 814,"def numpy_pad_and_concatenate(array1, array2, padding_index=-100): array1 = atleast_1d(array1) array2 = atleast_1d(array2) if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]: return np.concatenate((array1, array2), axis=0) # Let's figure out the new shape new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:] # Now let's fill the result tensor result = np.full_like(array1, padding_index, shape=new_shape) result[: array1.shape[0], : array1.shape[1]] = array1 result[array1.shape[0] :, : array2.shape[1]] = array2 return result ","Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary.",14,64,Python," ###User : Below is a Python method which does a task. 
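Assuming numpy_pad_and_concatenate above is in scope (together with the atleast_1d helper it calls), a small usage example with made-up label batches:
```Python
import numpy as np

a = np.array([[1, 2, 3]])
b = np.array([[4, 5]])
print(numpy_pad_and_concatenate(a, b, padding_index=-100))
# [[   1    2    3]
#  [   4    5 -100]]
```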
Create a documentation for the below code : ```Python def numpy_pad_and_concatenate(array1, array2, padding_index=-100): array1 = atleast_1d(array1) array2 = atleast_1d(array2) if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]: return np.concatenate((array1, array2), axis=0) # Let's figure out the new shape new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:] # Now let's fill the result tensor result = np.full_like(array1, padding_index, shape=new_shape) result[: array1.shape[0], : array1.shape[1]] = array1 result[array1.shape[0] :, : array2.shape[1]] = array2 return result ``` ###Assistant : Concatenates `array1` and `array2` on first axis, applying padding on the second if necessary. " 815,"def _ray(self) -> ""ray"": global ray if ray is None: try: import ray except ImportError as exc: raise RuntimeError( ""Using the `RayTaskRunner` requires `ray` to be installed."" ) from exc return ray "," Delayed import of `ray` allowing configuration of the task runner without the extra installed and improves `prefect` import times. ",19,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _ray(self) -> ""ray"": global ray if ray is None: try: import ray except ImportError as exc: raise RuntimeError( ""Using the `RayTaskRunner` requires `ray` to be installed."" ) from exc return ray ``` ###Assistant : Delayed import of `ray` allowing configuration of the task runner without the extra installed and improves `prefect` import times. " 816,"def temperature_unit(self) -> str: if ( self._unit_value and self._unit_value.metadata.unit and ""f"" in self._unit_value.metadata.unit.lower() ): return UnitOfTemperature.FAHRENHEIT return UnitOfTemperature.CELSIUS ",Return the unit of measurement used by the platform.,9,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def temperature_unit(self) -> str: if ( self._unit_value and self._unit_value.metadata.unit and ""f"" in self._unit_value.metadata.unit.lower() ): return UnitOfTemperature.FAHRENHEIT return UnitOfTemperature.CELSIUS ``` ###Assistant : Return the unit of measurement used by the platform. 
" 817,"def get_point_of_rotated_vertical(origin, line_length, degrees): rad = np.deg2rad(-degrees) return [origin[0] + line_length * np.sin(rad), origin[1] + line_length * np.cos(rad)] fig, ax = plt.subplots(figsize=(8, 7)) ax.set(xlim=(0, 6), ylim=(-1, 4)) ax.set_title(""Orientation of the bracket arrows relative to angleA and angleB"") for i, style in enumerate([""]-["", ""|-|""]): for j, angle in enumerate([-40, 60]): y = 2*i + j arrow_centers = ((1, y), (5, y)) vlines = ((1, y + 0.5), (5, y + 0.5)) anglesAB = (angle, -angle) bracketstyle = f""{style}, angleA={anglesAB[0]}, angleB={anglesAB[1]}"" bracket = FancyArrowPatch(*arrow_centers, arrowstyle=bracketstyle, mutation_scale=42) ax.add_patch(bracket) ax.text(3, y + 0.05, bracketstyle, ha=""center"", va=""bottom"") ax.vlines([i[0] for i in vlines], [y, y], [i[1] for i in vlines], linestyles=""--"", color=""C0"") # Get the top coordinates for the drawn patches at A and B patch_tops = [get_point_of_rotated_vertical(center, 0.5, angle) for center, angle in zip(arrow_centers, anglesAB)] # Define the connection directions for the annotation arrows connection_dirs = (1, -1) if angle > 0 else (-1, 1) # Add arrows and annotation text arrowstyle = ""Simple, tail_width=0.5, head_width=4, head_length=8"" for vline, dir, patch_top, angle in zip(vlines, connection_dirs, patch_tops, anglesAB): kw = dict(connectionstyle=f""arc3,rad={dir * 0.5}"", arrowstyle=arrowstyle, color=""C0"") ax.add_patch(FancyArrowPatch(vline, patch_top, **kw)) ax.text(vline[0] - dir * 0.15, y + 0.3, f'{angle}°', ha=""center"", va=""center"") ############################################################################# # # .. admonition:: References # # The use of the following functions, methods, classes and modules is shown # in this example: # # - `matplotlib.patches.ArrowStyle` ",Return xy coordinates of the vertical line end rotated by degrees.,11,221,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_point_of_rotated_vertical(origin, line_length, degrees): rad = np.deg2rad(-degrees) return [origin[0] + line_length * np.sin(rad), origin[1] + line_length * np.cos(rad)] fig, ax = plt.subplots(figsize=(8, 7)) ax.set(xlim=(0, 6), ylim=(-1, 4)) ax.set_title(""Orientation of the bracket arrows relative to angleA and angleB"") for i, style in enumerate([""]-["", ""|-|""]): for j, angle in enumerate([-40, 60]): y = 2*i + j arrow_centers = ((1, y), (5, y)) vlines = ((1, y + 0.5), (5, y + 0.5)) anglesAB = (angle, -angle) bracketstyle = f""{style}, angleA={anglesAB[0]}, angleB={anglesAB[1]}"" bracket = FancyArrowPatch(*arrow_centers, arrowstyle=bracketstyle, mutation_scale=42) ax.add_patch(bracket) ax.text(3, y + 0.05, bracketstyle, ha=""center"", va=""bottom"") ax.vlines([i[0] for i in vlines], [y, y], [i[1] for i in vlines], linestyles=""--"", color=""C0"") # Get the top coordinates for the drawn patches at A and B patch_tops = [get_point_of_rotated_vertical(center, 0.5, angle) for center, angle in zip(arrow_centers, anglesAB)] # Define the connection directions for the annotation arrows connection_dirs = (1, -1) if angle > 0 else (-1, 1) # Add arrows and annotation text arrowstyle = ""Simple, tail_width=0.5, head_width=4, head_length=8"" for vline, dir, patch_top, angle in zip(vlines, connection_dirs, patch_tops, anglesAB): kw = dict(connectionstyle=f""arc3,rad={dir * 0.5}"", arrowstyle=arrowstyle, color=""C0"") ax.add_patch(FancyArrowPatch(vline, patch_top, **kw)) ax.text(vline[0] - dir * 0.15, y + 0.3, f'{angle}°', ha=""center"", va=""center"") ############################################################################# # # .. admonition:: References # # The use of the following functions, methods, classes and modules is shown # in this example: # # - `matplotlib.patches.ArrowStyle` ``` ###Assistant : Return xy coordinates of the vertical line end rotated by degrees. " 818,"def processEntityInAttribute(self, allowedChar): self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) ","This method replaces the need for ""entityInAttributeValueState"". ",7,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def processEntityInAttribute(self, allowedChar): self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) ``` ###Assistant : This method replaces the need for ""entityInAttributeValueState"". " 819,"def _get_ordered_dummies(mul, verbose=False): # setup dicts to avoid repeated calculations in key() args = Mul.make_args(mul) fac_dum = { fac: fac.atoms(Dummy) for fac in args } fac_repr = { fac: __kprint(fac) for fac in args } all_dums = set().union(*fac_dum.values()) mask = {} for d in all_dums: if d.assumptions0.get('below_fermi'): mask[d] = '0' elif d.assumptions0.get('above_fermi'): mask[d] = '1' else: mask[d] = '2' dum_repr = {d: __kprint(d) for d in all_dums} ","Returns all dummies in the mul sorted in canonical order. Explanation =========== The purpose of the canonical ordering is that dummies can be substituted consistently across terms with the result that equivalent terms can be simplified. It is not possible to determine if two terms are equivalent based solely on the dummy order. However, a consistent substitution guided by the ordered dummies should lead to trivially (non-)equivalent terms, thereby revealing the equivalence. This also means that if two terms have identical sequences of dummies, the (non-)equivalence should already be apparent. 
Strategy -------- The canonical order is given by an arbitrary sorting rule. A sort key is determined for each dummy as a tuple that depends on all factors where the index is present. The dummies are thereby sorted according to the contraction structure of the term, instead of sorting based solely on the dummy symbol itself. After all dummies in the term has been assigned a key, we check for identical keys, i.e. unorderable dummies. If any are found, we call a specialized method, _determine_ambiguous(), that will determine a unique order based on recursive calls to _get_ordered_dummies(). Key description --------------- A high level description of the sort key: 1. Range of the dummy index 2. Relation to external (non-dummy) indices 3. Position of the index in the first factor 4. Position of the index in the second factor The sort key is a tuple with the following components: 1. A single character indicating the range of the dummy (above, below or general.) 2. A list of strings with fully masked string representations of all factors where the dummy is present. By masked, we mean that dummies are represented by a symbol to indicate either below fermi, above or general. No other information is displayed about the dummies at this point. The list is sorted stringwise. 3. An integer number indicating the position of the index, in the first factor as sorted in 2. 4. An integer number indicating the position of the index, in the second factor as sorted in 2. If a factor is either of type AntiSymmetricTensor or SqOperator, the index position in items 3 and 4 is indicated as 'upper' or 'lower' only. (Creation operators are considered upper and annihilation operators lower.) If the masked factors are identical, the two factors cannot be ordered unambiguously in item 2. In this case, items 3, 4 are left out. If several indices are contracted between the unorderable factors, it will be handled by _determine_ambiguous() ",415,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_ordered_dummies(mul, verbose=False): # setup dicts to avoid repeated calculations in key() args = Mul.make_args(mul) fac_dum = { fac: fac.atoms(Dummy) for fac in args } fac_repr = { fac: __kprint(fac) for fac in args } all_dums = set().union(*fac_dum.values()) mask = {} for d in all_dums: if d.assumptions0.get('below_fermi'): mask[d] = '0' elif d.assumptions0.get('above_fermi'): mask[d] = '1' else: mask[d] = '2' dum_repr = {d: __kprint(d) for d in all_dums} ``` ###Assistant : Returns all dummies in the mul sorted in canonical order. Explanation =========== The purpose of the canonical ordering is that dummies can be substituted consistently across terms with the result that equivalent terms can be simplified. It is not possible to determine if two terms are equivalent based solely on the dummy order. However, a consistent substitution guided by the ordered dummies should lead to trivially (non-)equivalent terms, thereby revealing the equivalence. This also means that if two terms have identical sequences of dummies, the (non-)equivalence should already be apparent. Strategy -------- The canonical order is given by an arbitrary sorting rule. A sort key is determined for each dummy as a tuple that depends on all factors where the index is present. The dummies are thereby sorted according to the contraction structure of the term, instead of sorting based solely on the dummy symbol itself. 
After all dummies in the term has been assigned a key, we check for identical keys, i.e. unorderable dummies. If any are found, we call a specialized method, _determine_ambiguous(), that will determine a unique order based on recursive calls to _get_ordered_dummies(). Key description --------------- A high level description of the sort key: 1. Range of the dummy index 2. Relation to external (non-dummy) indices 3. Position of the index in the first factor 4. Position of the index in the second factor The sort key is a tuple with the following components: 1. A single character indicating the range of the dummy (above, below or general.) 2. A list of strings with fully masked string representations of all factors where the dummy is present. By masked, we mean that dummies are represented by a symbol to indicate either below fermi, above or general. No other information is displayed about the dummies at this point. The list is sorted stringwise. 3. An integer number indicating the position of the index, in the first factor as sorted in 2. 4. An integer number indicating the position of the index, in the second factor as sorted in 2. If a factor is either of type AntiSymmetricTensor or SqOperator, the index position in items 3 and 4 is indicated as 'upper' or 'lower' only. (Creation operators are considered upper and annihilation operators lower.) If the masked factors are identical, the two factors cannot be ordered unambiguously in item 2. In this case, items 3, 4 are left out. If several indices are contracted between the unorderable factors, it will be handled by _determine_ambiguous() " 820,"def read_results_from_s3(query_execution_id): s3_hook = S3Hook() file_obj = s3_hook.get_conn().get_object(Bucket=S3_BUCKET, Key=f'{S3_KEY}/{query_execution_id}.csv') file_content = file_obj['Body'].read().decode('utf-8') print(file_content) QUERY_CREATE_TABLE = f QUERY_READ_TABLE = f QUERY_DROP_TABLE = f with DAG( dag_id='example_athena', schedule_interval=None, start_date=datetime(2021, 1, 1), tags=['example'], catchup=False, ) as dag: upload_sample_data = S3CreateObjectOperator( task_id='upload_sample_data', s3_bucket=S3_BUCKET, s3_key=f'{S3_KEY}/{ATHENA_TABLE}/{SAMPLE_FILENAME}', data=SAMPLE_DATA, replace=True, ) create_table = AthenaOperator( task_id='create_table', query=QUERY_CREATE_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [START howto_athena_operator] read_table = AthenaOperator( task_id='read_table', query=QUERY_READ_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [END howto_athena_operator] # [START howto_athena_sensor] await_query = AthenaSensor( task_id='await_query', query_execution_id=read_table.output, ) # [END howto_athena_sensor] drop_table = AthenaOperator( task_id='drop_table', query=QUERY_DROP_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) remove_s3_files = S3DeleteObjectsOperator( task_id='remove_s3_files', bucket=S3_BUCKET, prefix=S3_KEY, ) ( upload_sample_data >> create_table >> read_table >> await_query >> read_results_from_s3(read_table.output) >> drop_table >> remove_s3_files ) "," CREATE EXTERNAL TABLE IF NOT EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE} ( `name` string, `age` int ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( 'serialization.format' = ',', 'field.delim' = ',' ) LOCATION 's3://{S3_BUCKET}/{S3_KEY}/{ATHENA_TABLE}' TBLPROPERTIES ('has_encrypted_data'='false') SELECT * from {ATHENA_DATABASE}.{ATHENA_TABLE} DROP 
TABLE IF EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE} ",40,107,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read_results_from_s3(query_execution_id): s3_hook = S3Hook() file_obj = s3_hook.get_conn().get_object(Bucket=S3_BUCKET, Key=f'{S3_KEY}/{query_execution_id}.csv') file_content = file_obj['Body'].read().decode('utf-8') print(file_content) QUERY_CREATE_TABLE = f QUERY_READ_TABLE = f QUERY_DROP_TABLE = f with DAG( dag_id='example_athena', schedule_interval=None, start_date=datetime(2021, 1, 1), tags=['example'], catchup=False, ) as dag: upload_sample_data = S3CreateObjectOperator( task_id='upload_sample_data', s3_bucket=S3_BUCKET, s3_key=f'{S3_KEY}/{ATHENA_TABLE}/{SAMPLE_FILENAME}', data=SAMPLE_DATA, replace=True, ) create_table = AthenaOperator( task_id='create_table', query=QUERY_CREATE_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [START howto_athena_operator] read_table = AthenaOperator( task_id='read_table', query=QUERY_READ_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) # [END howto_athena_operator] # [START howto_athena_sensor] await_query = AthenaSensor( task_id='await_query', query_execution_id=read_table.output, ) # [END howto_athena_sensor] drop_table = AthenaOperator( task_id='drop_table', query=QUERY_DROP_TABLE, database=ATHENA_DATABASE, output_location=f's3://{S3_BUCKET}/{S3_KEY}', ) remove_s3_files = S3DeleteObjectsOperator( task_id='remove_s3_files', bucket=S3_BUCKET, prefix=S3_KEY, ) ( upload_sample_data >> create_table >> read_table >> await_query >> read_results_from_s3(read_table.output) >> drop_table >> remove_s3_files ) ``` ###Assistant : CREATE EXTERNAL TABLE IF NOT EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE} ( `name` string, `age` int ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( 'serialization.format' = ',', 'field.delim' = ',' ) LOCATION 's3://{S3_BUCKET}/{S3_KEY}/{ATHENA_TABLE}' TBLPROPERTIES ('has_encrypted_data'='false') SELECT * from {ATHENA_DATABASE}.{ATHENA_TABLE} DROP TABLE IF EXISTS {ATHENA_DATABASE}.{ATHENA_TABLE} " 821,"def is_datetime64_ns_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: tipo = get_dtype(arr_or_dtype) except TypeError: if is_datetime64tz_dtype(arr_or_dtype): tipo = get_dtype(arr_or_dtype.dtype) else: return False return tipo == DT64NS_DTYPE or ( isinstance(tipo, DatetimeTZDtype) and tipo._unit == ""ns"" ) "," Check whether the provided array or dtype is of the datetime64[ns] dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- bool Whether or not the array or dtype is of the datetime64[ns] dtype. Examples -------- >>> is_datetime64_ns_dtype(str) False >>> is_datetime64_ns_dtype(int) False >>> is_datetime64_ns_dtype(np.datetime64) # no unit False >>> is_datetime64_ns_dtype(DatetimeTZDtype(""ns"", ""US/Eastern"")) True >>> is_datetime64_ns_dtype(np.array(['a', 'b'])) False >>> is_datetime64_ns_dtype(np.array([1, 2])) False >>> is_datetime64_ns_dtype(np.array([], dtype=""datetime64"")) # no unit False >>> is_datetime64_ns_dtype(np.array([], dtype=""datetime64[ps]"")) # wrong unit False >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype=""datetime64[ns]"")) True ",86,37,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def is_datetime64_ns_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: tipo = get_dtype(arr_or_dtype) except TypeError: if is_datetime64tz_dtype(arr_or_dtype): tipo = get_dtype(arr_or_dtype.dtype) else: return False return tipo == DT64NS_DTYPE or ( isinstance(tipo, DatetimeTZDtype) and tipo._unit == ""ns"" ) ``` ###Assistant : Check whether the provided array or dtype is of the datetime64[ns] dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- bool Whether or not the array or dtype is of the datetime64[ns] dtype. Examples -------- >>> is_datetime64_ns_dtype(str) False >>> is_datetime64_ns_dtype(int) False >>> is_datetime64_ns_dtype(np.datetime64) # no unit False >>> is_datetime64_ns_dtype(DatetimeTZDtype(""ns"", ""US/Eastern"")) True >>> is_datetime64_ns_dtype(np.array(['a', 'b'])) False >>> is_datetime64_ns_dtype(np.array([1, 2])) False >>> is_datetime64_ns_dtype(np.array([], dtype=""datetime64"")) # no unit False >>> is_datetime64_ns_dtype(np.array([], dtype=""datetime64[ps]"")) # wrong unit False >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype=""datetime64[ns]"")) True " 822,"def regroup(parser, token): bits = token.split_contents() if len(bits) != 6: raise TemplateSyntaxError(""'regroup' tag takes five arguments"") target = parser.compile_filter(bits[1]) if bits[2] != 'by': raise TemplateSyntaxError(""second argument to 'regroup' tag must be 'by'"") if bits[4] != 'as': raise TemplateSyntaxError( ""next-to-last argument to 'regroup' tag must be 'as'"" ) var_name = bits[5] # RegroupNode will take each item in 'target', put it in the context under # 'var_name', evaluate 'var_name'.'expression' in the current context, and # group by the resulting value. After all items are processed, it will # save the final result in the context under 'var_name', thus clearing the # temporary values. This hack is necessary because the template engine # doesn't provide a context-aware equivalent of Python's getattr. expression = parser.compile_filter(var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3]) return RegroupNode(target, expression, var_name) @register.tag"," Regroup a list of alike objects by a common attribute. This complex tag is best illustrated by use of an example: say that ``musicians`` is a list of ``Musician`` objects that have ``name`` and ``instrument`` attributes, and you'd like to display a list that looks like: * Guitar: * Django Reinhardt * Emily Remler * Piano: * Lovie Austin * Bud Powell * Trumpet: * Duke Ellington The following snippet of template code would accomplish this dubious task:: {% regroup musicians by instrument as grouped %}
    <ul>
    {% for group in grouped %}
        <li>{{ group.grouper }}
        <ul>
            {% for musician in group.list %}
            <li>{{ musician.name }}</li>
            {% endfor %}
        </ul>
        </li>
    {% endfor %}
    </ul>
    As you can see, ``{% regroup %}`` populates a variable with a list of objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the item that was grouped by; ``list`` contains the list of objects that share that ``grouper``. In this case, ``grouper`` would be ``Guitar``, ``Piano`` and ``Trumpet``, and ``list`` is the list of musicians who play this instrument. Note that ``{% regroup %}`` does not work when the list to be grouped is not sorted by the key you are grouping by! This means that if your list of musicians was not sorted by instrument, you'd need to make sure it is sorted before using it, i.e.:: {% regroup musicians|dictsort:""instrument"" by instrument as grouped %} ",230,131,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def regroup(parser, token): bits = token.split_contents() if len(bits) != 6: raise TemplateSyntaxError(""'regroup' tag takes five arguments"") target = parser.compile_filter(bits[1]) if bits[2] != 'by': raise TemplateSyntaxError(""second argument to 'regroup' tag must be 'by'"") if bits[4] != 'as': raise TemplateSyntaxError( ""next-to-last argument to 'regroup' tag must be 'as'"" ) var_name = bits[5] # RegroupNode will take each item in 'target', put it in the context under # 'var_name', evaluate 'var_name'.'expression' in the current context, and # group by the resulting value. After all items are processed, it will # save the final result in the context under 'var_name', thus clearing the # temporary values. This hack is necessary because the template engine # doesn't provide a context-aware equivalent of Python's getattr. expression = parser.compile_filter(var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3]) return RegroupNode(target, expression, var_name) @register.tag ``` ###Assistant : Regroup a list of alike objects by a common attribute. This complex tag is best illustrated by use of an example: say that ``musicians`` is a list of ``Musician`` objects that have ``name`` and ``instrument`` attributes, and you'd like to display a list that looks like: * Guitar: * Django Reinhardt * Emily Remler * Piano: * Lovie Austin * Bud Powell * Trumpet: * Duke Ellington The following snippet of template code would accomplish this dubious task:: {% regroup musicians by instrument as grouped %}
    <ul>
    {% for group in grouped %}
        <li>{{ group.grouper }}
        <ul>
            {% for musician in group.list %}
            <li>{{ musician.name }}</li>
            {% endfor %}
        </ul>
        </li>
    {% endfor %}
    </ul>
    As you can see, ``{% regroup %}`` populates a variable with a list of objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the item that was grouped by; ``list`` contains the list of objects that share that ``grouper``. In this case, ``grouper`` would be ``Guitar``, ``Piano`` and ``Trumpet``, and ``list`` is the list of musicians who play this instrument. Note that ``{% regroup %}`` does not work when the list to be grouped is not sorted by the key you are grouping by! This means that if your list of musicians was not sorted by instrument, you'd need to make sure it is sorted before using it, i.e.:: {% regroup musicians|dictsort:""instrument"" by instrument as grouped %} " 823,"def create_basic_list(cls) -> ""Saved"": metadata = cls.get_metadata(""saved"") urls = cls.get_urls(""saved"") return cls(**metadata, urls=urls, songs=[]) "," Create a basic list with only the required metadata and urls. ### Returns - The Saved object. ",17,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_basic_list(cls) -> ""Saved"": metadata = cls.get_metadata(""saved"") urls = cls.get_urls(""saved"") return cls(**metadata, urls=urls, songs=[]) ``` ###Assistant : Create a basic list with only the required metadata and urls. ### Returns - The Saved object. " 824,"def assert_lists_same(a, b): assert len(a) == len(b) for i in a: assert i in b for i in b: assert i in a ","Compare two lists, ignoring order. Check both that all items in a are in b and that all items in b are in a, otherwise assert_lists_same([""1"", ""1""], [""1"", ""2""]) could be True. ",32,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def assert_lists_same(a, b): assert len(a) == len(b) for i in a: assert i in b for i in b: assert i in a ``` ###Assistant : Compare two lists, ignoring order. Check both that all items in a are in b and that all items in b are in a, otherwise assert_lists_same([""1"", ""1""], [""1"", ""2""]) could be True. " 825,"def test_dataframe_format_with_index(): pytest.importorskip(""jinja2"") df = pd.DataFrame( { ""A"": [1, 2, 3, 4, 5, 6, 7, 8], ""B"": list(""ABCDEFGH""), ""C"": pd.Categorical(list(""AAABBBCC"")), }, index=list(""ABCDEFGH""), ) ddf = dd.from_pandas(df, 3) exp = ( ""Dask DataFrame Structure:\n"" "" A B C\n"" ""npartitions=3 \n"" ""A int64 object category[known]\n"" ""D ... ... ...\n"" ""G ... ... ...\n"" ""H ... ... ...\n"" ""Dask Name: from_pandas, 1 graph layer"" ) assert repr(ddf) == exp assert str(ddf) == exp exp_table = exp = .format( exp_table=exp_table ) assert ddf.to_html() == exp # table is boxed with div and has style exp = .format( style=style, exp_table=exp_table ) assert ddf._repr_html_() == exp ","
    exp_table (expected HTML table for the ddf structure; markup stripped in this dump):
                       A      B       C
        npartitions=3
        A              int64  object  category[known]
        D              ...    ...     ...
        G              ...    ...     ...
        H              ...    ...     ...
    exp for ddf.to_html():
        Dask DataFrame Structure:
        {exp_table}
        Dask Name: from_pandas, 1 graph layer
    exp for ddf._repr_html_() (table boxed in a styled div):
        Dask DataFrame Structure:
        {style}{exp_table}
        Dask Name: from_pandas, 1 graph layer
    ",66,100,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dataframe_format_with_index(): pytest.importorskip(""jinja2"") df = pd.DataFrame( { ""A"": [1, 2, 3, 4, 5, 6, 7, 8], ""B"": list(""ABCDEFGH""), ""C"": pd.Categorical(list(""AAABBBCC"")), }, index=list(""ABCDEFGH""), ) ddf = dd.from_pandas(df, 3) exp = ( ""Dask DataFrame Structure:\n"" "" A B C\n"" ""npartitions=3 \n"" ""A int64 object category[known]\n"" ""D ... ... ...\n"" ""G ... ... ...\n"" ""H ... ... ...\n"" ""Dask Name: from_pandas, 1 graph layer"" ) assert repr(ddf) == exp assert str(ddf) == exp exp_table = exp = .format( exp_table=exp_table ) assert ddf.to_html() == exp # table is boxed with div and has style exp = .format( style=style, exp_table=exp_table ) assert ddf._repr_html_() == exp ``` ###Assistant :
    exp_table (expected HTML table for the ddf structure; markup stripped in this dump):
                       A      B       C
        npartitions=3
        A              int64  object  category[known]
        D              ...    ...     ...
        G              ...    ...     ...
        H              ...    ...     ...
    exp for ddf.to_html():
        Dask DataFrame Structure:
        {exp_table}
        Dask Name: from_pandas, 1 graph layer
    exp for ddf._repr_html_() (table boxed in a styled div):
        Dask DataFrame Structure:
        {style}{exp_table}
        Dask Name: from_pandas, 1 graph layer
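For orientation, a minimal illustration (not part of the original test) of the kind of object whose repr/HTML the test above checks:
```Python
import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame({'A': range(8), 'B': list('ABCDEFGH')}, index=list('ABCDEFGH'))
ddf = dd.from_pandas(df, npartitions=3)
print(repr(ddf))          # per-column dtypes plus the partition divisions, no data
html = ddf._repr_html_()  # the same structure wrapped in the styled HTML table
```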
    " 826,"def test_run_from_argv_closes_connections(self): command = BaseCommand() command.check = lambda: [] command.handle = lambda *args, **kwargs: args with mock.patch(""django.core.management.base.connections"") as mock_connections: command.run_from_argv(["""", """"]) # Test connections have been closed self.assertTrue(mock_connections.close_all.called) "," A command called from the command line should close connections after being executed (#21255). ",14,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_run_from_argv_closes_connections(self): command = BaseCommand() command.check = lambda: [] command.handle = lambda *args, **kwargs: args with mock.patch(""django.core.management.base.connections"") as mock_connections: command.run_from_argv(["""", """"]) # Test connections have been closed self.assertTrue(mock_connections.close_all.called) ``` ###Assistant : A command called from the command line should close connections after being executed (#21255). " 827,"def test_version_managing(self, data_handler): # set up df = pd.DataFrame([ {'a': 1, 'b': dt.datetime(2020, 1, 1)}, {'a': 2, 'b': dt.datetime(2020, 1, 2)}, {'a': 1, 'b': dt.datetime(2020, 1, 3)}, ]) self.set_handler(data_handler, name='pg', tables={'tasks': df}) # ================= retrain cycles ===================== # create folder self.run_sql('create database proj') # -- create model -- self.run_sql( ) self.wait_predictor('proj', 'task_model') assert data_handler().native_query.call_args[0][0] == 'select * from tasks' # tag works in create model ret = self.run_sql('select * from proj.models') assert ret['TAG'][0] == 'first' # use model ret = self.run_sql() assert len(ret) == 3 assert ret.predicted[0] == 42 # -- retrain predictor with tag -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'second'}) # get current model ret = self.run_sql('select * from proj.models') # check target assert ret['PREDICT'][0] == 'b' # check label assert ret['TAG'][0] == 'second' # check integration sql assert data_handler().native_query.call_args[0][0] == 'select * from tasks where a=2' # use model ret = self.run_sql() assert ret.predicted[0] == 42 # used model has tag 'second' models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # -- retrain again with active=0 -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'third'}) ret = self.run_sql('select * from proj.models') # check target is from previous retrain assert ret['PREDICT'][0] == 'b' # use model ret = self.run_sql() # used model has tag 'second' (previous) models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # ================ working with inactive versions ================= # run 3st version model and check used model version ret = self.run_sql() models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # one-line query model by version ret = self.run_sql('SELECT * from proj.task_model.3 where a=1 and b=2') model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # not existing version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.4 where a=1 and b=2', ) assert 'does not exists' in str(exc_info.value) # ================== managing versions ========================= # show models command # Show models where ret = self.run_sql('Show 
models') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models from proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models in proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql(""Show models where name='task_model'"") assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql(""Show models from proj where name='xxx'"") assert len(ret) == 0 # ---------------- # See all versions ret = self.run_sql('select * from proj.models_versions') # we have all tags in versions assert set(ret['TAG']) == {'first', 'second', 'third'} # Set active selected version self.run_sql() # get active version ret = self.run_sql('select * from proj.models_versions where active = 1') assert ret['TAG'][0] == 'first' # use active version ? # Delete specific version self.run_sql() # deleted version not in list ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 2 assert 'second' not in ret['TAG'] # try to use deleted version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.2 where a=1', ) assert 'does not exists' in str(exc_info.value) # exception with deleting active version with pytest.raises(Exception) as exc_info: self.run_sql() assert 'is not found' in str(exc_info.value) # drop predictor and check model is deleted and no versions self.run_sql('drop predictor proj.task_model') ret = self.run_sql('select * from proj.models') assert len(ret) == 0 ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 0 "," CREATE PREDICTOR proj.task_model from pg (select * from tasks) PREDICT a using engine='dummy_ml', tag = 'first' SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m retrain proj.task_model from pg (select * from tasks where a=2) PREDICT b using tag = 'second' SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m retrain proj.task_model from pg (select * from tasks where a=2) PREDICT a using tag='third', active=0 SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m SELECT m.* FROM pg.tasks as t JOIN proj.task_model.3 as m update proj.models_versions set active=1 where version=1 and name='task_model' delete from proj.models_versions where version=2 and name='task_model' delete from proj.models_versions where version=3 and model='task_model' ",109,536,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_version_managing(self, data_handler): # set up df = pd.DataFrame([ {'a': 1, 'b': dt.datetime(2020, 1, 1)}, {'a': 2, 'b': dt.datetime(2020, 1, 2)}, {'a': 1, 'b': dt.datetime(2020, 1, 3)}, ]) self.set_handler(data_handler, name='pg', tables={'tasks': df}) # ================= retrain cycles ===================== # create folder self.run_sql('create database proj') # -- create model -- self.run_sql( ) self.wait_predictor('proj', 'task_model') assert data_handler().native_query.call_args[0][0] == 'select * from tasks' # tag works in create model ret = self.run_sql('select * from proj.models') assert ret['TAG'][0] == 'first' # use model ret = self.run_sql() assert len(ret) == 3 assert ret.predicted[0] == 42 # -- retrain predictor with tag -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'second'}) # get current model ret = self.run_sql('select * from proj.models') # check target assert ret['PREDICT'][0] == 'b' # check label assert ret['TAG'][0] == 'second' # check integration sql assert data_handler().native_query.call_args[0][0] == 'select * from tasks where a=2' # use model ret = self.run_sql() assert ret.predicted[0] == 42 # used model has tag 'second' models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # -- retrain again with active=0 -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'third'}) ret = self.run_sql('select * from proj.models') # check target is from previous retrain assert ret['PREDICT'][0] == 'b' # use model ret = self.run_sql() # used model has tag 'second' (previous) models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # ================ working with inactive versions ================= # run 3st version model and check used model version ret = self.run_sql() models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # one-line query model by version ret = self.run_sql('SELECT * from proj.task_model.3 where a=1 and b=2') model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # not existing version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.4 where a=1 and b=2', ) assert 'does not exists' in str(exc_info.value) # ================== managing versions ========================= # show models command # Show models where ret = self.run_sql('Show models') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models from proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models in proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql(""Show models where name='task_model'"") assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql(""Show models from proj where name='xxx'"") assert len(ret) == 0 # ---------------- # See all versions ret = self.run_sql('select * from proj.models_versions') # we have all tags in versions assert set(ret['TAG']) == {'first', 'second', 'third'} # Set active selected version self.run_sql() # get active version ret = self.run_sql('select * from proj.models_versions where active = 1') assert ret['TAG'][0] == 'first' # use active version ? 
# Delete specific version self.run_sql() # deleted version not in list ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 2 assert 'second' not in ret['TAG'] # try to use deleted version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.2 where a=1', ) assert 'does not exists' in str(exc_info.value) # exception with deleting active version with pytest.raises(Exception) as exc_info: self.run_sql() assert 'is not found' in str(exc_info.value) # drop predictor and check model is deleted and no versions self.run_sql('drop predictor proj.task_model') ret = self.run_sql('select * from proj.models') assert len(ret) == 0 ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 0 ``` ###Assistant : CREATE PREDICTOR proj.task_model from pg (select * from tasks) PREDICT a using engine='dummy_ml', tag = 'first' SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m retrain proj.task_model from pg (select * from tasks where a=2) PREDICT b using tag = 'second' SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m retrain proj.task_model from pg (select * from tasks where a=2) PREDICT a using tag='third', active=0 SELECT m.* FROM pg.tasks as t JOIN proj.task_model as m SELECT m.* FROM pg.tasks as t JOIN proj.task_model.3 as m update proj.models_versions set active=1 where version=1 and name='task_model' delete from proj.models_versions where version=2 and name='task_model' delete from proj.models_versions where version=3 and model='task_model' " 828,"def _looks_like_red_hat_lib() -> bool: from distutils.command.install import INSTALL_SCHEMES # type: ignore return all( k in INSTALL_SCHEMES and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k]) for k in (""unix_prefix"", ""unix_home"") ) @functools.lru_cache(maxsize=None)","Red Hat patches platlib in unix_prefix and unix_home, but not purelib. This is the only way I can see to tell a Red Hat-patched Python. ",25,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _looks_like_red_hat_lib() -> bool: from distutils.command.install import INSTALL_SCHEMES # type: ignore return all( k in INSTALL_SCHEMES and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k]) for k in (""unix_prefix"", ""unix_home"") ) @functools.lru_cache(maxsize=None) ``` ###Assistant : Red Hat patches platlib in unix_prefix and unix_home, but not purelib. This is the only way I can see to tell a Red Hat-patched Python. 
" 829,"def get_valid_parent_pages(self, user): # Get queryset of pages where this page type can be added allowed_parent_page_content_types = list( ContentType.objects.get_for_models( *self.model.allowed_parent_page_models() ).values() ) allowed_parent_pages = Page.objects.filter( content_type__in=allowed_parent_page_content_types ) # Get queryset of pages where the user has permission to add subpages if user.is_superuser: pages_where_user_can_add = Page.objects.all() else: pages_where_user_can_add = Page.objects.none() user_perms = UserPagePermissionsProxy(user) for perm in user_perms.permissions.filter(permission_type=""add""): # user has add permission on any subpage of perm.page # (including perm.page itself) pages_where_user_can_add |= Page.objects.descendant_of( perm.page, inclusive=True ) # Combine them return allowed_parent_pages & pages_where_user_can_add "," Identifies possible parent pages for the current user by first looking at allowed_parent_page_models() on self.model to limit options to the correct type of page, then checking permissions on those individual pages to make sure we have permission to add a subpage to it. ",43,83,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_valid_parent_pages(self, user): # Get queryset of pages where this page type can be added allowed_parent_page_content_types = list( ContentType.objects.get_for_models( *self.model.allowed_parent_page_models() ).values() ) allowed_parent_pages = Page.objects.filter( content_type__in=allowed_parent_page_content_types ) # Get queryset of pages where the user has permission to add subpages if user.is_superuser: pages_where_user_can_add = Page.objects.all() else: pages_where_user_can_add = Page.objects.none() user_perms = UserPagePermissionsProxy(user) for perm in user_perms.permissions.filter(permission_type=""add""): # user has add permission on any subpage of perm.page # (including perm.page itself) pages_where_user_can_add |= Page.objects.descendant_of( perm.page, inclusive=True ) # Combine them return allowed_parent_pages & pages_where_user_can_add ``` ###Assistant : Identifies possible parent pages for the current user by first looking at allowed_parent_page_models() on self.model to limit options to the correct type of page, then checking permissions on those individual pages to make sure we have permission to add a subpage to it. " 830,"def get_attendance_list(conditions, filters): attendance_list = frappe.db.sql( % conditions, filters, as_dict=1, ) if not attendance_list: msgprint(_(""No attendance record found""), alert=True, indicator=""orange"") att_map = {} for d in attendance_list: att_map.setdefault(d.employee, frappe._dict()).setdefault(d.day_of_month, """") att_map[d.employee][d.day_of_month] = d.status return att_map ","select employee, day(attendance_date) as day_of_month, status from tabAttendance where docstatus = 1 %s order by employee, attendance_date",17,35,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_attendance_list(conditions, filters): attendance_list = frappe.db.sql( % conditions, filters, as_dict=1, ) if not attendance_list: msgprint(_(""No attendance record found""), alert=True, indicator=""orange"") att_map = {} for d in attendance_list: att_map.setdefault(d.employee, frappe._dict()).setdefault(d.day_of_month, """") att_map[d.employee][d.day_of_month] = d.status return att_map ``` ###Assistant : select employee, day(attendance_date) as day_of_month, status from tabAttendance where docstatus = 1 %s order by employee, attendance_date " 831,"def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),) return reordered_past @add_start_docstrings( , REMBERT_START_DOCSTRING, )"," RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks. ",14,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),) return reordered_past @add_start_docstrings( , REMBERT_START_DOCSTRING, ) ``` ###Assistant : RemBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks. " 832,"def test_remove_as_admin_not_in_team(self): # an org with closed membership (byproduct of flags=0) org = self.create_organization(owner=self.user, flags=0) team = self.create_team(organization=org) admin_user = self.create_user(email=""foo@example.com"", is_superuser=False) self.create_member( organization=org, user=admin_user, role=""admin"", teams=[], # note that admin_user isn't a member of `team` ) self.login_as(admin_user) # first, try deleting the team with open membership off self.get_error_response(team.organization.slug, team.slug, status_code=403) self.assert_team_not_deleted(team.id) # now, with open membership on org.flags.allow_joinleave = True org.save() self.get_success_response(team.organization.slug, team.slug, status_code=204) self.assert_team_deleted(team.id) ","Admins can't remove teams of which they're not a part, unless open membership is on.",15,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_remove_as_admin_not_in_team(self): # an org with closed membership (byproduct of flags=0) org = self.create_organization(owner=self.user, flags=0) team = self.create_team(organization=org) admin_user = self.create_user(email=""foo@example.com"", is_superuser=False) self.create_member( organization=org, user=admin_user, role=""admin"", teams=[], # note that admin_user isn't a member of `team` ) self.login_as(admin_user) # first, try deleting the team with open membership off self.get_error_response(team.organization.slug, team.slug, status_code=403) self.assert_team_not_deleted(team.id) # now, with open membership on org.flags.allow_joinleave = True org.save() self.get_success_response(team.organization.slug, team.slug, status_code=204) self.assert_team_deleted(team.id) ``` ###Assistant : Admins can't remove teams of which they're not a part, unless open membership is on. 
" 833,"def batch_test(num_threads, delay): with mock.patch( ""ray.autoscaler._private.aws.node_provider.make_ec2_client"" ), mock.patch.object(AWSNodeProvider, ""_create_tags"", mock_create_tags): provider = AWSNodeProvider( provider_config={""region"": ""nowhere""}, cluster_name=""default"" ) provider.batch_counter = 0 provider.tag_update_counter = 0 provider.tag_cache = {str(x): {} for x in range(num_threads)} threads = [] for x in range(num_threads): thread = threading.Thread( target=provider.set_node_tags, args=(str(x), {""foo"": ""bar""}) ) threads.append(thread) for thread in threads: thread.start() time.sleep(delay) for thread in threads: thread.join() return provider.batch_counter, provider.tag_update_counter ","Run AWSNodeProvider.set_node_tags in several threads, with a specified delay between thread launches. Return the number of batches of tag updates and the number of tags updated. ",26,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def batch_test(num_threads, delay): with mock.patch( ""ray.autoscaler._private.aws.node_provider.make_ec2_client"" ), mock.patch.object(AWSNodeProvider, ""_create_tags"", mock_create_tags): provider = AWSNodeProvider( provider_config={""region"": ""nowhere""}, cluster_name=""default"" ) provider.batch_counter = 0 provider.tag_update_counter = 0 provider.tag_cache = {str(x): {} for x in range(num_threads)} threads = [] for x in range(num_threads): thread = threading.Thread( target=provider.set_node_tags, args=(str(x), {""foo"": ""bar""}) ) threads.append(thread) for thread in threads: thread.start() time.sleep(delay) for thread in threads: thread.join() return provider.batch_counter, provider.tag_update_counter ``` ###Assistant : Run AWSNodeProvider.set_node_tags in several threads, with a specified delay between thread launches. Return the number of batches of tag updates and the number of tags updated. " 834,"def forward(self, feats, img_metas): y = self.last_feat_conv(feats[-1]) for i in range(self.num_inputs - 2, -1, -1): x = feats[i] cur_fpn = self.lateral_convs[i](x) y = cur_fpn + \ F.interpolate(y, size=cur_fpn.shape[-2:], mode='nearest') y = self.output_convs[i](y) mask_feature = self.mask_feature(y) memory = feats[-1] return mask_feature, memory @PLUGIN_LAYERS.register_module()"," Args: feats (list[Tensor]): Feature maps of each level. Each has shape of (batch_size, c, h, w). img_metas (list[dict]): List of image information. Pass in for creating more accurate padding mask. Not used here. Returns: tuple: a tuple containing the following: - mask_feature (Tensor): Shape (batch_size, c, h, w). - memory (Tensor): Output of last stage of backbone.\ Shape (batch_size, c, h, w). ",62,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, feats, img_metas): y = self.last_feat_conv(feats[-1]) for i in range(self.num_inputs - 2, -1, -1): x = feats[i] cur_fpn = self.lateral_convs[i](x) y = cur_fpn + \ F.interpolate(y, size=cur_fpn.shape[-2:], mode='nearest') y = self.output_convs[i](y) mask_feature = self.mask_feature(y) memory = feats[-1] return mask_feature, memory @PLUGIN_LAYERS.register_module() ``` ###Assistant : Args: feats (list[Tensor]): Feature maps of each level. Each has shape of (batch_size, c, h, w). img_metas (list[dict]): List of image information. Pass in for creating more accurate padding mask. Not used here. 
Returns: tuple: a tuple containing the following: - mask_feature (Tensor): Shape (batch_size, c, h, w). - memory (Tensor): Output of last stage of backbone.\ Shape (batch_size, c, h, w). " 835,"async def test_remote_scanner_expires_non_connectable(hass): manager = _get_manager() switchbot_device = BLEDevice( ""44:44:33:11:23:45"", ""wohand"", {}, rssi=-100, ) switchbot_device_adv = generate_advertisement_data( local_name=""wohand"", service_uuids=[], manufacturer_data={1: b""\x01""}, rssi=-100, ) ",Test the remote scanner expires stale non connectable data.,9,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_remote_scanner_expires_non_connectable(hass): manager = _get_manager() switchbot_device = BLEDevice( ""44:44:33:11:23:45"", ""wohand"", {}, rssi=-100, ) switchbot_device_adv = generate_advertisement_data( local_name=""wohand"", service_uuids=[], manufacturer_data={1: b""\x01""}, rssi=-100, ) ``` ###Assistant : Test the remote scanner expires stale non connectable data. " 836,"def convert_yaml_objects_to_native(obj): if isinstance(obj, dict): return dict((k, convert_yaml_objects_to_native(v)) for k, v in obj.items()) elif isinstance(obj, list): return [convert_yaml_objects_to_native(v) for v in obj] elif isinstance(obj, text_type): return text_type(obj) else: return obj ","Older versions of the ``toml`` python library, and tomllib, don't have a pluggable way to tell the encoder about custom types, so we need to ensure objects that we pass are native types. Used with: - ``toml<0.10.0`` where ``toml.TomlEncoder`` is missing - ``tomli`` or ``tomllib`` This function recurses an object and ensures we cast any of the types from ``ansible.parsing.yaml.objects`` into their native types, effectively cleansing the data before we hand it over to the toml library. This function doesn't directly check for the types from ``ansible.parsing.yaml.objects`` but instead checks for the types those objects inherit from, to offer more flexibility. ",101,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert_yaml_objects_to_native(obj): if isinstance(obj, dict): return dict((k, convert_yaml_objects_to_native(v)) for k, v in obj.items()) elif isinstance(obj, list): return [convert_yaml_objects_to_native(v) for v in obj] elif isinstance(obj, text_type): return text_type(obj) else: return obj ``` ###Assistant : Older versions of the ``toml`` python library, and tomllib, don't have a pluggable way to tell the encoder about custom types, so we need to ensure objects that we pass are native types. Used with: - ``toml<0.10.0`` where ``toml.TomlEncoder`` is missing - ``tomli`` or ``tomllib`` This function recurses an object and ensures we cast any of the types from ``ansible.parsing.yaml.objects`` into their native types, effectively cleansing the data before we hand it over to the toml library. This function doesn't directly check for the types from ``ansible.parsing.yaml.objects`` but instead checks for the types those objects inherit from, to offer more flexibility. 
" 837,"def make_future_dataframe(self, periods, freq='D', include_history=True): if self.history_dates is None: raise Exception('Model has not been fit.') if freq is None: # taking the tail makes freq inference more reliable freq = pd.infer_freq(self.history_dates.tail(5)) # returns None if inference failed if freq is None: raise Exception('Unable to infer `freq`') last_date = self.history_dates.max() dates = pd.date_range( start=last_date, periods=periods + 1, # An extra in case we include start freq=freq) dates = dates[dates > last_date] # Drop start if equals last_date dates = dates[:periods] # Return correct number of periods if include_history: dates = np.concatenate((np.array(self.history_dates), dates)) return pd.DataFrame({'ds': dates}) ","Simulate the trend using the extrapolated generative model. Parameters ---------- periods: Int number of periods to forecast forward. freq: Any valid frequency for pd.date_range, such as 'D' or 'M'. include_history: Boolean to include the historical dates in the data frame for predictions. Returns ------- pd.Dataframe that extends forward from the end of self.history for the requested number of periods. ",59,94,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_future_dataframe(self, periods, freq='D', include_history=True): if self.history_dates is None: raise Exception('Model has not been fit.') if freq is None: # taking the tail makes freq inference more reliable freq = pd.infer_freq(self.history_dates.tail(5)) # returns None if inference failed if freq is None: raise Exception('Unable to infer `freq`') last_date = self.history_dates.max() dates = pd.date_range( start=last_date, periods=periods + 1, # An extra in case we include start freq=freq) dates = dates[dates > last_date] # Drop start if equals last_date dates = dates[:periods] # Return correct number of periods if include_history: dates = np.concatenate((np.array(self.history_dates), dates)) return pd.DataFrame({'ds': dates}) ``` ###Assistant : Simulate the trend using the extrapolated generative model. Parameters ---------- periods: Int number of periods to forecast forward. freq: Any valid frequency for pd.date_range, such as 'D' or 'M'. include_history: Boolean to include the historical dates in the data frame for predictions. Returns ------- pd.Dataframe that extends forward from the end of self.history for the requested number of periods. " 838,"def array_to_blobproto(arr, diff=None): blob = caffe_pb2.BlobProto() blob.shape.dim.extend(arr.shape) blob.data.extend(arr.astype(float).flat) if diff is not None: blob.diff.extend(diff.astype(float).flat) return blob ","Converts a N-dimensional array to blob proto. If diff is given, also convert the diff. You need to make sure that arr and diff have the same shape, and this function does not do sanity check. ",36,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def array_to_blobproto(arr, diff=None): blob = caffe_pb2.BlobProto() blob.shape.dim.extend(arr.shape) blob.data.extend(arr.astype(float).flat) if diff is not None: blob.diff.extend(diff.astype(float).flat) return blob ``` ###Assistant : Converts a N-dimensional array to blob proto. If diff is given, also convert the diff. You need to make sure that arr and diff have the same shape, and this function does not do sanity check. 
" 839,"def test_cross_signing_keys_retry(self): remote_user_id = ""@john:test_remote"" remote_master_key = ""85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY"" remote_self_signing_key = ""QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ"" # Register mock device list retrieval on the federation client. federation_client = self.homeserver.get_federation_client() federation_client.query_user_devices = Mock( return_value=make_awaitable( { ""user_id"": remote_user_id, ""stream_id"": 1, ""devices"": [], ""master_key"": { ""user_id"": remote_user_id, ""usage"": [""master""], ""keys"": {""ed25519:"" + remote_master_key: remote_master_key}, }, ""self_signing_key"": { ""user_id"": remote_user_id, ""usage"": [""self_signing""], ""keys"": { ""ed25519:"" + remote_self_signing_key: remote_self_signing_key }, }, } ) ) # Resync the device list. device_handler = self.homeserver.get_device_handler() self.get_success( device_handler.device_list_updater.user_device_resync(remote_user_id), ) # Retrieve the cross-signing keys for this user. keys = self.get_success( self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]), ) self.assertTrue(remote_user_id in keys) # Check that the master key is the one returned by the mock. master_key = keys[remote_user_id][""master""] self.assertEqual(len(master_key[""keys""]), 1) self.assertTrue(""ed25519:"" + remote_master_key in master_key[""keys""].keys()) self.assertTrue(remote_master_key in master_key[""keys""].values()) # Check that the self-signing key is the one returned by the mock. self_signing_key = keys[remote_user_id][""self_signing""] self.assertEqual(len(self_signing_key[""keys""]), 1) self.assertTrue( ""ed25519:"" + remote_self_signing_key in self_signing_key[""keys""].keys(), ) self.assertTrue(remote_self_signing_key in self_signing_key[""keys""].values()) ","Tests that resyncing a device list correctly processes cross-signing keys from the remote server. ",14,145,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_cross_signing_keys_retry(self): remote_user_id = ""@john:test_remote"" remote_master_key = ""85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY"" remote_self_signing_key = ""QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ"" # Register mock device list retrieval on the federation client. federation_client = self.homeserver.get_federation_client() federation_client.query_user_devices = Mock( return_value=make_awaitable( { ""user_id"": remote_user_id, ""stream_id"": 1, ""devices"": [], ""master_key"": { ""user_id"": remote_user_id, ""usage"": [""master""], ""keys"": {""ed25519:"" + remote_master_key: remote_master_key}, }, ""self_signing_key"": { ""user_id"": remote_user_id, ""usage"": [""self_signing""], ""keys"": { ""ed25519:"" + remote_self_signing_key: remote_self_signing_key }, }, } ) ) # Resync the device list. device_handler = self.homeserver.get_device_handler() self.get_success( device_handler.device_list_updater.user_device_resync(remote_user_id), ) # Retrieve the cross-signing keys for this user. keys = self.get_success( self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]), ) self.assertTrue(remote_user_id in keys) # Check that the master key is the one returned by the mock. 
master_key = keys[remote_user_id][""master""] self.assertEqual(len(master_key[""keys""]), 1) self.assertTrue(""ed25519:"" + remote_master_key in master_key[""keys""].keys()) self.assertTrue(remote_master_key in master_key[""keys""].values()) # Check that the self-signing key is the one returned by the mock. self_signing_key = keys[remote_user_id][""self_signing""] self.assertEqual(len(self_signing_key[""keys""]), 1) self.assertTrue( ""ed25519:"" + remote_self_signing_key in self_signing_key[""keys""].keys(), ) self.assertTrue(remote_self_signing_key in self_signing_key[""keys""].values()) ``` ###Assistant : Tests that resyncing a device list correctly processes cross-signing keys from the remote server. " 840,"def test_exec_success(self, db_mock_class): run = { 'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, } op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run) db_mock = db_mock_class.return_value db_mock.submit_run.return_value = 1 db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '') op.execute(None) expected = databricks_operator._deep_string_coerce( {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID} ) db_mock_class.assert_called_once_with( DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay, retry_args=None, ) db_mock.submit_run.assert_called_once_with(expected) db_mock.get_run_page_url.assert_called_once_with(RUN_ID) db_mock.get_run_state.assert_called_once_with(RUN_ID) assert RUN_ID == op.run_id "," Test the execute function in case where the run is successful. ",11,50,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_exec_success(self, db_mock_class): run = { 'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, } op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run) db_mock = db_mock_class.return_value db_mock.submit_run.return_value = 1 db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '') op.execute(None) expected = databricks_operator._deep_string_coerce( {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID} ) db_mock_class.assert_called_once_with( DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay, retry_args=None, ) db_mock.submit_run.assert_called_once_with(expected) db_mock.get_run_page_url.assert_called_once_with(RUN_ID) db_mock.get_run_state.assert_called_once_with(RUN_ID) assert RUN_ID == op.run_id ``` ###Assistant : Test the execute function in case where the run is successful. " 841,"def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input=""rgb""): rgb = _color_to_rgb(color, input) h, s, l = husl.rgb_to_husl(*rgb) gray_s, gray_l = .15 * s, 15 gray = _color_to_rgb((h, gray_s, gray_l), input=""husl"") colors = [rgb, gray] if reverse else [gray, rgb] return blend_palette(colors, n_colors, as_cmap) ","Make a sequential palette that blends from dark to ``color``. This kind of palette is good for data that range between relatively uninteresting low values and interesting high values. The ``color`` parameter can be specified in a number of ways, including all options for defining a color in matplotlib and several additional color spaces that are handled by seaborn. You can also use the database of named colors from the XKCD color survey. If you are using the IPython notebook, you can also choose this palette interactively with the :func:`choose_dark_palette` function. 
Parameters ---------- color : base color for high values hex, rgb-tuple, or html color name n_colors : int, optional number of colors in the palette reverse : bool, optional if True, reverse the direction of the blend as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. input : {'rgb', 'hls', 'husl', xkcd'} Color space to interpret the input color. The first three options apply to tuple inputs and the latter applies to string inputs. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- light_palette : Create a sequential palette with bright low values. diverging_palette : Create a diverging palette with two colors. Examples -------- .. include:: ../docstrings/dark_palette.rst ",201,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input=""rgb""): rgb = _color_to_rgb(color, input) h, s, l = husl.rgb_to_husl(*rgb) gray_s, gray_l = .15 * s, 15 gray = _color_to_rgb((h, gray_s, gray_l), input=""husl"") colors = [rgb, gray] if reverse else [gray, rgb] return blend_palette(colors, n_colors, as_cmap) ``` ###Assistant : Make a sequential palette that blends from dark to ``color``. This kind of palette is good for data that range between relatively uninteresting low values and interesting high values. The ``color`` parameter can be specified in a number of ways, including all options for defining a color in matplotlib and several additional color spaces that are handled by seaborn. You can also use the database of named colors from the XKCD color survey. If you are using the IPython notebook, you can also choose this palette interactively with the :func:`choose_dark_palette` function. Parameters ---------- color : base color for high values hex, rgb-tuple, or html color name n_colors : int, optional number of colors in the palette reverse : bool, optional if True, reverse the direction of the blend as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. input : {'rgb', 'hls', 'husl', xkcd'} Color space to interpret the input color. The first three options apply to tuple inputs and the latter applies to string inputs. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- light_palette : Create a sequential palette with bright low values. diverging_palette : Create a diverging palette with two colors. Examples -------- .. include:: ../docstrings/dark_palette.rst " 842,"def contour(self, X, win=None, env=None, opts=None): return self._surface(X=X, stype=""contour"", opts=opts, win=win, env=env) "," This function draws a contour plot. It takes as input an `NxM` tensor `X` that specifies the value at each location in the contour plot. The following `opts` are supported: - `opts.colormap`: colormap (`string`; default = `'Viridis'`) - `opts.xmin` : clip minimum value (`number`; default = `X:min()`) - `opts.xmax` : clip maximum value (`number`; default = `X:max()`) ",57,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def contour(self, X, win=None, env=None, opts=None): return self._surface(X=X, stype=""contour"", opts=opts, win=win, env=env) ``` ###Assistant : This function draws a contour plot. It takes as input an `NxM` tensor `X` that specifies the value at each location in the contour plot. 
The following `opts` are supported: - `opts.colormap`: colormap (`string`; default = `'Viridis'`) - `opts.xmin` : clip minimum value (`number`; default = `X:min()`) - `opts.xmax` : clip maximum value (`number`; default = `X:max()`) " 843,"def get_themes(): themes = {} eps = set(importlib_metadata.entry_points(group='mkdocs.themes')) builtins = {ep.name for ep in eps if ep.dist.name == 'mkdocs'} for theme in eps: if theme.name in builtins and theme.dist.name != 'mkdocs': raise exceptions.ConfigurationError( f""The theme '{theme.name}' is a builtin theme but the package '{theme.dist.name}' "" ""attempts to provide a theme with the same name."" ) elif theme.name in themes: log.warning( f""A theme named '{theme.name}' is provided by the Python packages '{theme.dist.name}' "" f""and '{themes[theme.name].dist.name}'. The one in '{theme.dist.name}' will be used."" ) themes[theme.name] = theme return themes ",Return a dict of all installed themes as {name: EntryPoint}.,10,87,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_themes(): themes = {} eps = set(importlib_metadata.entry_points(group='mkdocs.themes')) builtins = {ep.name for ep in eps if ep.dist.name == 'mkdocs'} for theme in eps: if theme.name in builtins and theme.dist.name != 'mkdocs': raise exceptions.ConfigurationError( f""The theme '{theme.name}' is a builtin theme but the package '{theme.dist.name}' "" ""attempts to provide a theme with the same name."" ) elif theme.name in themes: log.warning( f""A theme named '{theme.name}' is provided by the Python packages '{theme.dist.name}' "" f""and '{themes[theme.name].dist.name}'. The one in '{theme.dist.name}' will be used."" ) themes[theme.name] = theme return themes ``` ###Assistant : Return a dict of all installed themes as {name: EntryPoint}. " 844,"def test_subscribe_by_default(self): NotificationSetting.objects.update_settings( ExternalProviders.EMAIL, NotificationSettingTypes.ISSUE_ALERTS, NotificationSettingOptionValues.NEVER, user=self.user, ) response = self.get_success_response(""me"") assert response.data.get(""subscribeByDefault"") is False "," Test that we expect project-independent issue alert preferences to be returned as `subscribe_by_default`. ",13,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_subscribe_by_default(self): NotificationSetting.objects.update_settings( ExternalProviders.EMAIL, NotificationSettingTypes.ISSUE_ALERTS, NotificationSettingOptionValues.NEVER, user=self.user, ) response = self.get_success_response(""me"") assert response.data.get(""subscribeByDefault"") is False ``` ###Assistant : Test that we expect project-independent issue alert preferences to be returned as `subscribe_by_default`. " 845,"def _get_users_with_any_permission_codenames_filter(self, permission_codenames): permissions = Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames ) return ( Q(is_superuser=True) | Q(user_permissions__in=permissions) | Q(groups__permissions__in=permissions) ) & Q(is_active=True) "," Given a list of permission codenames, return a filter expression which will find all users which have any of those permissions - either through group permissions, user permissions, or implicitly through being a superuser. ",34,19,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _get_users_with_any_permission_codenames_filter(self, permission_codenames): permissions = Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames ) return ( Q(is_superuser=True) | Q(user_permissions__in=permissions) | Q(groups__permissions__in=permissions) ) & Q(is_active=True) ``` ###Assistant : Given a list of permission codenames, return a filter expression which will find all users which have any of those permissions - either through group permissions, user permissions, or implicitly through being a superuser. " 846,"def _async_update_rssi_and_transients(self) -> None: for ( unique_id, ibeacon_advertisement, ) in self._last_ibeacon_advertisement_by_unique_id.items(): address = unique_id.split(""_"")[-1] service_info = bluetooth.async_last_service_info( self.hass, address, connectable=False ) if not service_info: continue if address in self._transient_seen_count: self._transient_seen_count[address] += 1 if self._transient_seen_count[address] == MIN_SEEN_TRANSIENT_NEW: self._transient_seen_count.pop(address) _async_dispatch_update( self.hass, unique_id, service_info, ibeacon_advertisement, True, True, ) continue if service_info.rssi != ibeacon_advertisement.rssi: ibeacon_advertisement.update_rssi(service_info.rssi) async_dispatcher_send( self.hass, signal_seen(unique_id), ibeacon_advertisement, ) ","Check to see if the rssi has changed and update any devices. We don't callback on RSSI changes so we need to check them here and send them over the dispatcher periodically to ensure the distance calculation is update. If the transient flag is set we also need to check to see if the device is still transmitting and increment the counter ",62,56,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _async_update_rssi_and_transients(self) -> None: for ( unique_id, ibeacon_advertisement, ) in self._last_ibeacon_advertisement_by_unique_id.items(): address = unique_id.split(""_"")[-1] service_info = bluetooth.async_last_service_info( self.hass, address, connectable=False ) if not service_info: continue if address in self._transient_seen_count: self._transient_seen_count[address] += 1 if self._transient_seen_count[address] == MIN_SEEN_TRANSIENT_NEW: self._transient_seen_count.pop(address) _async_dispatch_update( self.hass, unique_id, service_info, ibeacon_advertisement, True, True, ) continue if service_info.rssi != ibeacon_advertisement.rssi: ibeacon_advertisement.update_rssi(service_info.rssi) async_dispatcher_send( self.hass, signal_seen(unique_id), ibeacon_advertisement, ) ``` ###Assistant : Check to see if the rssi has changed and update any devices. We don't callback on RSSI changes so we need to check them here and send them over the dispatcher periodically to ensure the distance calculation is update. If the transient flag is set we also need to check to see if the device is still transmitting and increment the counter " 847,"def setup_app_for_worker(app, loglevel, logfile) -> None: # type: (Celery, Union[str, int], str) -> None app.finalize() app.set_current() app.set_default() type(app.log)._setup = False app.log.setup(loglevel=loglevel, logfile=logfile) ",Setup the app to be used for starting an embedded worker.,11,22,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def setup_app_for_worker(app, loglevel, logfile) -> None: # type: (Celery, Union[str, int], str) -> None app.finalize() app.set_current() app.set_default() type(app.log)._setup = False app.log.setup(loglevel=loglevel, logfile=logfile) ``` ###Assistant : Setup the app to be used for starting an embedded worker. " 848,"def rank(self): if self._rank is None: self._rank = int(gray_to_bin(self.current), 2) return self._rank "," Ranks the Gray code. A ranking algorithm determines the position (or rank) of a combinatorial object among all the objects w.r.t. a given order. For example, the 4 bit binary reflected Gray code (BRGC) '0101' has a rank of 6 as it appears in the 6th position in the canonical ordering of the family of 4 bit Gray codes. Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3) >>> list(a.generate_gray()) ['000', '001', '011', '010', '110', '111', '101', '100'] >>> GrayCode(3, start='100').rank 7 >>> GrayCode(3, rank=7).current '100' See Also ======== unrank References ========== .. [1] http://statweb.stanford.edu/~susan/courses/s208/node12.html ",97,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rank(self): if self._rank is None: self._rank = int(gray_to_bin(self.current), 2) return self._rank ``` ###Assistant : Ranks the Gray code. A ranking algorithm determines the position (or rank) of a combinatorial object among all the objects w.r.t. a given order. For example, the 4 bit binary reflected Gray code (BRGC) '0101' has a rank of 6 as it appears in the 6th position in the canonical ordering of the family of 4 bit Gray codes. Examples ======== >>> from sympy.combinatorics import GrayCode >>> a = GrayCode(3) >>> list(a.generate_gray()) ['000', '001', '011', '010', '110', '111', '101', '100'] >>> GrayCode(3, start='100').rank 7 >>> GrayCode(3, rank=7).current '100' See Also ======== unrank References ========== .. [1] http://statweb.stanford.edu/~susan/courses/s208/node12.html " 849,"async def async_update(self, log_errors=True): if not self._async_client: self._async_client = get_async_client( self._hass, verify_ssl=self._verify_ssl ) rendered_headers = template.render_complex(self._headers, parse_result=False) rendered_params = template.render_complex(self._params) _LOGGER.debug(""Updating from %s"", self._resource) try: response = await self._async_client.request( self._method, self._resource, headers=rendered_headers, params=rendered_params, auth=self._auth, content=self._request_data, timeout=self._timeout, follow_redirects=True, ) self.data = response.text self.headers = response.headers except httpx.TimeoutException as ex: if log_errors: _LOGGER.error(""Timeout while fetching data: %s"", self._resource) self.last_exception = ex self.data = None self.headers = None except httpx.RequestError as ex: if log_errors: _LOGGER.error( ""Error fetching data: %s failed with %s"", self._resource, ex ) self.last_exception = ex self.data = None self.headers = None ",Get the latest data from REST service with provided method.,10,91,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def async_update(self, log_errors=True): if not self._async_client: self._async_client = get_async_client( self._hass, verify_ssl=self._verify_ssl ) rendered_headers = template.render_complex(self._headers, parse_result=False) rendered_params = template.render_complex(self._params) _LOGGER.debug(""Updating from %s"", self._resource) try: response = await self._async_client.request( self._method, self._resource, headers=rendered_headers, params=rendered_params, auth=self._auth, content=self._request_data, timeout=self._timeout, follow_redirects=True, ) self.data = response.text self.headers = response.headers except httpx.TimeoutException as ex: if log_errors: _LOGGER.error(""Timeout while fetching data: %s"", self._resource) self.last_exception = ex self.data = None self.headers = None except httpx.RequestError as ex: if log_errors: _LOGGER.error( ""Error fetching data: %s failed with %s"", self._resource, ex ) self.last_exception = ex self.data = None self.headers = None ``` ###Assistant : Get the latest data from REST service with provided method. " 850,"def set_default_timezone() -> None: dotenv.load_dotenv(USER_ENV_FILE) user_tz = os.getenv(""OPENBB_TIMEZONE"") if not user_tz: dotenv.set_key(USER_ENV_FILE, ""OPENBB_TIMEZONE"", ""America/New_York"") ",Set a default (America/New_York) timezone if one doesn't exist.,9,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_default_timezone() -> None: dotenv.load_dotenv(USER_ENV_FILE) user_tz = os.getenv(""OPENBB_TIMEZONE"") if not user_tz: dotenv.set_key(USER_ENV_FILE, ""OPENBB_TIMEZONE"", ""America/New_York"") ``` ###Assistant : Set a default (America/New_York) timezone if one doesn't exist. " 851,"def return_stopped_response(self) -> dict: return {k: ""Agent stopped due to max iterations."" for k in self.return_values} ",Return response when agent has been stopped due to max iterations.,11,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def return_stopped_response(self) -> dict: return {k: ""Agent stopped due to max iterations."" for k in self.return_values} ``` ###Assistant : Return response when agent has been stopped due to max iterations. " 852,"def clean(self, value): if self.null_option is not None and value == settings.FILTERS_NULL_CHOICE_VALUE: return None return super().clean(value) "," When null option is enabled and ""None"" is sent as part of a form to be submitted, it is sent as the string 'null'. This will check for that condition and gracefully handle the conversion to a NoneType. ",38,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clean(self, value): if self.null_option is not None and value == settings.FILTERS_NULL_CHOICE_VALUE: return None return super().clean(value) ``` ###Assistant : When null option is enabled and ""None"" is sent as part of a form to be submitted, it is sent as the string 'null'. This will check for that condition and gracefully handle the conversion to a NoneType. 
" 853,"def test_unexpected_auth_events(self): creator = ""@creator:example.com"" create_event = _create_event(RoomVersions.V9, creator) join_event = _join_event(RoomVersions.V9, creator) pl_event = _power_levels_event( RoomVersions.V9, creator, {""state_default"": 30, ""users"": {""creator"": 100}}, ) join_rules_event = _join_rules_event(RoomVersions.V9, creator, ""public"") event_store = _StubEventSourceStore() event_store.add_events([create_event, join_event, pl_event, join_rules_event]) good_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event] ) # join rules should *not* be included in the auth events. bad_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event, join_rules_event], ) get_awaitable_result( event_auth.check_state_independent_auth_rules(event_store, good_event) ) with self.assertRaises(AuthError): get_awaitable_result( event_auth.check_state_independent_auth_rules(event_store, bad_event) ) ","Events with excess auth_events should be rejected https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules 2. Reject if event has auth_events that: 2. have entries whose type and state_key don’t match those specified by the auth events selection algorithm described in the server specification. ",37,76,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_unexpected_auth_events(self): creator = ""@creator:example.com"" create_event = _create_event(RoomVersions.V9, creator) join_event = _join_event(RoomVersions.V9, creator) pl_event = _power_levels_event( RoomVersions.V9, creator, {""state_default"": 30, ""users"": {""creator"": 100}}, ) join_rules_event = _join_rules_event(RoomVersions.V9, creator, ""public"") event_store = _StubEventSourceStore() event_store.add_events([create_event, join_event, pl_event, join_rules_event]) good_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event] ) # join rules should *not* be included in the auth events. bad_event = _random_state_event( RoomVersions.V9, creator, [create_event, join_event, pl_event, join_rules_event], ) get_awaitable_result( event_auth.check_state_independent_auth_rules(event_store, good_event) ) with self.assertRaises(AuthError): get_awaitable_result( event_auth.check_state_independent_auth_rules(event_store, bad_event) ) ``` ###Assistant : Events with excess auth_events should be rejected https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules 2. Reject if event has auth_events that: 2. have entries whose type and state_key don’t match those specified by the auth events selection algorithm described in the server specification. 
" 854,"async def _consume_incoming(self) -> None: while True: message_json = await self.incoming_queue.get() if message_json is None: self.incoming_queue.task_done() break type = message_json[""type""] if type == ""client_log"": path = message_json[""payload""][""path""] line_number = message_json[""payload""][""line_number""] timestamp = message_json[""payload""][""timestamp""] encoded_segments = message_json[""payload""][""encoded_segments""] decoded_segments = base64.b64decode(encoded_segments) segments = pickle.loads(decoded_segments) self.service.console.print( DevtoolsLogMessage( segments=segments, path=path, line_number=line_number, unix_timestamp=timestamp, ) ) elif type == ""client_spillover"": spillover = int(message_json[""payload""][""spillover""]) info_renderable = DevtoolsInternalMessage( f""Discarded {spillover} messages"", level=""warning"" ) self.service.console.print(info_renderable) self.incoming_queue.task_done() ","Consume messages from the incoming (client -> server) Queue, and print the corresponding renderables to the console for each message. ",20,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def _consume_incoming(self) -> None: while True: message_json = await self.incoming_queue.get() if message_json is None: self.incoming_queue.task_done() break type = message_json[""type""] if type == ""client_log"": path = message_json[""payload""][""path""] line_number = message_json[""payload""][""line_number""] timestamp = message_json[""payload""][""timestamp""] encoded_segments = message_json[""payload""][""encoded_segments""] decoded_segments = base64.b64decode(encoded_segments) segments = pickle.loads(decoded_segments) self.service.console.print( DevtoolsLogMessage( segments=segments, path=path, line_number=line_number, unix_timestamp=timestamp, ) ) elif type == ""client_spillover"": spillover = int(message_json[""payload""][""spillover""]) info_renderable = DevtoolsInternalMessage( f""Discarded {spillover} messages"", level=""warning"" ) self.service.console.print(info_renderable) self.incoming_queue.task_done() ``` ###Assistant : Consume messages from the incoming (client -> server) Queue, and print the corresponding renderables to the console for each message. " 855,"def _get_basic_ray_cr() -> dict: cr_path = str( Path(__file__).resolve().parents[2] / ""python"" / ""ray"" / ""autoscaler"" / ""kuberay"" / ""ray-cluster.complete.yaml"" ) return yaml.safe_load(open(cr_path).read()) ",Returns the example Ray CR included in the Ray documentation.,10,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_basic_ray_cr() -> dict: cr_path = str( Path(__file__).resolve().parents[2] / ""python"" / ""ray"" / ""autoscaler"" / ""kuberay"" / ""ray-cluster.complete.yaml"" ) return yaml.safe_load(open(cr_path).read()) ``` ###Assistant : Returns the example Ray CR included in the Ray documentation. " 856,"def spectral_graph_forge(G, alpha, transformation=""identity"", seed=None): import numpy as np import scipy as sp import scipy.stats # call as sp.stats available_transformations = [""identity"", ""modularity""] alpha = np.clip(alpha, 0, 1) A = nx.to_numpy_array(G) n = A.shape[1] level = int(round(n * alpha)) if transformation not in available_transformations: msg = f""{transformation!r} is not a valid transformation. 
"" msg += f""Transformations: {available_transformations}"" raise nx.NetworkXError(msg) K = np.ones((1, n)) @ A B = A if transformation == ""modularity"": B -= K.T @ K / K.sum() # Compute low-rank approximation of B evals, evecs = np.linalg.eigh(B) k = np.argsort(np.abs(evals))[::-1] # indices of evals in descending order evecs[:, k[np.arange(level, n)]] = 0 # set smallest eigenvectors to 0 B = evecs @ np.diag(evals) @ evecs.T if transformation == ""modularity"": B += K.T @ K / K.sum() B = np.clip(B, 0, 1) np.fill_diagonal(B, 0) for i in range(n - 1): B[i, i + 1 :] = sp.stats.bernoulli.rvs(B[i, i + 1 :], random_state=seed) B[i + 1 :, i] = np.transpose(B[i, i + 1 :]) H = nx.from_numpy_array(B) return H ","Returns a random simple graph with spectrum resembling that of `G` This algorithm, called Spectral Graph Forge (SGF), computes the eigenvectors of a given graph adjacency matrix, filters them and builds a random graph with a similar eigenstructure. SGF has been proved to be particularly useful for synthesizing realistic social networks and it can also be used to anonymize graph sensitive data. Parameters ---------- G : Graph alpha : float Ratio representing the percentage of eigenvectors of G to consider, values in [0,1]. transformation : string, optional Represents the intended matrix linear transformation, possible values are 'identity' and 'modularity' seed : integer, random_state, or None (default) Indicator of numpy random number generation state. See :ref:`Randomness`. Returns ------- H : Graph A graph with a similar eigenvector structure of the input one. Raises ------ NetworkXError If transformation has a value different from 'identity' or 'modularity' Notes ----- Spectral Graph Forge (SGF) generates a random simple graph resembling the global properties of the given one. It leverages the low-rank approximation of the associated adjacency matrix driven by the *alpha* precision parameter. SGF preserves the number of nodes of the input graph and their ordering. This way, nodes of output graphs resemble the properties of the input one and attributes can be directly mapped. It considers the graph adjacency matrices which can optionally be transformed to other symmetric real matrices (currently transformation options include *identity* and *modularity*). The *modularity* transformation, in the sense of Newman's modularity matrix allows the focusing on community structure related properties of the graph. SGF applies a low-rank approximation whose fixed rank is computed from the ratio *alpha* of the input graph adjacency matrix dimension. This step performs a filtering on the input eigenvectors similar to the low pass filtering common in telecommunications. The filtered values (after truncation) are used as input to a Bernoulli sampling for constructing a random adjacency matrix. References ---------- .. [1] L. Baldesi, C. T. Butts, A. Markopoulou, ""Spectral Graph Forge: Graph Generation Targeting Modularity"", IEEE Infocom, '18. https://arxiv.org/abs/1801.01715 .. [2] M. Newman, ""Networks: an introduction"", Oxford university press, 2010 Examples -------- >>> G = nx.karate_club_graph() >>> H = nx.spectral_graph_forge(G, 0.3) >>> ",358,169,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def spectral_graph_forge(G, alpha, transformation=""identity"", seed=None): import numpy as np import scipy as sp import scipy.stats # call as sp.stats available_transformations = [""identity"", ""modularity""] alpha = np.clip(alpha, 0, 1) A = nx.to_numpy_array(G) n = A.shape[1] level = int(round(n * alpha)) if transformation not in available_transformations: msg = f""{transformation!r} is not a valid transformation. "" msg += f""Transformations: {available_transformations}"" raise nx.NetworkXError(msg) K = np.ones((1, n)) @ A B = A if transformation == ""modularity"": B -= K.T @ K / K.sum() # Compute low-rank approximation of B evals, evecs = np.linalg.eigh(B) k = np.argsort(np.abs(evals))[::-1] # indices of evals in descending order evecs[:, k[np.arange(level, n)]] = 0 # set smallest eigenvectors to 0 B = evecs @ np.diag(evals) @ evecs.T if transformation == ""modularity"": B += K.T @ K / K.sum() B = np.clip(B, 0, 1) np.fill_diagonal(B, 0) for i in range(n - 1): B[i, i + 1 :] = sp.stats.bernoulli.rvs(B[i, i + 1 :], random_state=seed) B[i + 1 :, i] = np.transpose(B[i, i + 1 :]) H = nx.from_numpy_array(B) return H ``` ###Assistant : Returns a random simple graph with spectrum resembling that of `G` This algorithm, called Spectral Graph Forge (SGF), computes the eigenvectors of a given graph adjacency matrix, filters them and builds a random graph with a similar eigenstructure. SGF has been proved to be particularly useful for synthesizing realistic social networks and it can also be used to anonymize graph sensitive data. Parameters ---------- G : Graph alpha : float Ratio representing the percentage of eigenvectors of G to consider, values in [0,1]. transformation : string, optional Represents the intended matrix linear transformation, possible values are 'identity' and 'modularity' seed : integer, random_state, or None (default) Indicator of numpy random number generation state. See :ref:`Randomness`. Returns ------- H : Graph A graph with a similar eigenvector structure of the input one. Raises ------ NetworkXError If transformation has a value different from 'identity' or 'modularity' Notes ----- Spectral Graph Forge (SGF) generates a random simple graph resembling the global properties of the given one. It leverages the low-rank approximation of the associated adjacency matrix driven by the *alpha* precision parameter. SGF preserves the number of nodes of the input graph and their ordering. This way, nodes of output graphs resemble the properties of the input one and attributes can be directly mapped. It considers the graph adjacency matrices which can optionally be transformed to other symmetric real matrices (currently transformation options include *identity* and *modularity*). The *modularity* transformation, in the sense of Newman's modularity matrix allows the focusing on community structure related properties of the graph. SGF applies a low-rank approximation whose fixed rank is computed from the ratio *alpha* of the input graph adjacency matrix dimension. This step performs a filtering on the input eigenvectors similar to the low pass filtering common in telecommunications. The filtered values (after truncation) are used as input to a Bernoulli sampling for constructing a random adjacency matrix. References ---------- .. [1] L. Baldesi, C. T. Butts, A. Markopoulou, ""Spectral Graph Forge: Graph Generation Targeting Modularity"", IEEE Infocom, '18. https://arxiv.org/abs/1801.01715 .. [2] M. 
Newman, ""Networks: an introduction"", Oxford university press, 2010 Examples -------- >>> G = nx.karate_club_graph() >>> H = nx.spectral_graph_forge(G, 0.3) >>> " 857,"def encode_example(self, example): example = cast_to_python_objects(example) return encode_nested_example(self, example) "," Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` ",19,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def encode_example(self, example): example = cast_to_python_objects(example) return encode_nested_example(self, example) ``` ###Assistant : Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` " 858,"def _add_unique_metric_name(self, metric_name, metric_fn, output_index): # For multi-output models, prepend the output names to the metric name. if len(self.output_names) > 1: # If we're loading from an already-serialized model, we've already # prepended the output name, and we don't want to do it again. # # Alternatively, we may be receiving a stateless metric (e.g. the string # ""accuracy"") rather than a `Metric` object, in which case we want to # prepend the output name even if we are loading a serialized model. if not getattr(metric_fn, ""_from_serialized"", False): metric_name = ""%s_%s"" % ( self.output_names[output_index], metric_name, ) j = 1 base_metric_name = metric_name while metric_name in self.metrics_names: metric_name = ""%s_%d"" % (base_metric_name, j) j += 1 return metric_name ","Makes the metric name unique. If there are multiple outputs for which the metrics are calculated, the metric names have to be made unique by appending an integer. Args: metric_name: Metric name that corresponds to the metric specified by the user. For example: 'acc'. metric_fn: The Metric object. output_index: The index of the model output for which the metric name is being added. Returns: string, name of the model's unique metric name ",72,117,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _add_unique_metric_name(self, metric_name, metric_fn, output_index): # For multi-output models, prepend the output names to the metric name. if len(self.output_names) > 1: # If we're loading from an already-serialized model, we've already # prepended the output name, and we don't want to do it again. # # Alternatively, we may be receiving a stateless metric (e.g. the string # ""accuracy"") rather than a `Metric` object, in which case we want to # prepend the output name even if we are loading a serialized model. if not getattr(metric_fn, ""_from_serialized"", False): metric_name = ""%s_%s"" % ( self.output_names[output_index], metric_name, ) j = 1 base_metric_name = metric_name while metric_name in self.metrics_names: metric_name = ""%s_%d"" % (base_metric_name, j) j += 1 return metric_name ``` ###Assistant : Makes the metric name unique. If there are multiple outputs for which the metrics are calculated, the metric names have to be made unique by appending an integer. Args: metric_name: Metric name that corresponds to the metric specified by the user. For example: 'acc'. metric_fn: The Metric object. output_index: The index of the model output for which the metric name is being added. 
Returns: string, name of the model's unique metric name " 859,"def strongly_connected_components(G): preorder = {} lowlink = {} scc_found = set() scc_queue = [] i = 0 # Preorder counter neighbors = {v: iter(G[v]) for v in G} for source in G: if source not in scc_found: queue = [source] while queue: v = queue[-1] if v not in preorder: i = i + 1 preorder[v] = i done = True for w in neighbors[v]: if w not in preorder: queue.append(w) done = False break if done: lowlink[v] = preorder[v] for w in G[v]: if w not in scc_found: if preorder[w] > preorder[v]: lowlink[v] = min([lowlink[v], lowlink[w]]) else: lowlink[v] = min([lowlink[v], preorder[w]]) queue.pop() if lowlink[v] == preorder[v]: scc = {v} while scc_queue and preorder[scc_queue[-1]] > preorder[v]: k = scc_queue.pop() scc.add(k) scc_found.update(scc) yield scc else: scc_queue.append(v) @not_implemented_for(""undirected"")","Generate nodes in strongly connected components of graph. Parameters ---------- G : NetworkX Graph A directed graph. Returns ------- comp : generator of sets A generator of sets of nodes, one for each strongly connected component of G. Raises ------ NetworkXNotImplemented If G is undirected. Examples -------- Generate a sorted list of strongly connected components, largest first. >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) >>> nx.add_cycle(G, [10, 11, 12]) >>> [ ... len(c) ... for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True) ... ] [4, 3] If you only want the largest component, it's more efficient to use max instead of sort. >>> largest = max(nx.strongly_connected_components(G), key=len) See Also -------- connected_components weakly_connected_components kosaraju_strongly_connected_components Notes ----- Uses Tarjan's algorithm[1]_ with Nuutila's modifications[2]_. Nonrecursive version of algorithm. References ---------- .. [1] Depth-first search and linear graph algorithms, R. Tarjan SIAM Journal of Computing 1(2):146-160, (1972). .. [2] On finding the strongly connected components in a directed graph. E. Nuutila and E. Soisalon-Soinen Information Processing Letters 49(1): 9-14, (1994).. ",162,126,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def strongly_connected_components(G): preorder = {} lowlink = {} scc_found = set() scc_queue = [] i = 0 # Preorder counter neighbors = {v: iter(G[v]) for v in G} for source in G: if source not in scc_found: queue = [source] while queue: v = queue[-1] if v not in preorder: i = i + 1 preorder[v] = i done = True for w in neighbors[v]: if w not in preorder: queue.append(w) done = False break if done: lowlink[v] = preorder[v] for w in G[v]: if w not in scc_found: if preorder[w] > preorder[v]: lowlink[v] = min([lowlink[v], lowlink[w]]) else: lowlink[v] = min([lowlink[v], preorder[w]]) queue.pop() if lowlink[v] == preorder[v]: scc = {v} while scc_queue and preorder[scc_queue[-1]] > preorder[v]: k = scc_queue.pop() scc.add(k) scc_found.update(scc) yield scc else: scc_queue.append(v) @not_implemented_for(""undirected"") ``` ###Assistant : Generate nodes in strongly connected components of graph. Parameters ---------- G : NetworkX Graph A directed graph. Returns ------- comp : generator of sets A generator of sets of nodes, one for each strongly connected component of G. Raises ------ NetworkXNotImplemented If G is undirected. Examples -------- Generate a sorted list of strongly connected components, largest first. 
>>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) >>> nx.add_cycle(G, [10, 11, 12]) >>> [ ... len(c) ... for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True) ... ] [4, 3] If you only want the largest component, it's more efficient to use max instead of sort. >>> largest = max(nx.strongly_connected_components(G), key=len) See Also -------- connected_components weakly_connected_components kosaraju_strongly_connected_components Notes ----- Uses Tarjan's algorithm[1]_ with Nuutila's modifications[2]_. Nonrecursive version of algorithm. References ---------- .. [1] Depth-first search and linear graph algorithms, R. Tarjan SIAM Journal of Computing 1(2):146-160, (1972). .. [2] On finding the strongly connected components in a directed graph. E. Nuutila and E. Soisalon-Soinen Information Processing Letters 49(1): 9-14, (1994).. " 860,"def score(self, X, y, sample_weight=None): # TODO: Adapt link to User Guide in the docstring, once # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged. # # Note, default score defined in RegressorMixin is R^2 score. # TODO: make D^2 a score function in module metrics (and thereby get # input validation and so on) raw_prediction = self._linear_predictor(X) # validates X # required by losses y = check_array(y, dtype=raw_prediction.dtype, order=""C"", ensure_2d=False) if sample_weight is not None: # Note that _check_sample_weight calls check_array(order=""C"") required by # losses. sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype) base_loss = self._linear_loss.base_loss if not base_loss.in_y_true_range(y): raise ValueError( ""Some value(s) of y are out of the valid range of the loss"" f"" {self._base_loss.__name__}."" ) # Note that constant_to_optimal_zero is already multiplied by sample_weight. constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y)) if sample_weight is not None: constant *= sample_weight.shape[0] / np.sum(sample_weight) # Missing factor of 2 in deviance cancels out. deviance = base_loss( y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1, ) y_mean = base_loss.link.link(np.average(y, weights=sample_weight)) deviance_null = base_loss( y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1, ) return 1 - (deviance + constant) / (deviance_null + constant) ","Compute D^2, the percentage of deviance explained. D^2 is a generalization of the coefficient of determination R^2. R^2 uses squared error and D^2 uses the deviance of this GLM, see the :ref:`User Guide `. D^2 is defined as :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`, :math:`D_{null}` is the null deviance, i.e. the deviance of a model with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`. The mean :math:`\\bar{y}` is averaged by sample_weight. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) True values of target. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float D^2 of self.predict(X) w.r.t. y. ",127,172,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def score(self, X, y, sample_weight=None): # TODO: Adapt link to User Guide in the docstring, once # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged. 
# # Note, default score defined in RegressorMixin is R^2 score. # TODO: make D^2 a score function in module metrics (and thereby get # input validation and so on) raw_prediction = self._linear_predictor(X) # validates X # required by losses y = check_array(y, dtype=raw_prediction.dtype, order=""C"", ensure_2d=False) if sample_weight is not None: # Note that _check_sample_weight calls check_array(order=""C"") required by # losses. sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype) base_loss = self._linear_loss.base_loss if not base_loss.in_y_true_range(y): raise ValueError( ""Some value(s) of y are out of the valid range of the loss"" f"" {self._base_loss.__name__}."" ) # Note that constant_to_optimal_zero is already multiplied by sample_weight. constant = np.mean(base_loss.constant_to_optimal_zero(y_true=y)) if sample_weight is not None: constant *= sample_weight.shape[0] / np.sum(sample_weight) # Missing factor of 2 in deviance cancels out. deviance = base_loss( y_true=y, raw_prediction=raw_prediction, sample_weight=sample_weight, n_threads=1, ) y_mean = base_loss.link.link(np.average(y, weights=sample_weight)) deviance_null = base_loss( y_true=y, raw_prediction=np.tile(y_mean, y.shape[0]), sample_weight=sample_weight, n_threads=1, ) return 1 - (deviance + constant) / (deviance_null + constant) ``` ###Assistant : Compute D^2, the percentage of deviance explained. D^2 is a generalization of the coefficient of determination R^2. R^2 uses squared error and D^2 uses the deviance of this GLM, see the :ref:`User Guide `. D^2 is defined as :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`, :math:`D_{null}` is the null deviance, i.e. the deviance of a model with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`. The mean :math:`\\bar{y}` is averaged by sample_weight. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) True values of target. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float D^2 of self.predict(X) w.r.t. y. " 861,"def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'): r assert isinstance(factor, int) and factor >= 1 # Check weight shape. w = tf.convert_to_tensor(w) assert w.shape.rank == 4 convH = w.shape[0].value convW = w.shape[1].value inC = _shape(w, 2) outC = _shape(w, 3) assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor ** 2)) p = (k.shape[0] - factor) - (convW - 1) # Determine data dimensions. if data_format == 'NCHW': stride = [1, 1, factor, factor] output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW] num_groups = _shape(x, 1) // inC else: stride = [1, factor, factor, 1] output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + convH, (_shape(x, 2) - 1) * factor + convW, outC] num_groups = _shape(x, 3) // inC # Transpose weights. w = tf.reshape(w, [convH, convW, inC, num_groups, -1]) w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2]) w = tf.reshape(w, [convH, convW, -1, num_groups * inC]) # Execute. 
x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format) return _simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl) #---------------------------------------------------------------------------- ","Fused `upsample_2d()` followed by `tf.nn.conv2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). impl: Name of the implementation to use. Can be `""ref""` or `""cuda""` (default). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`. ",158,198,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'): r assert isinstance(factor, int) and factor >= 1 # Check weight shape. w = tf.convert_to_tensor(w) assert w.shape.rank == 4 convH = w.shape[0].value convW = w.shape[1].value inC = _shape(w, 2) outC = _shape(w, 3) assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor ** 2)) p = (k.shape[0] - factor) - (convW - 1) # Determine data dimensions. if data_format == 'NCHW': stride = [1, 1, factor, factor] output_shape = [_shape(x, 0), outC, (_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW] num_groups = _shape(x, 1) // inC else: stride = [1, factor, factor, 1] output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + convH, (_shape(x, 2) - 1) * factor + convW, outC] num_groups = _shape(x, 3) // inC # Transpose weights. w = tf.reshape(w, [convH, convW, inC, num_groups, -1]) w = tf.transpose(w[::-1, ::-1], [0, 1, 4, 3, 2]) w = tf.reshape(w, [convH, convW, -1, num_groups * inC]) # Execute. x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format) return _simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl) #---------------------------------------------------------------------------- ``` ###Assistant : Fused `upsample_2d()` followed by `tf.nn.conv2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). 
gain: Scaling factor for signal magnitude (default: 1.0). data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). impl: Name of the implementation to use. Can be `""ref""` or `""cuda""` (default). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`. " 862,"def normalize_config(config): return json.loads(json.dumps(config, cls=NumpyEncoder)) ",Convert to json string and back again to remove numpy types.,11,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def normalize_config(config): return json.loads(json.dumps(config, cls=NumpyEncoder)) ``` ###Assistant : Convert to json string and back again to remove numpy types. " 863,"def ensure_srgb(img, srgb_profile=None): img_info = dict(img.info) icc = img_info.pop(""icc_profile"", None) if not icc: return img if ImageCms is None: raise RuntimeError(""ImageCms is required for color profile utilities"") if srgb_profile is not None: srgb_profile = ImageCms.ImageCmsProfile(srgb_profile) else: srgb_profile = DEFAULT_SRGB_PROFILE buf = BytesIO(icc) try: orig_profile = ImageCms.ImageCmsProfile(buf) color_space = orig_profile.profile.xcolor_space except (AttributeError, OSError, TypeError, ValueError): return None finally: buf.close() if color_space == ""RGB "": logger.debug(""Returning img (RGB)"") return img if color_space not in (""GRAY"", ""CMYK""): # Other color spaces are rare, but best not to try to convert them. # Upstream understands a None return as meaning it should not # use it for the target encoder. logger.debug( ""Cannot convert to sRGB; color space = %s"", (color_space.strip()), ) return None # Probably not possible to have an animated image with CMYK or GRAY icc # profile, but best leave it alone if we have one if getattr(img, ""is_animated"", False): return None if color_space == ""GRAY"": pil_mode = ""L"" else: pil_mode = ""CMYK"" logger.debug(""Converting from %s to sRGB"", color_space) transform = ImageCms.ImageCmsTransform( orig_profile, srgb_profile, pil_mode, ""RGBA"", intent=ImageCms.INTENT_RELATIVE_COLORIMETRIC, flags=TRANSFORM_FLAGS, ) src_im = Image.new(pil_mode, img.size, ""white"") src_im.paste(img) dst_im = Image.new(""RGBA"", img.size, ""white"") dst_im.info = img_info dst_im = transform.apply(src_im, dst_im) dst_im = dst_im.convert(""RGB"") dst_im.info = img_info return dst_im "," Ensures that an image either has no ICC profile (and so is implicitly sRGB) or has an sRGB color profile. If the image is sRGB, it is returned unchanged. If it has a CMYK or Gray color profile, this function will return an image converted to sRGB. Any color profiles in other color spaces will return None. ",57,203,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def ensure_srgb(img, srgb_profile=None): img_info = dict(img.info) icc = img_info.pop(""icc_profile"", None) if not icc: return img if ImageCms is None: raise RuntimeError(""ImageCms is required for color profile utilities"") if srgb_profile is not None: srgb_profile = ImageCms.ImageCmsProfile(srgb_profile) else: srgb_profile = DEFAULT_SRGB_PROFILE buf = BytesIO(icc) try: orig_profile = ImageCms.ImageCmsProfile(buf) color_space = orig_profile.profile.xcolor_space except (AttributeError, OSError, TypeError, ValueError): return None finally: buf.close() if color_space == ""RGB "": logger.debug(""Returning img (RGB)"") return img if color_space not in (""GRAY"", ""CMYK""): # Other color spaces are rare, but best not to try to convert them. # Upstream understands a None return as meaning it should not # use it for the target encoder. logger.debug( ""Cannot convert to sRGB; color space = %s"", (color_space.strip()), ) return None # Probably not possible to have an animated image with CMYK or GRAY icc # profile, but best leave it alone if we have one if getattr(img, ""is_animated"", False): return None if color_space == ""GRAY"": pil_mode = ""L"" else: pil_mode = ""CMYK"" logger.debug(""Converting from %s to sRGB"", color_space) transform = ImageCms.ImageCmsTransform( orig_profile, srgb_profile, pil_mode, ""RGBA"", intent=ImageCms.INTENT_RELATIVE_COLORIMETRIC, flags=TRANSFORM_FLAGS, ) src_im = Image.new(pil_mode, img.size, ""white"") src_im.paste(img) dst_im = Image.new(""RGBA"", img.size, ""white"") dst_im.info = img_info dst_im = transform.apply(src_im, dst_im) dst_im = dst_im.convert(""RGB"") dst_im.info = img_info return dst_im ``` ###Assistant : Ensures that an image either has no ICC profile (and so is implicitly sRGB) or has an sRGB color profile. If the image is sRGB, it is returned unchanged. If it has a CMYK or Gray color profile, this function will return an image converted to sRGB. Any color profiles in other color spaces will return None. " 864,"def _cast_single_input(self, x): if self._should_cast_single_input(x): return tf.cast(x, self._compute_dtype_object) else: return x # _dtype used to be an attribute set in the constructor. We still expose it # because some clients still use it. # TODO(reedwm): Deprecate, then remove the _dtype property.",Cast a single Tensor or TensorSpec to the compute dtype.,10,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cast_single_input(self, x): if self._should_cast_single_input(x): return tf.cast(x, self._compute_dtype_object) else: return x # _dtype used to be an attribute set in the constructor. We still expose it # because some clients still use it. # TODO(reedwm): Deprecate, then remove the _dtype property. ``` ###Assistant : Cast a single Tensor or TensorSpec to the compute dtype. " 865,"def queryset_in_batches(queryset): start_pk = 0 while True: qs = queryset.filter(pk__gt=start_pk)[:BATCH_SIZE] pks = list(qs.values_list(""pk"", flat=True)) if not pks: break yield pks start_pk = pks[-1] ","Slice a queryset into batches. Input queryset should be sorted be pk. ",12,23,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def queryset_in_batches(queryset): start_pk = 0 while True: qs = queryset.filter(pk__gt=start_pk)[:BATCH_SIZE] pks = list(qs.values_list(""pk"", flat=True)) if not pks: break yield pks start_pk = pks[-1] ``` ###Assistant : Slice a queryset into batches. Input queryset should be sorted be pk. " 866,"def execute(): frappe.reload_doc(""stock"", ""doctype"", ""purchase_receipt"") frappe.reload_doc(""stock"", ""doctype"", ""purchase_receipt_item"") frappe.reload_doc(""stock"", ""doctype"", ""delivery_note"") frappe.reload_doc(""stock"", ""doctype"", ""delivery_note_item"") frappe.reload_doc(""stock"", ""doctype"", ""stock_settings"") def update_from_return_docs(doctype): for return_doc in frappe.get_all( doctype, filters={""is_return"": 1, ""docstatus"": 1, ""return_against"": (""!="", """")} ): # Update original receipt/delivery document from return return_doc = frappe.get_cached_doc(doctype, return_doc.name) try: return_doc.update_prevdoc_status() except OverAllowanceError: frappe.db.rollback() continue return_against = frappe.get_doc(doctype, return_doc.return_against) return_against.update_billing_status() frappe.db.commit() # Set received qty in stock uom in PR, as returned qty is checked against it frappe.db.sql( ) for doctype in (""Purchase Receipt"", ""Delivery Note""): update_from_return_docs(doctype) "," update `tabPurchase Receipt Item` set received_stock_qty = received_qty * conversion_factor where docstatus = 1 ",14,81,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute(): frappe.reload_doc(""stock"", ""doctype"", ""purchase_receipt"") frappe.reload_doc(""stock"", ""doctype"", ""purchase_receipt_item"") frappe.reload_doc(""stock"", ""doctype"", ""delivery_note"") frappe.reload_doc(""stock"", ""doctype"", ""delivery_note_item"") frappe.reload_doc(""stock"", ""doctype"", ""stock_settings"") def update_from_return_docs(doctype): for return_doc in frappe.get_all( doctype, filters={""is_return"": 1, ""docstatus"": 1, ""return_against"": (""!="", """")} ): # Update original receipt/delivery document from return return_doc = frappe.get_cached_doc(doctype, return_doc.name) try: return_doc.update_prevdoc_status() except OverAllowanceError: frappe.db.rollback() continue return_against = frappe.get_doc(doctype, return_doc.return_against) return_against.update_billing_status() frappe.db.commit() # Set received qty in stock uom in PR, as returned qty is checked against it frappe.db.sql( ) for doctype in (""Purchase Receipt"", ""Delivery Note""): update_from_return_docs(doctype) ``` ###Assistant : update `tabPurchase Receipt Item` set received_stock_qty = received_qty * conversion_factor where docstatus = 1 " 867,"def call_dex(self, other_args): parser = argparse.ArgumentParser( prog=""dex"", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( ""-l"", ""--limit"", dest=""limit"", type=check_positive, help=""Number of records to display"", default=15, ) parser.add_argument( ""-s"", ""--sort"", dest=""sortby"", nargs=""+"", help=""Sort by given column. 
Default: Daily Volume [$]"", default=""Daily Volume [$]"", ) ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: dappradar_view.display_top_dexes( sortby="" "".join(ns_parser.sortby), limit=ns_parser.limit, export=ns_parser.export, ) ","Process dex command Shows top decentralized exchanges [Source: https://dappradar.com/] Accepts --sort {Name,Daily Users,Daily Volume [$]} to sort by column ",19,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_dex(self, other_args): parser = argparse.ArgumentParser( prog=""dex"", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( ""-l"", ""--limit"", dest=""limit"", type=check_positive, help=""Number of records to display"", default=15, ) parser.add_argument( ""-s"", ""--sort"", dest=""sortby"", nargs=""+"", help=""Sort by given column. Default: Daily Volume [$]"", default=""Daily Volume [$]"", ) ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: dappradar_view.display_top_dexes( sortby="" "".join(ns_parser.sortby), limit=ns_parser.limit, export=ns_parser.export, ) ``` ###Assistant : Process dex command Shows top decentralized exchanges [Source: https://dappradar.com/] Accepts --sort {Name,Daily Users,Daily Volume [$]} to sort by column " 868,"def get_backend_for_dir(self, location): # type: (str) -> Optional[VersionControl] vcs_backends = {} for vcs_backend in self._registry.values(): repo_path = vcs_backend.get_repository_root(location) if not repo_path: continue logger.debug('Determine that %s uses VCS: %s', location, vcs_backend.name) vcs_backends[repo_path] = vcs_backend if not vcs_backends: return None # Choose the VCS in the inner-most directory. Since all repository # roots found here would be either `location` or one of its # parents, the longest path should have the most path components, # i.e. the backend representing the inner-most repository. inner_most_repo_path = max(vcs_backends, key=len) return vcs_backends[inner_most_repo_path] "," Return a VersionControl object if a repository of that type is found at the given directory. ",16,86,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_backend_for_dir(self, location): # type: (str) -> Optional[VersionControl] vcs_backends = {} for vcs_backend in self._registry.values(): repo_path = vcs_backend.get_repository_root(location) if not repo_path: continue logger.debug('Determine that %s uses VCS: %s', location, vcs_backend.name) vcs_backends[repo_path] = vcs_backend if not vcs_backends: return None # Choose the VCS in the inner-most directory. Since all repository # roots found here would be either `location` or one of its # parents, the longest path should have the most path components, # i.e. the backend representing the inner-most repository. inner_most_repo_path = max(vcs_backends, key=len) return vcs_backends[inner_most_repo_path] ``` ###Assistant : Return a VersionControl object if a repository of that type is found at the given directory. " 869,"def has_access(self, action_name, resource_name, user=None) -> bool: if not user: user = g.user if user.is_anonymous: user.roles = self.get_user_roles(user) has_access = self._has_access(user, action_name, resource_name) # FAB built-in view access method. Won't work for AllDag access. 
if self.is_dag_resource(resource_name): if action_name == permissions.ACTION_CAN_READ: has_access |= self.can_read_dag(resource_name, user) elif action_name == permissions.ACTION_CAN_EDIT: has_access |= self.can_edit_dag(resource_name, user) return has_access "," Verify whether a given user could perform a certain action (e.g can_read, can_write) on the given resource. :param action_name: action_name on resource (e.g can_read, can_edit). :param resource_name: name of view-menu or resource. :param user: user name :return: Whether user could perform certain action on the resource. :rtype bool ",48,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def has_access(self, action_name, resource_name, user=None) -> bool: if not user: user = g.user if user.is_anonymous: user.roles = self.get_user_roles(user) has_access = self._has_access(user, action_name, resource_name) # FAB built-in view access method. Won't work for AllDag access. if self.is_dag_resource(resource_name): if action_name == permissions.ACTION_CAN_READ: has_access |= self.can_read_dag(resource_name, user) elif action_name == permissions.ACTION_CAN_EDIT: has_access |= self.can_edit_dag(resource_name, user) return has_access ``` ###Assistant : Verify whether a given user could perform a certain action (e.g can_read, can_write) on the given resource. :param action_name: action_name on resource (e.g can_read, can_edit). :param resource_name: name of view-menu or resource. :param user: user name :return: Whether user could perform certain action on the resource. :rtype bool " 870,"def has_refs(self) -> bool: return len(self._session_report_run_counts) > 0 ","True if this Entry has references from any AppSession. If not, it can be removed from the cache. ",18,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def has_refs(self) -> bool: return len(self._session_report_run_counts) > 0 ``` ###Assistant : True if this Entry has references from any AppSession. If not, it can be removed from the cache. " 871,"def _reshape_to_record_metrics(self, batch, losses, num_target_tokens, indices): val_id_shape = batch.valid_indices.shape reshaped_losses = torch.zeros( val_id_shape, device=losses.device, dtype=losses.dtype ) reshaped_num_target_tokens = torch.zeros( val_id_shape, device=num_target_tokens.device, dtype=num_target_tokens.dtype ) reshaped_losses[indices] = losses reshaped_num_target_tokens[indices] = num_target_tokens return (reshaped_losses, reshaped_num_target_tokens) "," MultitaskAgent shuffles and combines examples from both classifier and the generator tasks in a single batch. We compute losses only for those exs in the batch resulting in losses and num_target_tokens vectors that are smaller than the. This method reshapes the losses and num_target_tokens vectors back to the batch size. This is needed to record local metrics as the metrics need to be of batch size. Args: batch: batch being processed in this iteration. losses: classifier or generator loss vector (shape: b' X 1), where b' <= b. num_target_tokens: number of tokens in each examples for classification or generation tasks. (shape: b' X 1), where b' <= b. indices: indices of (either classification or generation) exs for which the loss was computed. Returns: A tuple of reshaped losses and num_target_tokens, both of shape: b X 1. ",136,32,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _reshape_to_record_metrics(self, batch, losses, num_target_tokens, indices): val_id_shape = batch.valid_indices.shape reshaped_losses = torch.zeros( val_id_shape, device=losses.device, dtype=losses.dtype ) reshaped_num_target_tokens = torch.zeros( val_id_shape, device=num_target_tokens.device, dtype=num_target_tokens.dtype ) reshaped_losses[indices] = losses reshaped_num_target_tokens[indices] = num_target_tokens return (reshaped_losses, reshaped_num_target_tokens) ``` ###Assistant : MultitaskAgent shuffles and combines examples from both classifier and the generator tasks in a single batch. We compute losses only for those exs in the batch resulting in losses and num_target_tokens vectors that are smaller than the. This method reshapes the losses and num_target_tokens vectors back to the batch size. This is needed to record local metrics as the metrics need to be of batch size. Args: batch: batch being processed in this iteration. losses: classifier or generator loss vector (shape: b' X 1), where b' <= b. num_target_tokens: number of tokens in each examples for classification or generation tasks. (shape: b' X 1), where b' <= b. indices: indices of (either classification or generation) exs for which the loss was computed. Returns: A tuple of reshaped losses and num_target_tokens, both of shape: b X 1. " 872,"def add_support(self, location, type): if location not in self._node_labels: raise ValueError(""Support must be added on a known node"") else: self._supports[location] = type "," This method adds a pinned or roller support at a particular node Parameters ========== location: String or Symbol Label of the Node at which support is added. type: String Type of the support being provided at the node. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> from sympy import symbols >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.add_support('A', 'pinned') >>> t.supports {'A': 'pinned', 'B': 'none'} ",71,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_support(self, location, type): if location not in self._node_labels: raise ValueError(""Support must be added on a known node"") else: self._supports[location] = type ``` ###Assistant : This method adds a pinned or roller support at a particular node Parameters ========== location: String or Symbol Label of the Node at which support is added. type: String Type of the support being provided at the node. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> from sympy import symbols >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.add_support('A', 'pinned') >>> t.supports {'A': 'pinned', 'B': 'none'} " 873,"def makeport(self): sock = socket.create_server(("""", 0), family=self.af, backlog=1) port = sock.getsockname()[1] # Get proper port host = self.sock.getsockname()[0] # Get proper host if self.af == socket.AF_INET: resp = self.sendport(host, port) else: resp = self.sendeprt(host, port) if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(self.timeout) return sock ",Create a new socket and send a PORT command for it.,11,43,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def makeport(self): sock = socket.create_server(("""", 0), family=self.af, backlog=1) port = sock.getsockname()[1] # Get proper port host = self.sock.getsockname()[0] # Get proper host if self.af == socket.AF_INET: resp = self.sendport(host, port) else: resp = self.sendeprt(host, port) if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(self.timeout) return sock ``` ###Assistant : Create a new socket and send a PORT command for it. " 874,"def test_all(gm_manager): _save_script(test_gm_script, 'test.user.js') gm_manager.load_scripts() assert (gm_manager.all_scripts()[0].name == ""qutebrowser test userscript"") @pytest.mark.parametrize(""url, expected_matches"", [ # included ('http://trolol.com/', 1), # neither included nor excluded ('http://aaaaaaaaaa.com/', 0), # excluded ('https://badhost.xxx/', 0), ])","Test that a script gets read from file, parsed and returned.",11,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_all(gm_manager): _save_script(test_gm_script, 'test.user.js') gm_manager.load_scripts() assert (gm_manager.all_scripts()[0].name == ""qutebrowser test userscript"") @pytest.mark.parametrize(""url, expected_matches"", [ # included ('http://trolol.com/', 1), # neither included nor excluded ('http://aaaaaaaaaa.com/', 0), # excluded ('https://badhost.xxx/', 0), ]) ``` ###Assistant : Test that a script gets read from file, parsed and returned. " 875,"def split_path_msys(path): if path.startswith(('/', '\\')) and not path.startswith(('//', '\\\\')): global msysroot if not msysroot: msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1') msysroot = msysroot.strip() path = os.path.normpath(msysroot + os.sep + path) return split_path_win32(path) if sys.platform == 'cygwin': split_path = split_path_cygwin elif is_win32: if os.environ.get('MSYSTEM') and sys.executable.startswith('/'): split_path = split_path_msys else: split_path = split_path_win32 else: split_path = split_path_unix split_path.__doc__ = "," Splits a path by / or \\; do not confuse this function with with ``os.path.split`` :type path: string :param path: path to split :return: list of string ",27,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def split_path_msys(path): if path.startswith(('/', '\\')) and not path.startswith(('//', '\\\\')): global msysroot if not msysroot: msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1') msysroot = msysroot.strip() path = os.path.normpath(msysroot + os.sep + path) return split_path_win32(path) if sys.platform == 'cygwin': split_path = split_path_cygwin elif is_win32: if os.environ.get('MSYSTEM') and sys.executable.startswith('/'): split_path = split_path_msys else: split_path = split_path_win32 else: split_path = split_path_unix split_path.__doc__ = ``` ###Assistant : Splits a path by / or \\; do not confuse this function with with ``os.path.split`` :type path: string :param path: path to split :return: list of string " 876,"def find_image_duplicates(image, user, permission_policy): instances = permission_policy.instances_user_has_permission_for(user, ""choose"") return instances.exclude(pk=image.pk).filter(file_hash=image.file_hash) "," Finds all the duplicates of a given image. 
To keep things simple, two images are considered to be duplicates if they have the same `file_hash` value. This function also ensures that the `user` can choose one of the duplicate images returned (if any). ",43,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_image_duplicates(image, user, permission_policy): instances = permission_policy.instances_user_has_permission_for(user, ""choose"") return instances.exclude(pk=image.pk).filter(file_hash=image.file_hash) ``` ###Assistant : Finds all the duplicates of a given image. To keep things simple, two images are considered to be duplicates if they have the same `file_hash` value. This function also ensures that the `user` can choose one of the duplicate images returned (if any). " 877,"def _trim_arity(func, maxargs=2): global _trim_arity_call_line if func in _single_arg_builtins: return lambda s, l, t: func(t) limit = 0 found_arity = False ",decorator to trim function calls to match the arity of the target,12,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _trim_arity(func, maxargs=2): global _trim_arity_call_line if func in _single_arg_builtins: return lambda s, l, t: func(t) limit = 0 found_arity = False ``` ###Assistant : decorator to trim function calls to match the arity of the target " 878,"def call_exmarkets(self, other_args): parser = argparse.ArgumentParser( prog=""exmarkets"", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( ""-e"", ""--exchange"", help=""Identifier of exchange e.g for Binance Exchange -> binance"", dest=""exchange"", default=""binance"", type=str, ) parser.add_argument( ""-l"", ""--limit"", dest=""limit"", type=check_positive, help=""display N number records"", default=10, ) parser.add_argument( ""-s"", ""--sortby"", dest=""sortby"", type=str, help=""Sort by given column. Default: reported_volume_24h_share"", default=""reported_volume_24h_share"", choices=coinpaprika_model.EXMARKETS_FILTERS, ) parser.add_argument( ""--descend"", action=""store_false"", help=""Flag to sort in descending order (lowest first)"", dest=""descend"", default=False, ) parser.add_argument( ""-u"", ""--urls"", dest=""urls"", action=""store_true"", help=, default=False, ) if other_args and ""-"" not in other_args[0][0]: other_args.insert(0, ""-e"") ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: coinpaprika_view.display_exchange_markets( exchange=ns_parser.exchange, limit=ns_parser.limit, export=ns_parser.export, sortby=ns_parser.sortby, ascend=not ns_parser.descend, links=ns_parser.urls, ) ","Process exmarkets commandGet all exchange markets found for given exchange You can display only N number of records with --limit parameter. You can sort data by pair, base_currency_name, quote_currency_name, market_url, category, reported_volume_24h_share, trust_score --sortby parameter and also with --descend flag to sort descending. You can use additional flag --urls to see urls for each market Displays: exchange_id, pair, base_currency_name, quote_currency_name, market_url, category, reported_volume_24h_share, trust_score,Flag to show urls. If you will use that flag you will see only: exchange, pair, trust_score, market_url columns",82,101,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def call_exmarkets(self, other_args): parser = argparse.ArgumentParser( prog=""exmarkets"", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( ""-e"", ""--exchange"", help=""Identifier of exchange e.g for Binance Exchange -> binance"", dest=""exchange"", default=""binance"", type=str, ) parser.add_argument( ""-l"", ""--limit"", dest=""limit"", type=check_positive, help=""display N number records"", default=10, ) parser.add_argument( ""-s"", ""--sortby"", dest=""sortby"", type=str, help=""Sort by given column. Default: reported_volume_24h_share"", default=""reported_volume_24h_share"", choices=coinpaprika_model.EXMARKETS_FILTERS, ) parser.add_argument( ""--descend"", action=""store_false"", help=""Flag to sort in descending order (lowest first)"", dest=""descend"", default=False, ) parser.add_argument( ""-u"", ""--urls"", dest=""urls"", action=""store_true"", help=, default=False, ) if other_args and ""-"" not in other_args[0][0]: other_args.insert(0, ""-e"") ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: coinpaprika_view.display_exchange_markets( exchange=ns_parser.exchange, limit=ns_parser.limit, export=ns_parser.export, sortby=ns_parser.sortby, ascend=not ns_parser.descend, links=ns_parser.urls, ) ``` ###Assistant : Process exmarkets commandGet all exchange markets found for given exchange You can display only N number of records with --limit parameter. You can sort data by pair, base_currency_name, quote_currency_name, market_url, category, reported_volume_24h_share, trust_score --sortby parameter and also with --descend flag to sort descending. You can use additional flag --urls to see urls for each market Displays: exchange_id, pair, base_currency_name, quote_currency_name, market_url, category, reported_volume_24h_share, trust_score,Flag to show urls. If you will use that flag you will see only: exchange, pair, trust_score, market_url columns " 879,"async def test_unload_config_entry(hass, entry, lcn_connection): await hass.config_entries.async_unload(entry.entry_id) assert hass.states.get(""cover.cover_outputs"").state == STATE_UNAVAILABLE assert hass.states.get(""cover.cover_relays"").state == STATE_UNAVAILABLE ",Test the cover is removed when the config entry is unloaded.,11,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_unload_config_entry(hass, entry, lcn_connection): await hass.config_entries.async_unload(entry.entry_id) assert hass.states.get(""cover.cover_outputs"").state == STATE_UNAVAILABLE assert hass.states.get(""cover.cover_relays"").state == STATE_UNAVAILABLE ``` ###Assistant : Test the cover is removed when the config entry is unloaded. " 880,"def topk(self, k, axis=-1, split_every=None): from dask.array.reductions import topk return topk(self, k, axis=axis, split_every=split_every) ","The top k elements of an array. See :func:`dask.array.topk` for docstring. ",11,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def topk(self, k, axis=-1, split_every=None): from dask.array.reductions import topk return topk(self, k, axis=axis, split_every=split_every) ``` ###Assistant : The top k elements of an array. See :func:`dask.array.topk` for docstring. 
" 881,"def for_each_ternary(self, fn, selector=None, row=None, col=None) -> ""Figure"": for obj in self.select_ternaries(selector=selector, row=row, col=col): fn(obj) return self "," Apply a function to all ternary objects that satisfy the specified selection criteria Parameters ---------- fn: Function that inputs a single ternary object. selector: dict, function, or None (default None) Dict to use as selection criteria. ternary objects will be selected if they contain properties corresponding to all of the dictionary's keys, with values that exactly match the supplied values. If None (the default), all ternary objects are selected. If a function, it must be a function accepting a single argument and returning a boolean. The function will be called on each ternary and those for which the function returned True will be in the selection. row, col: int or None (default None) Subplot row and column index of ternary objects to select. To select ternary objects by row and column, the Figure must have been created using plotly.subplots.make_subplots. If None (the default), all ternary objects are selected. Returns ------- self Returns the Figure object that the method was called on ",161,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def for_each_ternary(self, fn, selector=None, row=None, col=None) -> ""Figure"": for obj in self.select_ternaries(selector=selector, row=row, col=col): fn(obj) return self ``` ###Assistant : Apply a function to all ternary objects that satisfy the specified selection criteria Parameters ---------- fn: Function that inputs a single ternary object. selector: dict, function, or None (default None) Dict to use as selection criteria. ternary objects will be selected if they contain properties corresponding to all of the dictionary's keys, with values that exactly match the supplied values. If None (the default), all ternary objects are selected. If a function, it must be a function accepting a single argument and returning a boolean. The function will be called on each ternary and those for which the function returned True will be in the selection. row, col: int or None (default None) Subplot row and column index of ternary objects to select. To select ternary objects by row and column, the Figure must have been created using plotly.subplots.make_subplots. If None (the default), all ternary objects are selected. Returns ------- self Returns the Figure object that the method was called on " 882,"def call(self, *args, **kwargs): warnings.warn( ""'call()' method is deprecated. "" + ""Use '__call__()' instead"", DeprecationWarning, ) return self.__call__(*args, **kwargs) ",Use ``__call__`` instead because this method is deprecated.,8,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call(self, *args, **kwargs): warnings.warn( ""'call()' method is deprecated. "" + ""Use '__call__()' instead"", DeprecationWarning, ) return self.__call__(*args, **kwargs) ``` ###Assistant : Use ``__call__`` instead because this method is deprecated. " 883,"def validate_per_replica_inputs(distribution_strategy, x): # Convert the inputs and targets into a list of PerReplica objects. per_replica_list = tf.nest.flatten(x) x_values_list = [] for x in per_replica_list: # At this point x should contain only tensors. 
x_values = distribution_strategy.unwrap(x) for value in x_values: if not tf.is_tensor(value): raise ValueError('Dataset input to the model should be tensors instead ' 'they are of type {}'.format(type(value))) if not tf.executing_eagerly(): # Validate that the shape and dtype of all the elements in x are the same. validate_all_tensor_shapes(x, x_values) validate_all_tensor_types(x, x_values) x_values_list.append(x_values[0]) return x_values_list ","Validates PerReplica dataset input list. Args: distribution_strategy: The current DistributionStrategy used to call `fit`, `evaluate` and `predict`. x: A list of PerReplica objects that represent the input or target values. Returns: List containing the first element of each of the PerReplica objects in the input list. Raises: ValueError: If any of the objects in the `per_replica_list` is not a tensor. ",60,86,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_per_replica_inputs(distribution_strategy, x): # Convert the inputs and targets into a list of PerReplica objects. per_replica_list = tf.nest.flatten(x) x_values_list = [] for x in per_replica_list: # At this point x should contain only tensors. x_values = distribution_strategy.unwrap(x) for value in x_values: if not tf.is_tensor(value): raise ValueError('Dataset input to the model should be tensors instead ' 'they are of type {}'.format(type(value))) if not tf.executing_eagerly(): # Validate that the shape and dtype of all the elements in x are the same. validate_all_tensor_shapes(x, x_values) validate_all_tensor_types(x, x_values) x_values_list.append(x_values[0]) return x_values_list ``` ###Assistant : Validates PerReplica dataset input list. Args: distribution_strategy: The current DistributionStrategy used to call `fit`, `evaluate` and `predict`. x: A list of PerReplica objects that represent the input or target values. Returns: List containing the first element of each of the PerReplica objects in the input list. Raises: ValueError: If any of the objects in the `per_replica_list` is not a tensor. " 884,"def get_mac_addr(self): if ( self.bulb.host_firmware_version and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW ): octets = [int(octet, 16) for octet in self.mac_addr.split("":"")] octets[5] = (octets[5] + 1) % 256 return "":"".join(f""{octet:02x}"" for octet in octets) return self.mac_addr ",Increment the last byte of the mac address by one for FW>3.70.,12,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_mac_addr(self): if ( self.bulb.host_firmware_version and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW ): octets = [int(octet, 16) for octet in self.mac_addr.split("":"")] octets[5] = (octets[5] + 1) % 256 return "":"".join(f""{octet:02x}"" for octet in octets) return self.mac_addr ``` ###Assistant : Increment the last byte of the mac address by one for FW>3.70. " 885,"def test_positive_integer_or_none_4(): assert positive_integer_or_none('none') is None assert positive_integer_or_none('None') is None ",Assert that the TPOT CLI interface's positive_integer_or_none parsing return None when value is string 'None' or 'none'.,17,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_positive_integer_or_none_4(): assert positive_integer_or_none('none') is None assert positive_integer_or_none('None') is None ``` ###Assistant : Assert that the TPOT CLI interface's positive_integer_or_none parsing return None when value is string 'None' or 'none'. " 886,"def get_local_part(value): local_part = LocalPart() leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError( ""expected local-part but found '{}'"".format(value)) try: token, value = get_dot_atom(value) except errors.HeaderParseError: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] != '\\' and value[0] in PHRASE_ENDS: raise token = TokenList() if leader is not None: token[:0] = [leader] local_part.append(token) if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS): obs_local_part, value = get_obs_local_part(str(local_part) + value) if obs_local_part.token_type == 'invalid-obs-local-part': local_part.defects.append(errors.InvalidHeaderDefect( ""local-part is not dot-atom, quoted-string, or obs-local-part"")) else: local_part.defects.append(errors.ObsoleteHeaderDefect( ""local-part is not a dot-atom (contains CFWS)"")) local_part[0] = obs_local_part try: local_part.value.encode('ascii') except UnicodeEncodeError: local_part.defects.append(errors.NonASCIILocalPartDefect( ""local-part contains non-ASCII characters)"")) return local_part, value "," local-part = dot-atom / quoted-string / obs-local-part ",7,112,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_local_part(value): local_part = LocalPart() leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value: raise errors.HeaderParseError( ""expected local-part but found '{}'"".format(value)) try: token, value = get_dot_atom(value) except errors.HeaderParseError: try: token, value = get_word(value) except errors.HeaderParseError: if value[0] != '\\' and value[0] in PHRASE_ENDS: raise token = TokenList() if leader is not None: token[:0] = [leader] local_part.append(token) if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS): obs_local_part, value = get_obs_local_part(str(local_part) + value) if obs_local_part.token_type == 'invalid-obs-local-part': local_part.defects.append(errors.InvalidHeaderDefect( ""local-part is not dot-atom, quoted-string, or obs-local-part"")) else: local_part.defects.append(errors.ObsoleteHeaderDefect( ""local-part is not a dot-atom (contains CFWS)"")) local_part[0] = obs_local_part try: local_part.value.encode('ascii') except UnicodeEncodeError: local_part.defects.append(errors.NonASCIILocalPartDefect( ""local-part contains non-ASCII characters)"")) return local_part, value ``` ###Assistant : local-part = dot-atom / quoted-string / obs-local-part " 887,"def prepare_video_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False): assert not (numpify and torchify), ""You cannot specify both numpy and PyTorch tensors at the same time"" video_inputs = [] for i in range(feature_extract_tester.batch_size): if equal_resolution: width = height = feature_extract_tester.max_resolution else: width, height = np.random.choice( np.arange(feature_extract_tester.min_resolution, feature_extract_tester.max_resolution), 2 ) video = prepare_video( feature_extract_tester=feature_extract_tester, width=width, height=height, numpify=numpify, torchify=torchify, ) 
video_inputs.append(video) return video_inputs ","This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True. One can specify whether the videos are of the same resolution or not. ",51,57,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def prepare_video_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False): assert not (numpify and torchify), ""You cannot specify both numpy and PyTorch tensors at the same time"" video_inputs = [] for i in range(feature_extract_tester.batch_size): if equal_resolution: width = height = feature_extract_tester.max_resolution else: width, height = np.random.choice( np.arange(feature_extract_tester.min_resolution, feature_extract_tester.max_resolution), 2 ) video = prepare_video( feature_extract_tester=feature_extract_tester, width=width, height=height, numpify=numpify, torchify=torchify, ) video_inputs.append(video) return video_inputs ``` ###Assistant : This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True. One can specify whether the videos are of the same resolution or not. " 888,"def _verify_no_matching_http_header(self, ssl_vhost, header_substring): header_path = self.parser.find_dir(""Header"", None, start=ssl_vhost.path) if header_path: # ""Existing Header directive for virtualhost"" pat = '(?:[ ""]|^)(%s)(?:[ ""]|$)' % (header_substring.lower()) for match in header_path: if re.search(pat, self.parser.aug.get(match).lower()): raise errors.PluginEnhancementAlreadyPresent( ""Existing %s header"" % header_substring) ","Checks to see if there is an existing Header directive that contains the string header_substring. :param ssl_vhost: vhost to check :type vhost: :class:`~certbot_apache._internal.obj.VirtualHost` :param header_substring: string that uniquely identifies a header. e.g: Strict-Transport-Security, Upgrade-Insecure-Requests. :type str :returns: boolean :rtype: (bool) :raises errors.PluginEnhancementAlreadyPresent When header header_substring exists ",46,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _verify_no_matching_http_header(self, ssl_vhost, header_substring): header_path = self.parser.find_dir(""Header"", None, start=ssl_vhost.path) if header_path: # ""Existing Header directive for virtualhost"" pat = '(?:[ ""]|^)(%s)(?:[ ""]|$)' % (header_substring.lower()) for match in header_path: if re.search(pat, self.parser.aug.get(match).lower()): raise errors.PluginEnhancementAlreadyPresent( ""Existing %s header"" % header_substring) ``` ###Assistant : Checks to see if there is an existing Header directive that contains the string header_substring. :param ssl_vhost: vhost to check :type vhost: :class:`~certbot_apache._internal.obj.VirtualHost` :param header_substring: string that uniquely identifies a header. e.g: Strict-Transport-Security, Upgrade-Insecure-Requests. 
:type str :returns: boolean :rtype: (bool) :raises errors.PluginEnhancementAlreadyPresent When header header_substring exists " 889,"def indices(dimensions, dtype=int32, sparse=False): dimensions = tuple( core.concrete_or_error(operator.index, d, ""dimensions argument of jnp.indices"") for d in dimensions) N = len(dimensions) output = [] s = dimensions for i, dim in enumerate(dimensions): idx = lax.iota(dtype, dim) if sparse: s = (1,)*i + (dim,) + (1,)*(N - i - 1) output.append(lax.broadcast_in_dim(idx, s, (i,))) if sparse: return tuple(output) return stack(output, 0) if output else array([], dtype=dtype) _TOTAL_REPEAT_LENGTH_DOC = @_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)","\ Jax adds the optional `total_repeat_length` parameter which specifies the total number of repeat, and defaults to sum(repeats). It must be specified for repeat to be compilable. If `sum(repeats)` is larger than the specified `total_repeat_length` the remaining values will be discarded. In the case of `sum(repeats)` being smaller than the specified target length, the final value will be repeated. ",59,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def indices(dimensions, dtype=int32, sparse=False): dimensions = tuple( core.concrete_or_error(operator.index, d, ""dimensions argument of jnp.indices"") for d in dimensions) N = len(dimensions) output = [] s = dimensions for i, dim in enumerate(dimensions): idx = lax.iota(dtype, dim) if sparse: s = (1,)*i + (dim,) + (1,)*(N - i - 1) output.append(lax.broadcast_in_dim(idx, s, (i,))) if sparse: return tuple(output) return stack(output, 0) if output else array([], dtype=dtype) _TOTAL_REPEAT_LENGTH_DOC = @_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC) ``` ###Assistant : \ Jax adds the optional `total_repeat_length` parameter which specifies the total number of repeat, and defaults to sum(repeats). It must be specified for repeat to be compilable. If `sum(repeats)` is larger than the specified `total_repeat_length` the remaining values will be discarded. In the case of `sum(repeats)` being smaller than the specified target length, the final value will be repeated. " 890,"def test_open_connection(tctx): assert Playbook(tcp.TCPLayer(tctx, True)) << OpenConnection(tctx.server) tctx.server.timestamp_start = 1624544785 assert Playbook(tcp.TCPLayer(tctx, True)) << None "," If there is no server connection yet, establish one, because the server may send data first. ",16,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_open_connection(tctx): assert Playbook(tcp.TCPLayer(tctx, True)) << OpenConnection(tctx.server) tctx.server.timestamp_start = 1624544785 assert Playbook(tcp.TCPLayer(tctx, True)) << None ``` ###Assistant : If there is no server connection yet, establish one, because the server may send data first. " 891,"def get_connected_endpoints(self, obj): endpoints = obj.connected_endpoints if endpoints: serializer = get_serializer_for_model(endpoints[0], prefix='Nested') context = {'request': self.context['request']} return serializer(endpoints, many=True, context=context).data "," Return the appropriate serializer for the type of connected object. ",10,20,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_connected_endpoints(self, obj): endpoints = obj.connected_endpoints if endpoints: serializer = get_serializer_for_model(endpoints[0], prefix='Nested') context = {'request': self.context['request']} return serializer(endpoints, many=True, context=context).data ``` ###Assistant : Return the appropriate serializer for the type of connected object. " 892,"def test_basic(self): trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)] actual = nx.join(trees) expected = nx.full_rary_tree(2, 2**3 - 1) assert nx.is_isomorphic(actual, expected) ",Tests for joining multiple subtrees at a root node.,9,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_basic(self): trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)] actual = nx.join(trees) expected = nx.full_rary_tree(2, 2**3 - 1) assert nx.is_isomorphic(actual, expected) ``` ###Assistant : Tests for joining multiple subtrees at a root node. " 893,"def test_result_list_html(self): new_parent = Parent.objects.create(name=""parent"") new_child = Child.objects.create(name=""name"", parent=new_parent) request = self.factory.get(""/child/"") request.user = self.superuser m = ChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) cl.formset = None template = Template( ""{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"" ) context = Context({""cl"": cl, ""opts"": Child._meta}) table_output = template.render(context) link = reverse(""admin:admin_changelist_child_change"", args=(new_child.id,)) row_html = build_tbody_html( new_child.id, link, '%s' % new_parent ) self.assertNotEqual( table_output.find(row_html), -1, ""Failed to find expected row element: %s"" % table_output, ) "," Inclusion tag result_list generates a table when with default ModelAdmin settings. ",11,77,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_result_list_html(self): new_parent = Parent.objects.create(name=""parent"") new_child = Child.objects.create(name=""name"", parent=new_parent) request = self.factory.get(""/child/"") request.user = self.superuser m = ChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) cl.formset = None template = Template( ""{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"" ) context = Context({""cl"": cl, ""opts"": Child._meta}) table_output = template.render(context) link = reverse(""admin:admin_changelist_child_change"", args=(new_child.id,)) row_html = build_tbody_html( new_child.id, link, '%s' % new_parent ) self.assertNotEqual( table_output.find(row_html), -1, ""Failed to find expected row element: %s"" % table_output, ) ``` ###Assistant : Inclusion tag result_list generates a table when with default ModelAdmin settings. 
" 894,"def test_delete_uploaded_image(self): # Send request response = self.client.post( reverse( ""wagtailimages:delete_upload_multiple"", args=(self.uploaded_image.id,) ) ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response[""Content-Type""], ""application/json"") # Make sure the image is deleted self.assertFalse( UploadedImage.objects.filter(id=self.uploaded_image.id).exists() ) # Check JSON response_json = json.loads(response.content.decode()) self.assertTrue(response_json[""success""]) "," This tests that a POST request to the delete view deletes the UploadedImage ",13,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_delete_uploaded_image(self): # Send request response = self.client.post( reverse( ""wagtailimages:delete_upload_multiple"", args=(self.uploaded_image.id,) ) ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response[""Content-Type""], ""application/json"") # Make sure the image is deleted self.assertFalse( UploadedImage.objects.filter(id=self.uploaded_image.id).exists() ) # Check JSON response_json = json.loads(response.content.decode()) self.assertTrue(response_json[""success""]) ``` ###Assistant : This tests that a POST request to the delete view deletes the UploadedImage " 895,"def matrix(self) -> np.ndarray: if not np.any(self._matrices[self._centering]): matrix = self._matrices[""legacy""].copy() matrix[:, 2] -= self.pose.offset[self._centering] self._matrices[self._centering] = matrix logger.trace(""original matrix: %s, new matrix: %s"", # type: ignore self._matrices[""legacy""], matrix) return self._matrices[self._centering] "," :class:`numpy.ndarray`: The 3x2 transformation matrix for extracting and aligning the core face area out of the original frame, with no padding or sizing applied. The returned matrix is offset for the given :attr:`centering`. ",33,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def matrix(self) -> np.ndarray: if not np.any(self._matrices[self._centering]): matrix = self._matrices[""legacy""].copy() matrix[:, 2] -= self.pose.offset[self._centering] self._matrices[self._centering] = matrix logger.trace(""original matrix: %s, new matrix: %s"", # type: ignore self._matrices[""legacy""], matrix) return self._matrices[self._centering] ``` ###Assistant : :class:`numpy.ndarray`: The 3x2 transformation matrix for extracting and aligning the core face area out of the original frame, with no padding or sizing applied. The returned matrix is offset for the given :attr:`centering`. " 896,"def read_docstub(filename): in_documentation = False capturing = False indent_detection = '' doc_stub = [] with open(filename, 'r') as t_module_data: for line in t_module_data: if in_documentation: # start capturing the stub until indentation returns if capturing and line.startswith(indent_detection): doc_stub.append(line) elif capturing and not line.startswith(indent_detection): break elif line.lstrip().startswith('short_description:'): capturing = True # Detect that the short_description continues on the next line if it's indented more # than short_description itself. 
indent_detection = ' ' * (len(line) - len(line.lstrip()) + 1) doc_stub.append(line) elif line.startswith('DOCUMENTATION') and ('=' in line or ':' in line): in_documentation = True short_description = r''.join(doc_stub).strip().rstrip('.') data = AnsibleLoader(short_description, file_name=filename).get_single_data() return data "," Quickly find short_description using string methods instead of node parsing. This does not return a full set of documentation strings and is intended for operations like ansible-doc -l. ",28,100,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read_docstub(filename): in_documentation = False capturing = False indent_detection = '' doc_stub = [] with open(filename, 'r') as t_module_data: for line in t_module_data: if in_documentation: # start capturing the stub until indentation returns if capturing and line.startswith(indent_detection): doc_stub.append(line) elif capturing and not line.startswith(indent_detection): break elif line.lstrip().startswith('short_description:'): capturing = True # Detect that the short_description continues on the next line if it's indented more # than short_description itself. indent_detection = ' ' * (len(line) - len(line.lstrip()) + 1) doc_stub.append(line) elif line.startswith('DOCUMENTATION') and ('=' in line or ':' in line): in_documentation = True short_description = r''.join(doc_stub).strip().rstrip('.') data = AnsibleLoader(short_description, file_name=filename).get_single_data() return data ``` ###Assistant : Quickly find short_description using string methods instead of node parsing. This does not return a full set of documentation strings and is intended for operations like ansible-doc -l. " 897,"def test_02_train_predictor(self): query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.OK) # def test_03_retrain_predictor(self): # query = f""RETRAIN {self.test_model_name_1}"" # response = self.handler.native_query(query) # self.assertTrue(response.type == RESPONSE_TYPE.OK) "," CREATE PREDICTOR {self.test_model_name_1} FROM {PG_HANDLER_NAME} (SELECT * FROM demo_data.home_rentals limit 50) PREDICT rental_price ",13,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_02_train_predictor(self): query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.OK) # def test_03_retrain_predictor(self): # query = f""RETRAIN {self.test_model_name_1}"" # response = self.handler.native_query(query) # self.assertTrue(response.type == RESPONSE_TYPE.OK) ``` ###Assistant : CREATE PREDICTOR {self.test_model_name_1} FROM {PG_HANDLER_NAME} (SELECT * FROM demo_data.home_rentals limit 50) PREDICT rental_price " 898,"def get_dependencies(dsk, key=None, task=no_default, as_list=False): if key is not None: arg = dsk[key] elif task is not no_default: arg = task else: raise ValueError(""Provide either key or task"") return keys_in_tasks(dsk, [arg], as_list=as_list) ","Get the immediate tasks on which this task depends Examples -------- >>> inc = lambda x: x + 1 >>> add = lambda x, y: x + y >>> dsk = {'x': 1, ... 'y': (inc, 'x'), ... 'z': (add, 'x', 'y'), ... 'w': (inc, 'z'), ... 
'a': (add, (inc, 'x'), 1)} >>> get_dependencies(dsk, 'x') set() >>> get_dependencies(dsk, 'y') {'x'} >>> get_dependencies(dsk, 'z') # doctest: +SKIP {'x', 'y'} >>> get_dependencies(dsk, 'w') # Only direct dependencies {'z'} >>> get_dependencies(dsk, 'a') # Ignore non-keys {'x'} >>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly {'x'} ",92,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_dependencies(dsk, key=None, task=no_default, as_list=False): if key is not None: arg = dsk[key] elif task is not no_default: arg = task else: raise ValueError(""Provide either key or task"") return keys_in_tasks(dsk, [arg], as_list=as_list) ``` ###Assistant : Get the immediate tasks on which this task depends Examples -------- >>> inc = lambda x: x + 1 >>> add = lambda x, y: x + y >>> dsk = {'x': 1, ... 'y': (inc, 'x'), ... 'z': (add, 'x', 'y'), ... 'w': (inc, 'z'), ... 'a': (add, (inc, 'x'), 1)} >>> get_dependencies(dsk, 'x') set() >>> get_dependencies(dsk, 'y') {'x'} >>> get_dependencies(dsk, 'z') # doctest: +SKIP {'x', 'y'} >>> get_dependencies(dsk, 'w') # Only direct dependencies {'z'} >>> get_dependencies(dsk, 'a') # Ignore non-keys {'x'} >>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly {'x'} " 899,"def test_dict_checkpoint_fs(self): checkpoint = self._prepare_dict_checkpoint() # Convert into fs checkpoint path = checkpoint.to_directory() self.assertIsInstance(path, str) # Create from path checkpoint = Checkpoint.from_directory(path) self.assertTrue(checkpoint._local_path) self._assert_dict_checkpoint(checkpoint) ",Test conversion from dict to FS checkpoint and back.,9,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dict_checkpoint_fs(self): checkpoint = self._prepare_dict_checkpoint() # Convert into fs checkpoint path = checkpoint.to_directory() self.assertIsInstance(path, str) # Create from path checkpoint = Checkpoint.from_directory(path) self.assertTrue(checkpoint._local_path) self._assert_dict_checkpoint(checkpoint) ``` ###Assistant : Test conversion from dict to FS checkpoint and back. " 900,"def box2corners(box): B = box.shape[0] x, y, w, h, alpha = paddle.split(box, 5, axis=-1) x4 = paddle.to_tensor( [0.5, 0.5, -0.5, -0.5], dtype=paddle.float32).reshape( (1, 1, 4)) # (1,1,4) x4 = x4 * w # (B, N, 4) y4 = paddle.to_tensor( [-0.5, 0.5, 0.5, -0.5], dtype=paddle.float32).reshape((1, 1, 4)) y4 = y4 * h # (B, N, 4) corners = paddle.stack([x4, y4], axis=-1) # (B, N, 4, 2) sin = paddle.sin(alpha) cos = paddle.cos(alpha) row1 = paddle.concat([cos, sin], axis=-1) row2 = paddle.concat([-sin, cos], axis=-1) # (B, N, 2) rot_T = paddle.stack([row1, row2], axis=-2) # (B, N, 2, 2) rotated = paddle.bmm(corners.reshape([-1, 4, 2]), rot_T.reshape([-1, 2, 2])) rotated = rotated.reshape([B, -1, 4, 2]) # (B*N, 4, 2) -> (B, N, 4, 2) rotated[..., 0] += x rotated[..., 1] += y return rotated ","convert box coordinate to corners Args: box (Tensor): (B, N, 5) with (x, y, w, h, alpha) angle is in [0, 90) Returns: corners (Tensor): (B, N, 4, 2) with (x1, y1, x2, y2, x3, y3, x4, y4) ",38,128,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def box2corners(box): B = box.shape[0] x, y, w, h, alpha = paddle.split(box, 5, axis=-1) x4 = paddle.to_tensor( [0.5, 0.5, -0.5, -0.5], dtype=paddle.float32).reshape( (1, 1, 4)) # (1,1,4) x4 = x4 * w # (B, N, 4) y4 = paddle.to_tensor( [-0.5, 0.5, 0.5, -0.5], dtype=paddle.float32).reshape((1, 1, 4)) y4 = y4 * h # (B, N, 4) corners = paddle.stack([x4, y4], axis=-1) # (B, N, 4, 2) sin = paddle.sin(alpha) cos = paddle.cos(alpha) row1 = paddle.concat([cos, sin], axis=-1) row2 = paddle.concat([-sin, cos], axis=-1) # (B, N, 2) rot_T = paddle.stack([row1, row2], axis=-2) # (B, N, 2, 2) rotated = paddle.bmm(corners.reshape([-1, 4, 2]), rot_T.reshape([-1, 2, 2])) rotated = rotated.reshape([B, -1, 4, 2]) # (B*N, 4, 2) -> (B, N, 4, 2) rotated[..., 0] += x rotated[..., 1] += y return rotated ``` ###Assistant : convert box coordinate to corners Args: box (Tensor): (B, N, 5) with (x, y, w, h, alpha) angle is in [0, 90) Returns: corners (Tensor): (B, N, 4, 2) with (x1, y1, x2, y2, x3, y3, x4, y4) " 901,"def get_crash_rate_alert_metrics_aggregation_value(self, subscription_update): rows = subscription_update[""values""][""data""] if BaseMetricsEntitySubscription.is_crash_rate_format_v2(rows): version = ""v2"" result = self._get_crash_rate_alert_metrics_aggregation_value_v2(subscription_update) else: version = ""v1"" result = self._get_crash_rate_alert_metrics_aggregation_value_v1(subscription_update) metrics.incr( ""incidents.alert_rules.get_crash_rate_alert_metrics_aggregation_value"", tags={""format"": version}, sample_rate=1.0, ) return result ","Handle both update formats. Once all subscriptions have been updated to v2, we can remove v1 and replace this function with current v2. ",23,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_crash_rate_alert_metrics_aggregation_value(self, subscription_update): rows = subscription_update[""values""][""data""] if BaseMetricsEntitySubscription.is_crash_rate_format_v2(rows): version = ""v2"" result = self._get_crash_rate_alert_metrics_aggregation_value_v2(subscription_update) else: version = ""v1"" result = self._get_crash_rate_alert_metrics_aggregation_value_v1(subscription_update) metrics.incr( ""incidents.alert_rules.get_crash_rate_alert_metrics_aggregation_value"", tags={""format"": version}, sample_rate=1.0, ) return result ``` ###Assistant : Handle both update formats. Once all subscriptions have been updated to v2, we can remove v1 and replace this function with current v2. " 902,"def strict_promotion_if_dtypes_match(dtypes): if all(dtype == dtypes[0] for dtype in dtypes): return jax.numpy_dtype_promotion('strict') return jax.numpy_dtype_promotion('standard') "," Context manager to enable strict promotion if all dtypes match, and enable standard dtype promotion otherwise. ",16,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def strict_promotion_if_dtypes_match(dtypes): if all(dtype == dtypes[0] for dtype in dtypes): return jax.numpy_dtype_promotion('strict') return jax.numpy_dtype_promotion('standard') ``` ###Assistant : Context manager to enable strict promotion if all dtypes match, and enable standard dtype promotion otherwise. 
" 903,"def test_stroptions_deprecated_subset(): with pytest.raises(ValueError, match=""deprecated options must be a subset""): StrOptions({""a"", ""b"", ""c""}, deprecated={""a"", ""d""}) ",Check that the deprecated parameter must be a subset of options.,11,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stroptions_deprecated_subset(): with pytest.raises(ValueError, match=""deprecated options must be a subset""): StrOptions({""a"", ""b"", ""c""}, deprecated={""a"", ""d""}) ``` ###Assistant : Check that the deprecated parameter must be a subset of options. " 904,"def _validate_target_and_loss(self, y, loss): # `self.loss` references the loss added via `compile` call. If users have # provided such, the target must be provided; otherwise it's a user error. # Note that `self.loss` does not include losses added via `add_loss`, and it # is a valid use when such loss from `add_loss` exists and target does not. if self.loss and y is None: raise ValueError( ""Target data is missing. Your model was compiled with "" f""loss={self.loss}, "" ""and therefore expects target data to be provided in `fit()`."" ) # For training, there must be compiled loss or regularization loss to exist # in order to apply the gradients. If one is not found, it means no loss # was supplied via `compile` or `add_loss`. elif loss is None: raise ValueError( ""No loss found. You may have forgotten to provide a `loss` argument "" ""in the `compile()` method."" ) ","Raises error if target or loss is not found. This method verifies that the target and loss are properly populated when applicable, or raises errors. Args: y: the target for training. loss: the total loss tensor including loss added via `compile` and `add_loss`. ",43,148,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _validate_target_and_loss(self, y, loss): # `self.loss` references the loss added via `compile` call. If users have # provided such, the target must be provided; otherwise it's a user error. # Note that `self.loss` does not include losses added via `add_loss`, and it # is a valid use when such loss from `add_loss` exists and target does not. if self.loss and y is None: raise ValueError( ""Target data is missing. Your model was compiled with "" f""loss={self.loss}, "" ""and therefore expects target data to be provided in `fit()`."" ) # For training, there must be compiled loss or regularization loss to exist # in order to apply the gradients. If one is not found, it means no loss # was supplied via `compile` or `add_loss`. elif loss is None: raise ValueError( ""No loss found. You may have forgotten to provide a `loss` argument "" ""in the `compile()` method."" ) ``` ###Assistant : Raises error if target or loss is not found. This method verifies that the target and loss are properly populated when applicable, or raises errors. Args: y: the target for training. loss: the total loss tensor including loss added via `compile` and `add_loss`. " 905,"def _validate_datetimelike_monotonic(self): # GH 46061 if self._on.hasnans: self._raise_monotonic_error(""values must not have NaT"") for group_indices in self._grouper.indices.values(): group_on = self._on.take(group_indices) if not ( group_on.is_monotonic_increasing or group_on.is_monotonic_decreasing ): on = ""index"" if self.on is None else self.on raise ValueError( f""Each group within {on} must be monotonic. 
"" f""Sort the values in {on} first."" ) "," Validate that each group in self._on is monotonic ",8,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _validate_datetimelike_monotonic(self): # GH 46061 if self._on.hasnans: self._raise_monotonic_error(""values must not have NaT"") for group_indices in self._grouper.indices.values(): group_on = self._on.take(group_indices) if not ( group_on.is_monotonic_increasing or group_on.is_monotonic_decreasing ): on = ""index"" if self.on is None else self.on raise ValueError( f""Each group within {on} must be monotonic. "" f""Sort the values in {on} first."" ) ``` ###Assistant : Validate that each group in self._on is monotonic " 906,"def framework_info(filename): is_framework = STRICT_FRAMEWORK_RE.match(filename) if not is_framework: return None return is_framework.groupdict() "," A framework name can take one of the following four forms: Location/Name.framework/Versions/SomeVersion/Name_Suffix Location/Name.framework/Versions/SomeVersion/Name Location/Name.framework/Name_Suffix Location/Name.framework/Name returns None if not found, or a mapping equivalent to: dict( location='Location', name='Name.framework/Versions/SomeVersion/Name_Suffix', shortname='Name', version='SomeVersion', suffix='Suffix', ) Note that SomeVersion and Suffix are optional and may be None if not present ",46,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def framework_info(filename): is_framework = STRICT_FRAMEWORK_RE.match(filename) if not is_framework: return None return is_framework.groupdict() ``` ###Assistant : A framework name can take one of the following four forms: Location/Name.framework/Versions/SomeVersion/Name_Suffix Location/Name.framework/Versions/SomeVersion/Name Location/Name.framework/Name_Suffix Location/Name.framework/Name returns None if not found, or a mapping equivalent to: dict( location='Location', name='Name.framework/Versions/SomeVersion/Name_Suffix', shortname='Name', version='SomeVersion', suffix='Suffix', ) Note that SomeVersion and Suffix are optional and may be None if not present " 907,"def contained_in(filename, directory): filename = os.path.normcase(os.path.abspath(filename)) directory = os.path.normcase(os.path.abspath(directory)) return os.path.commonprefix([filename, directory]) == directory ",Test if a file is located within the given directory.,10,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def contained_in(filename, directory): filename = os.path.normcase(os.path.abspath(filename)) directory = os.path.normcase(os.path.abspath(directory)) return os.path.commonprefix([filename, directory]) == directory ``` ###Assistant : Test if a file is located within the given directory. " 908,"def glu(x, axis=-1): a, b = tf.split(x, 2, axis=axis) return a * tf.math.sigmoid(b) if version.parse(tf.version.VERSION) >= version.parse(""2.4""): "," Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B). Args: `x`: float Tensor to perform activation `axis`: dimension across which `x` be split in half Returns: `x` with the GLU activation applied (with its size halved across the dimension `axis`). ",63,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def glu(x, axis=-1): a, b = tf.split(x, 2, axis=axis) return a * tf.math.sigmoid(b) if version.parse(tf.version.VERSION) >= version.parse(""2.4""): ``` ###Assistant : Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B). Args: `x`: float Tensor to perform activation `axis`: dimension across which `x` be split in half Returns: `x` with the GLU activation applied (with its size halved across the dimension `axis`). " 909,"def matplot(self, plot, opts=None, env=None, win=None): opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) # write plot to SVG buffer: buffer = StringIO() plot.savefig(buffer, format=""svg"") buffer.seek(0) svg = buffer.read() buffer.close() if opts.get(""resizable"", False): if not BS4_AVAILABLE: raise ImportError(""No module named 'bs4'"") else: try: soup = bs4.BeautifulSoup(svg, ""xml"") except bs4.FeatureNotFound as e: import six six.raise_from(ImportError(""No module named 'lxml'""), e) height = soup.svg.attrs.pop(""height"", None) width = soup.svg.attrs.pop(""width"", None) svg = str(soup) else: height = None width = None # show SVG: if ""height"" not in opts: height = height or re.search(r'height\=""([0-9\.]*)pt""', svg) if height is not None: if not isstr(height): height = height.group(1) height = height.replace(""pt"", ""00"") opts[""height""] = 1.4 * int(math.ceil(float(height))) if ""width"" not in opts: width = width or re.search(r'width\=""([0-9\.]*)pt""', svg) if width is not None: if not isstr(width): width = width.group(1) width = width.replace(""pt"", ""00"") opts[""width""] = 1.35 * int(math.ceil(float(width))) return self.svg(svgstr=svg, opts=opts, env=env, win=win) "," This function draws a Matplotlib `plot`. The function supports one plot-specific option: `resizable`. When set to `True` the plot is resized with the pane. You need `beautifulsoup4` and `lxml` packages installed to use this option. ",35,149,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def matplot(self, plot, opts=None, env=None, win=None): opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) # write plot to SVG buffer: buffer = StringIO() plot.savefig(buffer, format=""svg"") buffer.seek(0) svg = buffer.read() buffer.close() if opts.get(""resizable"", False): if not BS4_AVAILABLE: raise ImportError(""No module named 'bs4'"") else: try: soup = bs4.BeautifulSoup(svg, ""xml"") except bs4.FeatureNotFound as e: import six six.raise_from(ImportError(""No module named 'lxml'""), e) height = soup.svg.attrs.pop(""height"", None) width = soup.svg.attrs.pop(""width"", None) svg = str(soup) else: height = None width = None # show SVG: if ""height"" not in opts: height = height or re.search(r'height\=""([0-9\.]*)pt""', svg) if height is not None: if not isstr(height): height = height.group(1) height = height.replace(""pt"", ""00"") opts[""height""] = 1.4 * int(math.ceil(float(height))) if ""width"" not in opts: width = width or re.search(r'width\=""([0-9\.]*)pt""', svg) if width is not None: if not isstr(width): width = width.group(1) width = width.replace(""pt"", ""00"") opts[""width""] = 1.35 * int(math.ceil(float(width))) return self.svg(svgstr=svg, opts=opts, env=env, win=win) ``` ###Assistant : This function draws a Matplotlib `plot`. 
The function supports one plot-specific option: `resizable`. When set to `True` the plot is resized with the pane. You need `beautifulsoup4` and `lxml` packages installed to use this option. ",35,149,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def matplot(self, plot, opts=None, env=None, win=None): opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) # write plot to SVG buffer: buffer = StringIO() plot.savefig(buffer, format=""svg"") buffer.seek(0) svg = buffer.read() buffer.close() if opts.get(""resizable"", False): if not BS4_AVAILABLE: raise ImportError(""No module named 'bs4'"") else: try: soup = bs4.BeautifulSoup(svg, ""xml"") except bs4.FeatureNotFound as e: import six six.raise_from(ImportError(""No module named 'lxml'""), e) height = soup.svg.attrs.pop(""height"", None) width = soup.svg.attrs.pop(""width"", None) svg = str(soup) else: height = None width = None # show SVG: if ""height"" not in opts: height = height or re.search(r'height\=""([0-9\.]*)pt""', svg) if height is not None: if not isstr(height): height = height.group(1) height = height.replace(""pt"", ""00"") opts[""height""] = 1.4 * int(math.ceil(float(height))) if ""width"" not in opts: width = width or re.search(r'width\=""([0-9\.]*)pt""', svg) if width is not None: if not isstr(width): width = width.group(1) width = width.replace(""pt"", ""00"") opts[""width""] = 1.35 * int(math.ceil(float(width))) return self.svg(svgstr=svg, opts=opts, env=env, win=win) ``` ###Assistant : This function draws a Matplotlib `plot`. The function supports one plot-specific option: `resizable`. When set to `True` the plot is resized with the pane. You need `beautifulsoup4` and `lxml` packages installed to use this option. " 910,"def acl_clone(consul_url=None, token=None, **kwargs): ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error(""No Consul URL found."") ret[""message""] = ""No Consul URL found."" ret[""res""] = False return ret if ""id"" not in kwargs: ret[""message""] = 'Required parameter ""id"" is missing.' ret[""res""] = False return ret function = ""acl/clone/{}"".format(kwargs[""id""]) res = _query( consul_url=consul_url, token=token, data=data, method=""PUT"", function=function ) if res[""res""]: ret[""res""] = True ret[""message""] = ""ACL {} cloned."".format(kwargs[""name""]) ret[""ID""] = res[""data""] else: ret[""res""] = False ret[""message""] = ""Cloning ACL item {} failed."".format(kwargs[""name""]) return ret "," Clone an existing ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to clone. :return: Boolean, message of success or failure, and new ID of cloned ACL. CLI Example: .. code-block:: bash salt '*' consul.acl_clone id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ",42,89,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def acl_clone(consul_url=None, token=None, **kwargs): ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error(""No Consul URL found."") ret[""message""] = ""No Consul URL found."" ret[""res""] = False return ret if ""id"" not in kwargs: ret[""message""] = 'Required parameter ""id"" is missing.' ret[""res""] = False return ret function = ""acl/clone/{}"".format(kwargs[""id""]) res = _query( consul_url=consul_url, token=token, data=data, method=""PUT"", function=function ) if res[""res""]: ret[""res""] = True ret[""message""] = ""ACL {} cloned."".format(kwargs[""name""]) ret[""ID""] = res[""data""] else: ret[""res""] = False ret[""message""] = ""Cloning ACL item {} failed."".format(kwargs[""name""]) return ret ``` ###Assistant : Clone an existing ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to clone. :return: Boolean, message of success or failure, and new ID of cloned ACL. CLI Example: .. code-block:: bash salt '*' consul.acl_clone id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' " 911,"def slice_indexer(self, start=None, end=None, step=None, kind=lib.no_default): self._deprecated_arg(kind, ""kind"", ""slice_indexer"") # For historical reasons DatetimeIndex supports slices between two # instances of datetime.time as if it were applying a slice mask to # an array of (self.hour, self.minute, self.seconds, self.microsecond). if isinstance(start, time) and isinstance(end, time): if step is not None and step != 1: raise ValueError(""Must have step size of 1 with time slices"") return self.indexer_between_time(start, end) if isinstance(start, time) or isinstance(end, time): raise KeyError(""Cannot mix time and non-time slice keys"") "," Return indexer for specified label slice. Index.slice_indexer, customized to handle time slicing. In addition to functionality provided by Index.slice_indexer, does the following: - if both `start` and `end` are instances of `datetime.time`, it invokes `indexer_between_time` - if `start` and `end` are both either string or None perform value-based selection in non-monotonic cases. 
",52,81,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def slice_indexer(self, start=None, end=None, step=None, kind=lib.no_default): self._deprecated_arg(kind, ""kind"", ""slice_indexer"") # For historical reasons DatetimeIndex supports slices between two # instances of datetime.time as if it were applying a slice mask to # an array of (self.hour, self.minute, self.seconds, self.microsecond). if isinstance(start, time) and isinstance(end, time): if step is not None and step != 1: raise ValueError(""Must have step size of 1 with time slices"") return self.indexer_between_time(start, end) if isinstance(start, time) or isinstance(end, time): raise KeyError(""Cannot mix time and non-time slice keys"") ``` ###Assistant : Return indexer for specified label slice. Index.slice_indexer, customized to handle time slicing. In addition to functionality provided by Index.slice_indexer, does the following: - if both `start` and `end` are instances of `datetime.time`, it invokes `indexer_between_time` - if `start` and `end` are both either string or None perform value-based selection in non-monotonic cases. " 912,"def fix_old_dry_orders(engine): with engine.begin() as connection: connection.execute( text( ) ) connection.execute( text( ) ) "," update orders set ft_is_open = 0 where ft_is_open = 1 and (ft_trade_id, order_id) not in ( select id, stoploss_order_id from trades where stoploss_order_id is not null ) and ft_order_side = 'stoploss' and order_id like 'dry_%' update orders set ft_is_open = 0 where ft_is_open = 1 and (ft_trade_id, order_id) not in ( select id, open_order_id from trades where open_order_id is not null ) and ft_order_side != 'stoploss' and order_id like 'dry_%' ",70,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fix_old_dry_orders(engine): with engine.begin() as connection: connection.execute( text( ) ) connection.execute( text( ) ) ``` ###Assistant : update orders set ft_is_open = 0 where ft_is_open = 1 and (ft_trade_id, order_id) not in ( select id, stoploss_order_id from trades where stoploss_order_id is not null ) and ft_order_side = 'stoploss' and order_id like 'dry_%' update orders set ft_is_open = 0 where ft_is_open = 1 and (ft_trade_id, order_id) not in ( select id, open_order_id from trades where open_order_id is not null ) and ft_order_side != 'stoploss' and order_id like 'dry_%' " 913,"def build_data_frame(self, data, flags=None, stream_id=1, padding_len=0): flags = set(flags) if flags is not None else set() f = DataFrame(stream_id) f.data = data f.flags = flags if padding_len: flags.add(""PADDED"") f.pad_length = padding_len return f "," Builds a single data frame out of a chunk of data. ",11,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def build_data_frame(self, data, flags=None, stream_id=1, padding_len=0): flags = set(flags) if flags is not None else set() f = DataFrame(stream_id) f.data = data f.flags = flags if padding_len: flags.add(""PADDED"") f.pad_length = padding_len return f ``` ###Assistant : Builds a single data frame out of a chunk of data. " 914,"def set_dryrun_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument( 'host', type=str, help='The full host address of the Gateway, e.g. 
grpc://localhost:12345', ) parser.add_argument( '--timeout', type=int, default=3000, help=, ) return parser ","Set the parser for `dryrun` :param parser: an existing parser to build upon :return: the parser Timeout in millisecond of one check -1 for waiting forever ",26,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_dryrun_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument( 'host', type=str, help='The full host address of the Gateway, e.g. grpc://localhost:12345', ) parser.add_argument( '--timeout', type=int, default=3000, help=, ) return parser ``` ###Assistant : Set the parser for `dryrun` :param parser: an existing parser to build upon :return: the parser Timeout in millisecond of one check -1 for waiting forever " 915,"def wheel_dist_info_dir(source, name): # type: (ZipFile, str) -> str # Zip file path separators must be / subdirs = {p.split(""/"", 1)[0] for p in source.namelist()} info_dirs = [s for s in subdirs if s.endswith("".dist-info"")] if not info_dirs: raise UnsupportedWheel("".dist-info directory not found"") if len(info_dirs) > 1: raise UnsupportedWheel( ""multiple .dist-info directories found: {}"".format("", "".join(info_dirs)) ) info_dir = info_dirs[0] info_dir_name = canonicalize_name(info_dir) canonical_name = canonicalize_name(name) if not info_dir_name.startswith(canonical_name): raise UnsupportedWheel( "".dist-info directory {!r} does not start with {!r}"".format( info_dir, canonical_name ) ) return info_dir ","Returns the name of the contained .dist-info directory. Raises AssertionError or UnsupportedWheel if not found, >1 found, or it doesn't match the provided name. ",24,83,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wheel_dist_info_dir(source, name): # type: (ZipFile, str) -> str # Zip file path separators must be / subdirs = {p.split(""/"", 1)[0] for p in source.namelist()} info_dirs = [s for s in subdirs if s.endswith("".dist-info"")] if not info_dirs: raise UnsupportedWheel("".dist-info directory not found"") if len(info_dirs) > 1: raise UnsupportedWheel( ""multiple .dist-info directories found: {}"".format("", "".join(info_dirs)) ) info_dir = info_dirs[0] info_dir_name = canonicalize_name(info_dir) canonical_name = canonicalize_name(name) if not info_dir_name.startswith(canonical_name): raise UnsupportedWheel( "".dist-info directory {!r} does not start with {!r}"".format( info_dir, canonical_name ) ) return info_dir ``` ###Assistant : Returns the name of the contained .dist-info directory. Raises AssertionError or UnsupportedWheel if not found, >1 found, or it doesn't match the provided name. 
" 916,"def in1d(ar1, ar2, assume_unique=False, invert=False): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Ensure that iteration through object arrays yields size-1 arrays if ar2.dtype == object: ar2 = ar2.reshape(-1, 1) # Check if we can use a fast integer algorithm: integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and np.issubdtype(ar2.dtype, np.integer)) if integer_arrays: ar2_min = np.min(ar2) ar2_max = np.max(ar2) ar2_range = ar2_max - ar2_min ar2_size = ar2.size # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927, see discussion on # https://github.com/numpy/numpy/pull/12065 optimal_parameters = ( np.log10(ar2_size + 1) > ((np.log10(ar2_range + 1) - 2.27) / 0.927) ) if optimal_parameters: if invert: outgoing_array = np.ones_like(ar1, dtype=np.bool_) else: outgoing_array = np.zeros_like(ar1, dtype=np.bool_) # Make elements 1 where the integer exists in ar2 if invert: isin_helper_ar = np.ones(ar2_range + 1, dtype=np.bool_) isin_helper_ar[ar2 - ar2_min] = 0 else: isin_helper_ar = np.zeros(ar2_range + 1, dtype=np.bool_) isin_helper_ar[ar2 - ar2_min] = 1 # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - ar2_min] return outgoing_array # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject # This code is run when # a) the first condition is true, making the code significantly faster # b) the second condition is true (i.e. `ar1` or `ar2` may contain # arbitrary objects), since then sorting is not guaranteed to work if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) ret = np.empty(ar.shape, dtype=bool) ret[order] = flag if assume_unique: return ret[:len(ar1)] else: return ret[rev_idx] "," Test whether each element of a 1-D array is also present in a second array. Returns a boolean array the same length as `ar1` that is True where an element of `ar1` is in `ar2` and False otherwise. We recommend using :func:`isin` instead of `in1d` for new code. Parameters ---------- ar1 : (M,) array_like Input array. ar2 : array_like The values against which to test each value of `ar1`. assume_unique : bool, optional If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. invert : bool, optional If True, the values in the returned array are inverted (that is, False where an element of `ar1` is in `ar2` and True otherwise). Default is False. ``np.in1d(a, b, invert=True)`` is equivalent to (but is faster than) ``np.invert(in1d(a, b))``. .. versionadded:: 1.8.0 Returns ------- in1d : (M,) ndarray, bool The values `ar1[in1d]` are in `ar2`. 
See Also -------- isin : Version of this function that preserves the shape of ar1. numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Notes ----- `in1d` can be considered as an element-wise function version of the python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly equivalent to ``np.array([item in b for item in a])``. However, this idea fails if `ar2` is a set, or similar (non-sequence) container: As ``ar2`` is converted to an array, in those cases ``asarray(ar2)`` is an object array rather than the expected array of contained values. .. versionadded:: 1.4.0 Examples -------- >>> test = np.array([0, 1, 2, 5, 0]) >>> states = [0, 2] >>> mask = np.in1d(test, states) >>> mask array([ True, False, True, False, True]) >>> test[mask] array([0, 2, 0]) >>> mask = np.in1d(test, states, invert=True) >>> mask array([False, True, False, True, False]) >>> test[mask] array([1, 5]) ",303,367,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def in1d(ar1, ar2, assume_unique=False, invert=False): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Ensure that iteration through object arrays yields size-1 arrays if ar2.dtype == object: ar2 = ar2.reshape(-1, 1) # Check if we can use a fast integer algorithm: integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and np.issubdtype(ar2.dtype, np.integer)) if integer_arrays: ar2_min = np.min(ar2) ar2_max = np.max(ar2) ar2_range = ar2_max - ar2_min ar2_size = ar2.size # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927, see discussion on # https://github.com/numpy/numpy/pull/12065 optimal_parameters = ( np.log10(ar2_size + 1) > ((np.log10(ar2_range + 1) - 2.27) / 0.927) ) if optimal_parameters: if invert: outgoing_array = np.ones_like(ar1, dtype=np.bool_) else: outgoing_array = np.zeros_like(ar1, dtype=np.bool_) # Make elements 1 where the integer exists in ar2 if invert: isin_helper_ar = np.ones(ar2_range + 1, dtype=np.bool_) isin_helper_ar[ar2 - ar2_min] = 0 else: isin_helper_ar = np.zeros(ar2_range + 1, dtype=np.bool_) isin_helper_ar[ar2 - ar2_min] = 1 # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - ar2_min] return outgoing_array # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject # This code is run when # a) the first condition is true, making the code significantly faster # b) the second condition is true (i.e. `ar1` or `ar2` may contain # arbitrary objects), since then sorting is not guaranteed to work if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. 
order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) ret = np.empty(ar.shape, dtype=bool) ret[order] = flag if assume_unique: return ret[:len(ar1)] else: return ret[rev_idx] ``` ###Assistant : Test whether each element of a 1-D array is also present in a second array. Returns a boolean array the same length as `ar1` that is True where an element of `ar1` is in `ar2` and False otherwise. We recommend using :func:`isin` instead of `in1d` for new code. Parameters ---------- ar1 : (M,) array_like Input array. ar2 : array_like The values against which to test each value of `ar1`. assume_unique : bool, optional If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. invert : bool, optional If True, the values in the returned array are inverted (that is, False where an element of `ar1` is in `ar2` and True otherwise). Default is False. ``np.in1d(a, b, invert=True)`` is equivalent to (but is faster than) ``np.invert(in1d(a, b))``. .. versionadded:: 1.8.0 Returns ------- in1d : (M,) ndarray, bool The values `ar1[in1d]` are in `ar2`. See Also -------- isin : Version of this function that preserves the shape of ar1. numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Notes ----- `in1d` can be considered as an element-wise function version of the python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly equivalent to ``np.array([item in b for item in a])``. However, this idea fails if `ar2` is a set, or similar (non-sequence) container: As ``ar2`` is converted to an array, in those cases ``asarray(ar2)`` is an object array rather than the expected array of contained values. .. versionadded:: 1.4.0 Examples -------- >>> test = np.array([0, 1, 2, 5, 0]) >>> states = [0, 2] >>> mask = np.in1d(test, states) >>> mask array([ True, False, True, False, True]) >>> test[mask] array([0, 2, 0]) >>> mask = np.in1d(test, states, invert=True) >>> mask array([False, True, False, True, False]) >>> test[mask] array([1, 5]) " 917,"def out_degree_centrality(G): if len(G) <= 1: return {n: 1 for n in G} s = 1.0 / (len(G) - 1.0) centrality = {n: d * s for n, d in G.out_degree()} return centrality ","Compute the out-degree centrality for nodes. The out-degree centrality for a node v is the fraction of nodes its outgoing edges are connected to. Parameters ---------- G : graph A NetworkX graph Returns ------- nodes : dictionary Dictionary of nodes with out-degree centrality as values. Raises ------ NetworkXNotImplemented If G is undirected. Examples -------- >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) >>> nx.out_degree_centrality(G) {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0} See Also -------- degree_centrality, in_degree_centrality Notes ----- The degree centrality values are normalized by dividing by the maximum possible degree in a simple graph n-1 where n is the number of nodes in G. For multigraphs or graphs with self loops the maximum degree might be higher than n-1 and values of degree centrality greater than 1 are possible. ",136,33,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def out_degree_centrality(G): if len(G) <= 1: return {n: 1 for n in G} s = 1.0 / (len(G) - 1.0) centrality = {n: d * s for n, d in G.out_degree()} return centrality ``` ###Assistant : Compute the out-degree centrality for nodes. The out-degree centrality for a node v is the fraction of nodes its outgoing edges are connected to. Parameters ---------- G : graph A NetworkX graph Returns ------- nodes : dictionary Dictionary of nodes with out-degree centrality as values. Raises ------ NetworkXNotImplemented If G is undirected. Examples -------- >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) >>> nx.out_degree_centrality(G) {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0} See Also -------- degree_centrality, in_degree_centrality Notes ----- The degree centrality values are normalized by dividing by the maximum possible degree in a simple graph n-1 where n is the number of nodes in G. For multigraphs or graphs with self loops the maximum degree might be higher than n-1 and values of degree centrality greater than 1 are possible. " 918,"def _read_html(self, file_url): with open(file_url.replace(""file://"", """").replace("" "", """")) as f: return f.read() if matplotlylib: ","Read and return the HTML contents from a file_url in the form e.g. file:///Users/chriddyp/Repos/plotly.py/plotly-temp.html ",14,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _read_html(self, file_url): with open(file_url.replace(""file://"", """").replace("" "", """")) as f: return f.read() if matplotlylib: ``` ###Assistant : Read and return the HTML contents from a file_url in the form e.g. file:///Users/chriddyp/Repos/plotly.py/plotly-temp.html " 919,"def set_omp_num_threads_if_unset() -> bool: num_threads_from_env = os.environ.get(""OMP_NUM_THREADS"") if num_threads_from_env is not None: # No ops if it's set return False # If unset, try setting the correct CPU count assigned. runtime_ctx = ray.get_runtime_context() if runtime_ctx.worker.mode != ray._private.worker.WORKER_MODE: # Non worker mode, no ops. return False num_assigned_cpus = runtime_ctx.get_assigned_resources().get(""CPU"") if num_assigned_cpus is None: # This is an actor task w/o any num_cpus specified, set it to 1 logger.debug( ""[ray] Forcing OMP_NUM_THREADS=1 to avoid performance "" ""degradation with many workers (issue #6998). You can override this "" ""by explicitly setting OMP_NUM_THREADS, or changing num_cpus."" ) num_assigned_cpus = 1 import math # For num_cpu < 1: Set to 1. # For num_cpus >= 1: Set to the floor of the actual assigned cpus. omp_num_threads = max(math.floor(num_assigned_cpus), 1) os.environ[""OMP_NUM_THREADS""] = str(omp_num_threads) return True ","Set the OMP_NUM_THREADS to default to num cpus assigned to the worker This function sets the environment variable OMP_NUM_THREADS for the worker, if the env is not previously set and it's running in worker (WORKER_MODE). Returns True if OMP_NUM_THREADS is set in this function. ",44,129,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_omp_num_threads_if_unset() -> bool: num_threads_from_env = os.environ.get(""OMP_NUM_THREADS"") if num_threads_from_env is not None: # No ops if it's set return False # If unset, try setting the correct CPU count assigned. 
runtime_ctx = ray.get_runtime_context() if runtime_ctx.worker.mode != ray._private.worker.WORKER_MODE: # Non worker mode, no ops. return False num_assigned_cpus = runtime_ctx.get_assigned_resources().get(""CPU"") if num_assigned_cpus is None: # This is an actor task w/o any num_cpus specified, set it to 1 logger.debug( ""[ray] Forcing OMP_NUM_THREADS=1 to avoid performance "" ""degradation with many workers (issue #6998). You can override this "" ""by explicitly setting OMP_NUM_THREADS, or changing num_cpus."" ) num_assigned_cpus = 1 import math # For num_cpu < 1: Set to 1. # For num_cpus >= 1: Set to the floor of the actual assigned cpus. omp_num_threads = max(math.floor(num_assigned_cpus), 1) os.environ[""OMP_NUM_THREADS""] = str(omp_num_threads) return True ``` ###Assistant : Set the OMP_NUM_THREADS to default to num cpus assigned to the worker This function sets the environment variable OMP_NUM_THREADS for the worker, if the env is not previously set and it's running in worker (WORKER_MODE). Returns True if OMP_NUM_THREADS is set in this function. " 920,"async def test_binary_device_classes(hass, hk_driver): entity_id = ""binary_sensor.demo"" aid = 1 for device_class, (service, char, _) in BINARY_SENSOR_SERVICE_MAP.items(): hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: device_class}) await hass.async_block_till_done() aid += 1 acc = BinarySensor(hass, hk_driver, ""Binary Sensor"", entity_id, aid, None) assert acc.get_service(service).display_name == service assert acc.char_detected.display_name == char ",Test if services and characteristics are assigned correctly.,8,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_binary_device_classes(hass, hk_driver): entity_id = ""binary_sensor.demo"" aid = 1 for device_class, (service, char, _) in BINARY_SENSOR_SERVICE_MAP.items(): hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: device_class}) await hass.async_block_till_done() aid += 1 acc = BinarySensor(hass, hk_driver, ""Binary Sensor"", entity_id, aid, None) assert acc.get_service(service).display_name == service assert acc.char_detected.display_name == char ``` ###Assistant : Test if services and characteristics are assigned correctly. " 921,"def Internaldate2tuple(resp): mo = InternalDate.match(resp) if not mo: return None mon = Mon2num[mo.group('mon')] zonen = mo.group('zonen') day = int(mo.group('day')) year = int(mo.group('year')) hour = int(mo.group('hour')) min = int(mo.group('min')) sec = int(mo.group('sec')) zoneh = int(mo.group('zoneh')) zonem = int(mo.group('zonem')) # INTERNALDATE timezone must be subtracted to get UT zone = (zoneh*60 + zonem)*60 if zonen == b'-': zone = -zone tt = (year, mon, day, hour, min, sec, -1, -1, -1) utc = calendar.timegm(tt) - zone return time.localtime(utc) ","Parse an IMAP4 INTERNALDATE string. Return corresponding local time. The return value is a time.struct_time tuple or None if the string has wrong format. ",24,76,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def Internaldate2tuple(resp): mo = InternalDate.match(resp) if not mo: return None mon = Mon2num[mo.group('mon')] zonen = mo.group('zonen') day = int(mo.group('day')) year = int(mo.group('year')) hour = int(mo.group('hour')) min = int(mo.group('min')) sec = int(mo.group('sec')) zoneh = int(mo.group('zoneh')) zonem = int(mo.group('zonem')) # INTERNALDATE timezone must be subtracted to get UT zone = (zoneh*60 + zonem)*60 if zonen == b'-': zone = -zone tt = (year, mon, day, hour, min, sec, -1, -1, -1) utc = calendar.timegm(tt) - zone return time.localtime(utc) ``` ###Assistant : Parse an IMAP4 INTERNALDATE string. Return corresponding local time. The return value is a time.struct_time tuple or None if the string has wrong format. " 922,"def test_delete_get(self): # Send request response = self.client.get( reverse(""wagtaildocs:delete_multiple"", args=(self.doc.id,)) ) # Check response self.assertEqual(response.status_code, 405) "," This tests that a GET request to the delete view returns a 405 ""METHOD NOT ALLOWED"" response ",17,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_delete_get(self): # Send request response = self.client.get( reverse(""wagtaildocs:delete_multiple"", args=(self.doc.id,)) ) # Check response self.assertEqual(response.status_code, 405) ``` ###Assistant : This tests that a GET request to the delete view returns a 405 ""METHOD NOT ALLOWED"" response " 923,"def test_stream_admin_remove_others_from_public_stream(self) -> None: result = self.attempt_unsubscribe_of_principal( query_count=15, target_users=[self.example_user(""cordelia"")], is_realm_admin=False, is_stream_admin=True, is_subbed=True, invite_only=False, target_users_subbed=True, ) json = self.assert_json_success(result) self.assert_length(json[""removed""], 1) self.assert_length(json[""not_removed""], 0) "," You can remove others from public streams you're a stream administrator of. ",12,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stream_admin_remove_others_from_public_stream(self) -> None: result = self.attempt_unsubscribe_of_principal( query_count=15, target_users=[self.example_user(""cordelia"")], is_realm_admin=False, is_stream_admin=True, is_subbed=True, invite_only=False, target_users_subbed=True, ) json = self.assert_json_success(result) self.assert_length(json[""removed""], 1) self.assert_length(json[""not_removed""], 0) ``` ###Assistant : You can remove others from public streams you're a stream administrator of. 
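The Internaldate2tuple record above documents parsing of IMAP INTERNALDATE strings; a minimal usage sketch of the standard-library version of that helper is shown below. The sample response bytes are invented for illustration, and the printed struct_time depends on the local timezone of the machine running the snippet.

```Python
import imaplib

# Hypothetical FETCH response fragment carrying an INTERNALDATE field.
resp = b'25 (INTERNALDATE "17-Jul-1996 02:44:25 -0700")'

# Parses the date and converts it to local time; returns None when the
# string does not match the expected INTERNALDATE format.
print(imaplib.Internaldate2tuple(resp))
print(imaplib.Internaldate2tuple(b"not an internaldate"))  # None
```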
" 924,"def _mosaic_combine(self, loc, center_position_xy, img_shape_wh): assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') if loc == 'top_left': # index0 to top left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ max(center_position_xy[1] - img_shape_wh[1], 0), \ center_position_xy[0], \ center_position_xy[1] crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( y2 - y1), img_shape_wh[0], img_shape_wh[1] elif loc == 'top_right': # index1 to top right part of image x1, y1, x2, y2 = center_position_xy[0], \ max(center_position_xy[1] - img_shape_wh[1], 0), \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[1] * 2), \ center_position_xy[1] crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( img_shape_wh[0], x2 - x1), img_shape_wh[1] elif loc == 'bottom_left': # index2 to bottom left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ center_position_xy[1], \ center_position_xy[0], \ min(self.img_scale[0] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( y2 - y1, img_shape_wh[1]) else: # index3 to bottom right part of image x1, y1, x2, y2 = center_position_xy[0], \ center_position_xy[1], \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[1] * 2), \ min(self.img_scale[0] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = 0, 0, min(img_shape_wh[0], x2 - x1), min(y2 - y1, img_shape_wh[1]) paste_coord = x1, y1, x2, y2 return paste_coord, crop_coord ","Calculate global coordinate of mosaic image and local coordinate of cropped sub-image. Args: loc (str): Index for the sub-image, loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right'). center_position_xy (Sequence[float]): Mixing center for 4 images, (x, y). img_shape_wh (Sequence[int]): Width and height of sub-image Returns: tuple[tuple[float]]: Corresponding coordinate of pasting and cropping - paste_coord (tuple): paste corner coordinate in mosaic image. - crop_coord (tuple): crop corner coordinate in mosaic image. ",67,201,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _mosaic_combine(self, loc, center_position_xy, img_shape_wh): assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') if loc == 'top_left': # index0 to top left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ max(center_position_xy[1] - img_shape_wh[1], 0), \ center_position_xy[0], \ center_position_xy[1] crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( y2 - y1), img_shape_wh[0], img_shape_wh[1] elif loc == 'top_right': # index1 to top right part of image x1, y1, x2, y2 = center_position_xy[0], \ max(center_position_xy[1] - img_shape_wh[1], 0), \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[1] * 2), \ center_position_xy[1] crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( img_shape_wh[0], x2 - x1), img_shape_wh[1] elif loc == 'bottom_left': # index2 to bottom left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ center_position_xy[1], \ center_position_xy[0], \ min(self.img_scale[0] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( y2 - y1, img_shape_wh[1]) else: # index3 to bottom right part of image x1, y1, x2, y2 = center_position_xy[0], \ center_position_xy[1], \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[1] * 2), \ min(self.img_scale[0] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = 0, 0, min(img_shape_wh[0], x2 - x1), min(y2 - y1, img_shape_wh[1]) paste_coord = x1, y1, x2, y2 return paste_coord, crop_coord ``` ###Assistant : Calculate global coordinate of mosaic image and local coordinate of cropped sub-image. Args: loc (str): Index for the sub-image, loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right'). center_position_xy (Sequence[float]): Mixing center for 4 images, (x, y). img_shape_wh (Sequence[int]): Width and height of sub-image Returns: tuple[tuple[float]]: Corresponding coordinate of pasting and cropping - paste_coord (tuple): paste corner coordinate in mosaic image. - crop_coord (tuple): crop corner coordinate in mosaic image. " 925,"def permute(self, perm, orientation='rows', direction='forward'): r from sympy.combinatorics import Permutation # allow british variants and `columns` if direction == 'forwards': direction = 'forward' if direction == 'backwards': direction = 'backward' if orientation == 'columns': orientation = 'cols' if direction not in ('forward', 'backward'): raise TypeError(""direction='{}' is an invalid kwarg. "" ""Try 'forward' or 'backward'"".format(direction)) if orientation not in ('rows', 'cols'): raise TypeError(""orientation='{}' is an invalid kwarg. "" ""Try 'rows' or 'cols'"".format(orientation)) if not isinstance(perm, (Permutation, Iterable)): raise ValueError( ""{} must be a list, a list of lists, "" ""or a SymPy permutation object."".format(perm)) # ensure all swaps are in range max_index = self.rows if orientation == 'rows' else self.cols if not all(0 <= t <= max_index for t in flatten(list(perm))): raise IndexError(""`swap` indices out of range."") if perm and not isinstance(perm, Permutation) and \ isinstance(perm[0], Iterable): if direction == 'forward': perm = list(reversed(perm)) perm = Permutation(perm, size=max_index+1) else: perm = Permutation(perm, size=max_index+1) if orientation == 'rows': return self._eval_permute_rows(perm) if orientation == 'cols': return self._eval_permute_cols(perm) ","Permute the rows or columns of a matrix by the given list of swaps. 
Parameters ========== perm : Permutation, list, or list of lists A representation for the permutation. If it is ``Permutation``, it is used directly with some resizing with respect to the matrix size. If it is specified as list of lists, (e.g., ``[[0, 1], [0, 2]]``), then the permutation is formed from applying the product of cycles. The direction how the cyclic product is applied is described in below. If it is specified as a list, the list should represent an array form of a permutation. (e.g., ``[1, 2, 0]``) which would would form the swapping function `0 \mapsto 1, 1 \mapsto 2, 2\mapsto 0`. orientation : 'rows', 'cols' A flag to control whether to permute the rows or the columns direction : 'forward', 'backward' A flag to control whether to apply the permutations from the start of the list first, or from the back of the list first. For example, if the permutation specification is ``[[0, 1], [0, 2]]``, If the flag is set to ``'forward'``, the cycle would be formed as `0 \mapsto 2, 2 \mapsto 1, 1 \mapsto 0`. If the flag is set to ``'backward'``, the cycle would be formed as `0 \mapsto 1, 1 \mapsto 2, 2 \mapsto 0`. If the argument ``perm`` is not in a form of list of lists, this flag takes no effect. Examples ======== >>> from sympy import eye >>> M = eye(3) >>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='forward') Matrix([ [0, 0, 1], [1, 0, 0], [0, 1, 0]]) >>> from sympy import eye >>> M = eye(3) >>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='backward') Matrix([ [0, 1, 0], [0, 0, 1], [1, 0, 0]]) Notes ===== If a bijective function `\sigma : \mathbb{N}_0 \rightarrow \mathbb{N}_0` denotes the permutation. If the matrix `A` is the matrix to permute, represented as a horizontal or a vertical stack of vectors: .. math:: A = \begin{bmatrix} a_0 \\ a_1 \\ \vdots \\ a_{n-1} \end{bmatrix} = \begin{bmatrix} \alpha_0 & \alpha_1 & \cdots & \alpha_{n-1} \end{bmatrix} If the matrix `B` is the result, the permutation of matrix rows is defined as: .. math:: B := \begin{bmatrix} a_{\sigma(0)} \\ a_{\sigma(1)} \\ \vdots \\ a_{\sigma(n-1)} \end{bmatrix} And the permutation of matrix columns is defined as: .. math:: B := \begin{bmatrix} \alpha_{\sigma(0)} & \alpha_{\sigma(1)} & \cdots & \alpha_{\sigma(n-1)} \end{bmatrix} ",395,164,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def permute(self, perm, orientation='rows', direction='forward'): r from sympy.combinatorics import Permutation # allow british variants and `columns` if direction == 'forwards': direction = 'forward' if direction == 'backwards': direction = 'backward' if orientation == 'columns': orientation = 'cols' if direction not in ('forward', 'backward'): raise TypeError(""direction='{}' is an invalid kwarg. "" ""Try 'forward' or 'backward'"".format(direction)) if orientation not in ('rows', 'cols'): raise TypeError(""orientation='{}' is an invalid kwarg. 
"" ""Try 'rows' or 'cols'"".format(orientation)) if not isinstance(perm, (Permutation, Iterable)): raise ValueError( ""{} must be a list, a list of lists, "" ""or a SymPy permutation object."".format(perm)) # ensure all swaps are in range max_index = self.rows if orientation == 'rows' else self.cols if not all(0 <= t <= max_index for t in flatten(list(perm))): raise IndexError(""`swap` indices out of range."") if perm and not isinstance(perm, Permutation) and \ isinstance(perm[0], Iterable): if direction == 'forward': perm = list(reversed(perm)) perm = Permutation(perm, size=max_index+1) else: perm = Permutation(perm, size=max_index+1) if orientation == 'rows': return self._eval_permute_rows(perm) if orientation == 'cols': return self._eval_permute_cols(perm) ``` ###Assistant : Permute the rows or columns of a matrix by the given list of swaps. Parameters ========== perm : Permutation, list, or list of lists A representation for the permutation. If it is ``Permutation``, it is used directly with some resizing with respect to the matrix size. If it is specified as list of lists, (e.g., ``[[0, 1], [0, 2]]``), then the permutation is formed from applying the product of cycles. The direction how the cyclic product is applied is described in below. If it is specified as a list, the list should represent an array form of a permutation. (e.g., ``[1, 2, 0]``) which would would form the swapping function `0 \mapsto 1, 1 \mapsto 2, 2\mapsto 0`. orientation : 'rows', 'cols' A flag to control whether to permute the rows or the columns direction : 'forward', 'backward' A flag to control whether to apply the permutations from the start of the list first, or from the back of the list first. For example, if the permutation specification is ``[[0, 1], [0, 2]]``, If the flag is set to ``'forward'``, the cycle would be formed as `0 \mapsto 2, 2 \mapsto 1, 1 \mapsto 0`. If the flag is set to ``'backward'``, the cycle would be formed as `0 \mapsto 1, 1 \mapsto 2, 2 \mapsto 0`. If the argument ``perm`` is not in a form of list of lists, this flag takes no effect. Examples ======== >>> from sympy import eye >>> M = eye(3) >>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='forward') Matrix([ [0, 0, 1], [1, 0, 0], [0, 1, 0]]) >>> from sympy import eye >>> M = eye(3) >>> M.permute([[0, 1], [0, 2]], orientation='rows', direction='backward') Matrix([ [0, 1, 0], [0, 0, 1], [1, 0, 0]]) Notes ===== If a bijective function `\sigma : \mathbb{N}_0 \rightarrow \mathbb{N}_0` denotes the permutation. If the matrix `A` is the matrix to permute, represented as a horizontal or a vertical stack of vectors: .. math:: A = \begin{bmatrix} a_0 \\ a_1 \\ \vdots \\ a_{n-1} \end{bmatrix} = \begin{bmatrix} \alpha_0 & \alpha_1 & \cdots & \alpha_{n-1} \end{bmatrix} If the matrix `B` is the result, the permutation of matrix rows is defined as: .. math:: B := \begin{bmatrix} a_{\sigma(0)} \\ a_{\sigma(1)} \\ \vdots \\ a_{\sigma(n-1)} \end{bmatrix} And the permutation of matrix columns is defined as: .. math:: B := \begin{bmatrix} \alpha_{\sigma(0)} & \alpha_{\sigma(1)} & \cdots & \alpha_{\sigma(n-1)} \end{bmatrix} " 926,"def apply(self, func, *args, **kwargs): func(self, *args, **kwargs) return self "," Pass the grid to a user-supplied function and return self. The `func` must accept an object of this type for its first positional argument. Additional arguments are passed through. The return value of `func` is ignored; this method returns self. See the `pipe` method if you want the return value. Added in v0.12.0. 
",53,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply(self, func, *args, **kwargs): func(self, *args, **kwargs) return self ``` ###Assistant : Pass the grid to a user-supplied function and return self. The `func` must accept an object of this type for its first positional argument. Additional arguments are passed through. The return value of `func` is ignored; this method returns self. See the `pipe` method if you want the return value. Added in v0.12.0. " 927,"def copy_m2m_relationships(obj1, obj2, fields, kwargs=None): for field_name in fields: if hasattr(obj1, field_name): try: field_obj = obj1._meta.get_field(field_name) except FieldDoesNotExist: continue if isinstance(field_obj, ManyToManyField): # Many to Many can be specified as field_name src_field_value = getattr(obj1, field_name) if kwargs and field_name in kwargs: override_field_val = kwargs[field_name] # TODO: Should we spike this our or just put the for loop inside the next if and make everything respect order? if field_name == 'instance_groups': # instance_groups are a list but we need to preserve the order for ig_id in override_field_val: getattr(obj2, field_name).add(ig_id) continue if isinstance(override_field_val, (set, list, QuerySet)): getattr(obj2, field_name).add(*override_field_val) continue if override_field_val.__class__.__name__ == 'ManyRelatedManager': src_field_value = override_field_val dest_field = getattr(obj2, field_name) dest_field.add(*list(src_field_value.all().values_list('id', flat=True))) "," In-place operation. Given two saved objects, copies related objects from obj1 to obj2 to field of same name, if field occurs in `fields` ",23,110,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def copy_m2m_relationships(obj1, obj2, fields, kwargs=None): for field_name in fields: if hasattr(obj1, field_name): try: field_obj = obj1._meta.get_field(field_name) except FieldDoesNotExist: continue if isinstance(field_obj, ManyToManyField): # Many to Many can be specified as field_name src_field_value = getattr(obj1, field_name) if kwargs and field_name in kwargs: override_field_val = kwargs[field_name] # TODO: Should we spike this our or just put the for loop inside the next if and make everything respect order? if field_name == 'instance_groups': # instance_groups are a list but we need to preserve the order for ig_id in override_field_val: getattr(obj2, field_name).add(ig_id) continue if isinstance(override_field_val, (set, list, QuerySet)): getattr(obj2, field_name).add(*override_field_val) continue if override_field_val.__class__.__name__ == 'ManyRelatedManager': src_field_value = override_field_val dest_field = getattr(obj2, field_name) dest_field.add(*list(src_field_value.all().values_list('id', flat=True))) ``` ###Assistant : In-place operation. Given two saved objects, copies related objects from obj1 to obj2 to field of same name, if field occurs in `fields` " 928,"def load_from_pipeline(pipeline): try: import transformers except ImportError: raise ImportError( ""transformers not installed. Please try `pip install transformers`"" ) if not isinstance(pipeline, transformers.Pipeline): raise ValueError(""pipeline must be a transformers.Pipeline"") # Handle the different pipelines. The has_attr() checks to make sure the pipeline exists in the # version of the transformers library that the user has installed. 
if hasattr(transformers, ""AudioClassificationPipeline"") and isinstance( pipeline, transformers.AudioClassificationPipeline ): pipeline_info = { ""inputs"": inputs.Audio(label=""Input"", source=""microphone"", type=""filepath""), ""outputs"": outputs.Label(label=""Class"", type=""confidences""), ""preprocess"": lambda i: {""inputs"": i}, ""postprocess"": lambda r: {i[""label""].split("", "")[0]: i[""score""] for i in r}, } elif hasattr(transformers, ""AutomaticSpeechRecognitionPipeline"") and isinstance( pipeline, transformers.AutomaticSpeechRecognitionPipeline ): pipeline_info = { ""inputs"": inputs.Audio(label=""Input"", source=""microphone"", type=""filepath""), ""outputs"": outputs.Textbox(label=""Output""), ""preprocess"": lambda i: {""inputs"": i}, ""postprocess"": lambda r: r[""text""], } elif hasattr(transformers, ""FeatureExtractionPipeline"") and isinstance( pipeline, transformers.FeatureExtractionPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Dataframe(label=""Output""), ""preprocess"": lambda x: {""inputs"": x}, ""postprocess"": lambda r: r[0], } elif hasattr(transformers, ""FillMaskPipeline"") and isinstance( pipeline, transformers.FillMaskPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Label(label=""Classification"", type=""confidences""), ""preprocess"": lambda x: {""inputs"": x}, ""postprocess"": lambda r: {i[""token_str""]: i[""score""] for i in r}, } elif hasattr(transformers, ""ImageClassificationPipeline"") and isinstance( pipeline, transformers.ImageClassificationPipeline ): pipeline_info = { ""inputs"": inputs.Image(label=""Input Image"", type=""filepath""), ""outputs"": outputs.Label(label=""Classification"", type=""confidences""), ""preprocess"": lambda i: {""images"": i}, ""postprocess"": lambda r: {i[""label""].split("", "")[0]: i[""score""] for i in r}, } elif hasattr(transformers, ""QuestionAnsweringPipeline"") and isinstance( pipeline, transformers.QuestionAnsweringPipeline ): pipeline_info = { ""inputs"": [ inputs.Textbox(label=""Context"", lines=7), inputs.Textbox(label=""Question""), ], ""outputs"": [outputs.Textbox(label=""Answer""), outputs.Label(label=""Score"")], ""preprocess"": lambda c, q: {""context"": c, ""question"": q}, ""postprocess"": lambda r: (r[""answer""], r[""score""]), } elif hasattr(transformers, ""SummarizationPipeline"") and isinstance( pipeline, transformers.SummarizationPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input"", lines=7), ""outputs"": outputs.Textbox(label=""Summary""), ""preprocess"": lambda x: {""inputs"": x}, ""postprocess"": lambda r: r[0][""summary_text""], } elif hasattr(transformers, ""TextClassificationPipeline"") and isinstance( pipeline, transformers.TextClassificationPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Label(label=""Classification"", type=""confidences""), ""preprocess"": lambda x: [x], ""postprocess"": lambda r: {i[""label""].split("", "")[0]: i[""score""] for i in r}, } elif hasattr(transformers, ""TextGenerationPipeline"") and isinstance( pipeline, transformers.TextGenerationPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Textbox(label=""Output""), ""preprocess"": lambda x: {""text_inputs"": x}, ""postprocess"": lambda r: r[0][""generated_text""], } elif hasattr(transformers, ""TranslationPipeline"") and isinstance( pipeline, transformers.TranslationPipeline ): pipeline_info = { 
""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Textbox(label=""Translation""), ""preprocess"": lambda x: [x], ""postprocess"": lambda r: r[0][""translation_text""], } elif hasattr(transformers, ""Text2TextGenerationPipeline"") and isinstance( pipeline, transformers.Text2TextGenerationPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Textbox(label=""Generated Text""), ""preprocess"": lambda x: [x], ""postprocess"": lambda r: r[0][""generated_text""], } elif hasattr(transformers, ""ZeroShotClassificationPipeline"") and isinstance( pipeline, transformers.ZeroShotClassificationPipeline ): pipeline_info = { ""inputs"": [ inputs.Textbox(label=""Input""), inputs.Textbox(label=""Possible class names ("" ""comma-separated)""), inputs.Checkbox(label=""Allow multiple true classes""), ], ""outputs"": outputs.Label(label=""Classification"", type=""confidences""), ""preprocess"": lambda i, c, m: { ""sequences"": i, ""candidate_labels"": c, ""multi_label"": m, }, ""postprocess"": lambda r: { r[""labels""][i]: r[""scores""][i] for i in range(len(r[""labels""])) }, } else: raise ValueError(""Unsupported pipeline type: {}"".format(type(pipeline))) # define the function that will be called by the Interface"," Gets the appropriate Interface kwargs for a given Hugging Face transformers.Pipeline. pipeline (transformers.Pipeline): the transformers.Pipeline from which to create an interface Returns: (dict): a dictionary of kwargs that can be used to construct an Interface object ",36,440,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_from_pipeline(pipeline): try: import transformers except ImportError: raise ImportError( ""transformers not installed. Please try `pip install transformers`"" ) if not isinstance(pipeline, transformers.Pipeline): raise ValueError(""pipeline must be a transformers.Pipeline"") # Handle the different pipelines. The has_attr() checks to make sure the pipeline exists in the # version of the transformers library that the user has installed. 
if hasattr(transformers, ""AudioClassificationPipeline"") and isinstance( pipeline, transformers.AudioClassificationPipeline ): pipeline_info = { ""inputs"": inputs.Audio(label=""Input"", source=""microphone"", type=""filepath""), ""outputs"": outputs.Label(label=""Class"", type=""confidences""), ""preprocess"": lambda i: {""inputs"": i}, ""postprocess"": lambda r: {i[""label""].split("", "")[0]: i[""score""] for i in r}, } elif hasattr(transformers, ""AutomaticSpeechRecognitionPipeline"") and isinstance( pipeline, transformers.AutomaticSpeechRecognitionPipeline ): pipeline_info = { ""inputs"": inputs.Audio(label=""Input"", source=""microphone"", type=""filepath""), ""outputs"": outputs.Textbox(label=""Output""), ""preprocess"": lambda i: {""inputs"": i}, ""postprocess"": lambda r: r[""text""], } elif hasattr(transformers, ""FeatureExtractionPipeline"") and isinstance( pipeline, transformers.FeatureExtractionPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Dataframe(label=""Output""), ""preprocess"": lambda x: {""inputs"": x}, ""postprocess"": lambda r: r[0], } elif hasattr(transformers, ""FillMaskPipeline"") and isinstance( pipeline, transformers.FillMaskPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Label(label=""Classification"", type=""confidences""), ""preprocess"": lambda x: {""inputs"": x}, ""postprocess"": lambda r: {i[""token_str""]: i[""score""] for i in r}, } elif hasattr(transformers, ""ImageClassificationPipeline"") and isinstance( pipeline, transformers.ImageClassificationPipeline ): pipeline_info = { ""inputs"": inputs.Image(label=""Input Image"", type=""filepath""), ""outputs"": outputs.Label(label=""Classification"", type=""confidences""), ""preprocess"": lambda i: {""images"": i}, ""postprocess"": lambda r: {i[""label""].split("", "")[0]: i[""score""] for i in r}, } elif hasattr(transformers, ""QuestionAnsweringPipeline"") and isinstance( pipeline, transformers.QuestionAnsweringPipeline ): pipeline_info = { ""inputs"": [ inputs.Textbox(label=""Context"", lines=7), inputs.Textbox(label=""Question""), ], ""outputs"": [outputs.Textbox(label=""Answer""), outputs.Label(label=""Score"")], ""preprocess"": lambda c, q: {""context"": c, ""question"": q}, ""postprocess"": lambda r: (r[""answer""], r[""score""]), } elif hasattr(transformers, ""SummarizationPipeline"") and isinstance( pipeline, transformers.SummarizationPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input"", lines=7), ""outputs"": outputs.Textbox(label=""Summary""), ""preprocess"": lambda x: {""inputs"": x}, ""postprocess"": lambda r: r[0][""summary_text""], } elif hasattr(transformers, ""TextClassificationPipeline"") and isinstance( pipeline, transformers.TextClassificationPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Label(label=""Classification"", type=""confidences""), ""preprocess"": lambda x: [x], ""postprocess"": lambda r: {i[""label""].split("", "")[0]: i[""score""] for i in r}, } elif hasattr(transformers, ""TextGenerationPipeline"") and isinstance( pipeline, transformers.TextGenerationPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Textbox(label=""Output""), ""preprocess"": lambda x: {""text_inputs"": x}, ""postprocess"": lambda r: r[0][""generated_text""], } elif hasattr(transformers, ""TranslationPipeline"") and isinstance( pipeline, transformers.TranslationPipeline ): pipeline_info = { 
""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Textbox(label=""Translation""), ""preprocess"": lambda x: [x], ""postprocess"": lambda r: r[0][""translation_text""], } elif hasattr(transformers, ""Text2TextGenerationPipeline"") and isinstance( pipeline, transformers.Text2TextGenerationPipeline ): pipeline_info = { ""inputs"": inputs.Textbox(label=""Input""), ""outputs"": outputs.Textbox(label=""Generated Text""), ""preprocess"": lambda x: [x], ""postprocess"": lambda r: r[0][""generated_text""], } elif hasattr(transformers, ""ZeroShotClassificationPipeline"") and isinstance( pipeline, transformers.ZeroShotClassificationPipeline ): pipeline_info = { ""inputs"": [ inputs.Textbox(label=""Input""), inputs.Textbox(label=""Possible class names ("" ""comma-separated)""), inputs.Checkbox(label=""Allow multiple true classes""), ], ""outputs"": outputs.Label(label=""Classification"", type=""confidences""), ""preprocess"": lambda i, c, m: { ""sequences"": i, ""candidate_labels"": c, ""multi_label"": m, }, ""postprocess"": lambda r: { r[""labels""][i]: r[""scores""][i] for i in range(len(r[""labels""])) }, } else: raise ValueError(""Unsupported pipeline type: {}"".format(type(pipeline))) # define the function that will be called by the Interface ``` ###Assistant : Gets the appropriate Interface kwargs for a given Hugging Face transformers.Pipeline. pipeline (transformers.Pipeline): the transformers.Pipeline from which to create an interface Returns: (dict): a dictionary of kwargs that can be used to construct an Interface object " 929,"def calculate_bounds_for_mechanism(value_array, min_val_array, max_val_array): # TODO: Double check whether the iDPGaussianMechanism class squares its squared_l2_norm values!! worst_case_l2_norm = np.sqrt(np.sum(np.square(max_val_array - min_val_array))) * np.ones_like(value_array) l2_norm = np.sqrt(np.sum(np.square(value_array))) * np.ones_like(value_array) # print(l2_norm.shape, worst_case_l2_norm.shape) # print(l2_norm.shape) return l2_norm, worst_case_l2_norm ","Calculates the squared L2 norm values needed to create a Mechanism, and calculate privacy budget + spend If you calculate the privacy budget spend with the worst case bound, you can show this number to the D.S. If you calculate it with the regular value (the value computed below when public_only = False, you cannot show the privacy budget to the DS because this violates privacy. ",66,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def calculate_bounds_for_mechanism(value_array, min_val_array, max_val_array): # TODO: Double check whether the iDPGaussianMechanism class squares its squared_l2_norm values!! worst_case_l2_norm = np.sqrt(np.sum(np.square(max_val_array - min_val_array))) * np.ones_like(value_array) l2_norm = np.sqrt(np.sum(np.square(value_array))) * np.ones_like(value_array) # print(l2_norm.shape, worst_case_l2_norm.shape) # print(l2_norm.shape) return l2_norm, worst_case_l2_norm ``` ###Assistant : Calculates the squared L2 norm values needed to create a Mechanism, and calculate privacy budget + spend If you calculate the privacy budget spend with the worst case bound, you can show this number to the D.S. If you calculate it with the regular value (the value computed below when public_only = False, you cannot show the privacy budget to the DS because this violates privacy. 
" 930,"def get_po_entries(conditions): return frappe.db.sql( .format( conditions=conditions ), as_dict=1, ) # nosec "," SELECT child.name, child.parent, child.cost_center, child.project, child.warehouse, child.material_request, child.material_request_item, child.item_code, child.stock_uom, child.qty, child.amount, child.base_amount, child.schedule_date, parent.transaction_date, parent.supplier, parent.status, parent.owner FROM `tabPurchase Order` parent, `tabPurchase Order Item` child WHERE parent.docstatus = 1 AND parent.name = child.parent AND parent.status not in (""Closed"",""Completed"",""Cancelled"") {conditions} GROUP BY parent.name, child.item_code ",44,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_po_entries(conditions): return frappe.db.sql( .format( conditions=conditions ), as_dict=1, ) # nosec ``` ###Assistant : SELECT child.name, child.parent, child.cost_center, child.project, child.warehouse, child.material_request, child.material_request_item, child.item_code, child.stock_uom, child.qty, child.amount, child.base_amount, child.schedule_date, parent.transaction_date, parent.supplier, parent.status, parent.owner FROM `tabPurchase Order` parent, `tabPurchase Order Item` child WHERE parent.docstatus = 1 AND parent.name = child.parent AND parent.status not in (""Closed"",""Completed"",""Cancelled"") {conditions} GROUP BY parent.name, child.item_code " 931,"def get_year(self): year = self.year if year is None: try: year = self.kwargs[""year""] except KeyError: try: year = self.request.GET[""year""] except KeyError: raise Http404(_(""No year specified"")) return year ",Return the year for which this view should display data.,10,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_year(self): year = self.year if year is None: try: year = self.kwargs[""year""] except KeyError: try: year = self.request.GET[""year""] except KeyError: raise Http404(_(""No year specified"")) return year ``` ###Assistant : Return the year for which this view should display data. " 932,"def test_overwrite_storage_path(self): call_command(""document_retagger"", ""--storage_path"", ""--overwrite"") d_first, d_second, d_unrelated, d_auto = self.get_updated_docs() self.assertEqual(d_first.storage_path, self.sp2) self.assertEqual(d_auto.storage_path, self.sp1) self.assertIsNone(d_second.storage_path) self.assertEqual(d_unrelated.storage_path, self.sp2) "," GIVEN: - 2 storage paths with documents which match them - 1 document which matches but has a storage path WHEN: - document retagger is called with overwrite THEN: - Matching document's storage paths updated - Non-matching documents have no storage path - Existing storage patch overwritten ",47,18,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_overwrite_storage_path(self): call_command(""document_retagger"", ""--storage_path"", ""--overwrite"") d_first, d_second, d_unrelated, d_auto = self.get_updated_docs() self.assertEqual(d_first.storage_path, self.sp2) self.assertEqual(d_auto.storage_path, self.sp1) self.assertIsNone(d_second.storage_path) self.assertEqual(d_unrelated.storage_path, self.sp2) ``` ###Assistant : GIVEN: - 2 storage paths with documents which match them - 1 document which matches but has a storage path WHEN: - document retagger is called with overwrite THEN: - Matching document's storage paths updated - Non-matching documents have no storage path - Existing storage patch overwritten " 933,"def invert(self): return DataFrameDefault.register(pandas.DataFrame.__invert__)(self) "," Apply bitwise inversion for each element of the QueryCompiler. Returns ------- BaseQueryCompiler New QueryCompiler containing bitwise inversion for each value. ",20,4,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def invert(self): return DataFrameDefault.register(pandas.DataFrame.__invert__)(self) ``` ###Assistant : Apply bitwise inversion for each element of the QueryCompiler. Returns ------- BaseQueryCompiler New QueryCompiler containing bitwise inversion for each value. " 934,"def test_center_head_loss(self): s = 256 img_metas = [{'batch_input_shape': (s, s, 3)}] test_cfg = dict(topK=100, max_per_img=100) centernet_head = CenterNetHead( num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg) feat = [torch.rand(1, 1, s, s)] center_out, wh_out, offset_out = centernet_head.forward(feat) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = centernet_head.loss(center_out, wh_out, offset_out, [gt_instances], img_metas) loss_center = empty_gt_losses['loss_center_heatmap'] loss_wh = empty_gt_losses['loss_wh'] loss_offset = empty_gt_losses['loss_offset'] assert loss_center.item() > 0, 'loss_center should be non-zero' assert loss_wh.item() == 0, ( 'there should be no loss_wh when there are no true boxes') assert loss_offset.item() == 0, ( 'there should be no loss_offset when there are no true boxes') # When truth is non-empty then both cls and box loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) one_gt_losses = centernet_head.loss(center_out, wh_out, offset_out, [gt_instances], img_metas) loss_center = one_gt_losses['loss_center_heatmap'] loss_wh = one_gt_losses['loss_wh'] loss_offset = one_gt_losses['loss_offset'] assert loss_center.item() > 0, 'loss_center should be non-zero' assert loss_wh.item() > 0, 'loss_wh should be non-zero' assert loss_offset.item() > 0, 'loss_offset should be non-zero' ",Tests center head loss when truth is empty and non-empty.,10,183,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_center_head_loss(self): s = 256 img_metas = [{'batch_input_shape': (s, s, 3)}] test_cfg = dict(topK=100, max_per_img=100) centernet_head = CenterNetHead( num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg) feat = [torch.rand(1, 1, s, s)] center_out, wh_out, offset_out = centernet_head.forward(feat) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = centernet_head.loss(center_out, wh_out, offset_out, [gt_instances], img_metas) loss_center = empty_gt_losses['loss_center_heatmap'] loss_wh = empty_gt_losses['loss_wh'] loss_offset = empty_gt_losses['loss_offset'] assert loss_center.item() > 0, 'loss_center should be non-zero' assert loss_wh.item() == 0, ( 'there should be no loss_wh when there are no true boxes') assert loss_offset.item() == 0, ( 'there should be no loss_offset when there are no true boxes') # When truth is non-empty then both cls and box loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) one_gt_losses = centernet_head.loss(center_out, wh_out, offset_out, [gt_instances], img_metas) loss_center = one_gt_losses['loss_center_heatmap'] loss_wh = one_gt_losses['loss_wh'] loss_offset = one_gt_losses['loss_offset'] assert loss_center.item() > 0, 'loss_center should be non-zero' assert loss_wh.item() > 0, 'loss_wh should be non-zero' assert loss_offset.item() > 0, 'loss_offset should be non-zero' ``` ###Assistant : Tests center head loss when truth is empty and non-empty. " 935,"def from_package(package): spec = wrap_spec(package) reader = spec.loader.get_resource_reader(spec.name) return reader.files() @contextlib.contextmanager"," Return a Traversable object for the given package. ",8,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def from_package(package): spec = wrap_spec(package) reader = spec.loader.get_resource_reader(spec.name) return reader.files() @contextlib.contextmanager ``` ###Assistant : Return a Traversable object for the given package. " 936,"def _is_refund_ongoing(payment): return ( payment.transactions.filter( kind=TransactionKind.REFUND_ONGOING, is_success=True ).exists() if payment else False ) ",Return True if refund is ongoing for given payment.,9,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _is_refund_ongoing(payment): return ( payment.transactions.filter( kind=TransactionKind.REFUND_ONGOING, is_success=True ).exists() if payment else False ) ``` ###Assistant : Return True if refund is ongoing for given payment. 
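Record 935's from_package helper sits behind importlib.resources' Traversable access; if I read the API correctly, the public entry point for the same behaviour is importlib.resources.files(), sketched below. This assumes Python 3.9+, and the stdlib 'json' package is only an example target.

```Python
from importlib.resources import files  # Python 3.9+

# files() hands back a Traversable rooted at the package's resources,
# analogous to what from_package(package) returns internally.
root = files("json")
for entry in root.iterdir():
    print(entry.name)                      # e.g. decoder.py, encoder.py, ...
print(root.joinpath("__init__.py").read_text()[:40])
```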
" 937,"def installed(name, updates=None): if isinstance(updates, str): updates = [updates] if not updates: updates = name ret = {""name"": name, ""changes"": {}, ""result"": True, ""comment"": """"} wua = salt.utils.win_update.WindowsUpdateAgent() # Search for updates install_list = wua.search(updates) # No updates found if install_list.count() == 0: ret[""comment""] = ""No updates found"" return ret # List of updates to download download = salt.utils.win_update.Updates() for item in install_list.updates: if not salt.utils.data.is_true(item.IsDownloaded): download.updates.Add(item) # List of updates to install install = salt.utils.win_update.Updates() installed_updates = [] for item in install_list.updates: if not salt.utils.data.is_true(item.IsInstalled): install.updates.Add(item) else: installed_updates.extend(""KB"" + kb for kb in item.KBArticleIDs) if install.count() == 0: ret[""comment""] = ""Updates already installed: "" ret[""comment""] += ""\n - "".join(installed_updates) return ret # Return comment of changes if test. if __opts__[""test""]: ret[""result""] = None ret[""comment""] = ""Updates will be installed:"" for update in install.updates: ret[""comment""] += ""\n"" ret[""comment""] += "": "".join([update.Identity.UpdateID, update.Title]) return ret # Download updates wua.download(download) # Install updates wua.install(install) # Refresh windows update info wua.refresh() post_info = wua.updates().list() # Verify the installation for item in install.list(): if not salt.utils.data.is_true(post_info[item][""Installed""]): ret[""changes""][""failed""] = { item: { ""Title"": post_info[item][""Title""], ""KBs"": post_info[item][""KBs""], } } ret[""result""] = False else: ret[""changes""][""installed""] = { item: { ""Title"": post_info[item][""Title""], ""NeedsReboot"": post_info[item][""NeedsReboot""], ""KBs"": post_info[item][""KBs""], } } if ret[""changes""].get(""failed"", False): ret[""comment""] = ""Updates failed"" else: ret[""comment""] = ""Updates installed successfully"" return ret "," Ensure Microsoft Updates are installed. Updates will be downloaded if needed. Args: name (str): The identifier of a single update to install. updates (list): A list of identifiers for updates to be installed. Overrides ``name``. Default is None. .. note:: Identifiers can be the GUID, the KB number, or any part of the Title of the Microsoft update. GUIDs and KBs are the preferred method to ensure you're installing the correct update. .. warning:: Using a partial KB number or a partial Title could result in more than one update being installed. Returns: dict: A dictionary containing the results of the update CLI Example: .. code-block:: yaml # using a GUID install_update: wua.installed: - name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211 # using a KB install_update: wua.installed: - name: KB3194343 # using the full Title install_update: wua.installed: - name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343) # Install multiple updates install_updates: wua.installed: - updates: - KB3194343 - 28cf1b09-2b1a-458c-9bd1-971d1b26b211 ",161,215,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def installed(name, updates=None): if isinstance(updates, str): updates = [updates] if not updates: updates = name ret = {""name"": name, ""changes"": {}, ""result"": True, ""comment"": """"} wua = salt.utils.win_update.WindowsUpdateAgent() # Search for updates install_list = wua.search(updates) # No updates found if install_list.count() == 0: ret[""comment""] = ""No updates found"" return ret # List of updates to download download = salt.utils.win_update.Updates() for item in install_list.updates: if not salt.utils.data.is_true(item.IsDownloaded): download.updates.Add(item) # List of updates to install install = salt.utils.win_update.Updates() installed_updates = [] for item in install_list.updates: if not salt.utils.data.is_true(item.IsInstalled): install.updates.Add(item) else: installed_updates.extend(""KB"" + kb for kb in item.KBArticleIDs) if install.count() == 0: ret[""comment""] = ""Updates already installed: "" ret[""comment""] += ""\n - "".join(installed_updates) return ret # Return comment of changes if test. if __opts__[""test""]: ret[""result""] = None ret[""comment""] = ""Updates will be installed:"" for update in install.updates: ret[""comment""] += ""\n"" ret[""comment""] += "": "".join([update.Identity.UpdateID, update.Title]) return ret # Download updates wua.download(download) # Install updates wua.install(install) # Refresh windows update info wua.refresh() post_info = wua.updates().list() # Verify the installation for item in install.list(): if not salt.utils.data.is_true(post_info[item][""Installed""]): ret[""changes""][""failed""] = { item: { ""Title"": post_info[item][""Title""], ""KBs"": post_info[item][""KBs""], } } ret[""result""] = False else: ret[""changes""][""installed""] = { item: { ""Title"": post_info[item][""Title""], ""NeedsReboot"": post_info[item][""NeedsReboot""], ""KBs"": post_info[item][""KBs""], } } if ret[""changes""].get(""failed"", False): ret[""comment""] = ""Updates failed"" else: ret[""comment""] = ""Updates installed successfully"" return ret ``` ###Assistant : Ensure Microsoft Updates are installed. Updates will be downloaded if needed. Args: name (str): The identifier of a single update to install. updates (list): A list of identifiers for updates to be installed. Overrides ``name``. Default is None. .. note:: Identifiers can be the GUID, the KB number, or any part of the Title of the Microsoft update. GUIDs and KBs are the preferred method to ensure you're installing the correct update. .. warning:: Using a partial KB number or a partial Title could result in more than one update being installed. Returns: dict: A dictionary containing the results of the update CLI Example: .. 
code-block:: yaml # using a GUID install_update: wua.installed: - name: 28cf1b09-2b1a-458c-9bd1-971d1b26b211 # using a KB install_update: wua.installed: - name: KB3194343 # using the full Title install_update: wua.installed: - name: Security Update for Adobe Flash Player for Windows 10 Version 1607 (for x64-based Systems) (KB3194343) # Install multiple updates install_updates: wua.installed: - updates: - KB3194343 - 28cf1b09-2b1a-458c-9bd1-971d1b26b211 " 938,"def _getKivyInformation(self): setup_codes = r info = self.queryRuntimeInformationMultiple( info_name=""kivy_info"", setup_codes=setup_codes, values=( (""libs_loaded"", ""kivy.core.image.libs_loaded""), (""window_impl"", ""kivy.core.window.window_impl""), (""label_libs"", ""kivy.core.text.label_libs""), (""sdl2_dep_bins"", ""sdl2_dep_bins""), (""glew_dep_bins"", ""glew_dep_bins""), ), ) if info is None: self.sysexit(""Error, it seems Kivy is not installed."") return info "," import kivy.core.image import kivy.core.text # Prevent Window from being created at compile time. kivy.core.core_select_lib=(lambda *args, **kwargs: None) import kivy.core.window # Kivy has packages designed to provide these on Windows try: from kivy_deps.sdl2 import dep_bins as sdl2_dep_bins except ImportError: sdl2_dep_bins = [] try: from kivy_deps.glew import dep_bins as glew_dep_bins except ImportError: glew_dep_bins = [] ",53,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _getKivyInformation(self): setup_codes = r info = self.queryRuntimeInformationMultiple( info_name=""kivy_info"", setup_codes=setup_codes, values=( (""libs_loaded"", ""kivy.core.image.libs_loaded""), (""window_impl"", ""kivy.core.window.window_impl""), (""label_libs"", ""kivy.core.text.label_libs""), (""sdl2_dep_bins"", ""sdl2_dep_bins""), (""glew_dep_bins"", ""glew_dep_bins""), ), ) if info is None: self.sysexit(""Error, it seems Kivy is not installed."") return info ``` ###Assistant : import kivy.core.image import kivy.core.text # Prevent Window from being created at compile time. kivy.core.core_select_lib=(lambda *args, **kwargs: None) import kivy.core.window # Kivy has packages designed to provide these on Windows try: from kivy_deps.sdl2 import dep_bins as sdl2_dep_bins except ImportError: sdl2_dep_bins = [] try: from kivy_deps.glew import dep_bins as glew_dep_bins except ImportError: glew_dep_bins = [] " 939,"def __iter__(self) -> Iterator: return iter(self._info_axis) # can we get a better explanation of this?"," Iterate over info axis. Returns ------- iterator Info axis as iterator. ",11,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __iter__(self) -> Iterator: return iter(self._info_axis) # can we get a better explanation of this? ``` ###Assistant : Iterate over info axis. Returns ------- iterator Info axis as iterator. 
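Record 939 documents NDFrame.__iter__ as iteration over the info axis; for a DataFrame that axis is the column labels, which the short pandas sketch below makes visible (the frame contents are arbitrary).

```Python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# Iterating the frame walks its info axis, i.e. the column labels.
print(list(df))               # ['a', 'b']
for col in df:
    print(col, df[col].tolist())
```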
" 940,"def cast(self, target_schema, *args, **kwargs): table = table_cast(self.table, target_schema, *args, **kwargs) blocks = [] for subtables in self.blocks: new_tables = [] fields = list(target_schema) for subtable in subtables: subfields = [] for name in subtable.column_names: subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name))) subschema = pa.schema(subfields) new_tables.append(subtable.cast(subschema, *args, **kwargs)) blocks.append(new_tables) return ConcatenationTable(table, blocks) "," Cast table values to another schema Args: target_schema (:obj:`Schema`): Schema to cast to, the names and order of fields must match safe (:obj:`bool`, defaults to :obj:`True`): Check for overflows or other unsafe conversions Returns: :class:`datasets.table.Table`: ",35,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cast(self, target_schema, *args, **kwargs): table = table_cast(self.table, target_schema, *args, **kwargs) blocks = [] for subtables in self.blocks: new_tables = [] fields = list(target_schema) for subtable in subtables: subfields = [] for name in subtable.column_names: subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name))) subschema = pa.schema(subfields) new_tables.append(subtable.cast(subschema, *args, **kwargs)) blocks.append(new_tables) return ConcatenationTable(table, blocks) ``` ###Assistant : Cast table values to another schema Args: target_schema (:obj:`Schema`): Schema to cast to, the names and order of fields must match safe (:obj:`bool`, defaults to :obj:`True`): Check for overflows or other unsafe conversions Returns: :class:`datasets.table.Table`: " 941,"def softmax(p, axis=None, temperature=1): if axis is None: axis = p.ndim - 1 if temperature == 0.: # NOTE: in case of multiple equal maxima, returns uniform distribution. p = p == np.max(p, axis=axis, keepdims=True) else: # oldp = p logp = np.log(p) logp /= temperature logp -= logp.max(axis=axis, keepdims=True) p = np.exp(logp) p /= p.sum(axis=axis, keepdims=True) if np.isnan(p).any(): pdb.set_trace() # pylint: disable=forgotten-debug-statement return p ","Apply the softmax transform to an array of categorical distributions. Args: p: an array of categorical probability vectors, possibly unnormalized. axis: the axis that spans the categories (default: -1). temperature: if not 1, transform the distribution by dividing the log probabilities and renormalizing. Values greater than 1 increase entropy, values less than 1 decrease entropy. A value of 0 yields a deterministic distribution that chooses the mode. Returns: An array of categorical probability vectors, like `p` but tempered and normalized. ",80,65,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def softmax(p, axis=None, temperature=1): if axis is None: axis = p.ndim - 1 if temperature == 0.: # NOTE: in case of multiple equal maxima, returns uniform distribution. p = p == np.max(p, axis=axis, keepdims=True) else: # oldp = p logp = np.log(p) logp /= temperature logp -= logp.max(axis=axis, keepdims=True) p = np.exp(logp) p /= p.sum(axis=axis, keepdims=True) if np.isnan(p).any(): pdb.set_trace() # pylint: disable=forgotten-debug-statement return p ``` ###Assistant : Apply the softmax transform to an array of categorical distributions. Args: p: an array of categorical probability vectors, possibly unnormalized. 
axis: the axis that spans the categories (default: -1). temperature: if not 1, transform the distribution by dividing the log probabilities and renormalizing. Values greater than 1 increase entropy, values less than 1 decrease entropy. A value of 0 yields a deterministic distribution that chooses the mode. Returns: An array of categorical probability vectors, like `p` but tempered and normalized. " 942,"def _populate_static_information(self) -> None: self.info[""ludwig_version""] = LUDWIG_VERSION self.info[""start_disk_usage""] = shutil.disk_usage(os.path.expanduser(""~"")).used # CPU information cpu_info = get_my_cpu_info() self.info[""cpu_architecture""] = cpu_info[""arch""] self.info[""num_cpu""] = psutil.cpu_count() self.info[""cpu_name""] = cpu_info[""brand_raw""] self.info[""total_cpu_memory_size""] = psutil.virtual_memory().total # GPU information if self.cuda_is_available: gpu_infos = get_gpu_info() for i, gpu_info in enumerate(gpu_infos): gpu_key = f""cuda_{i}"" self.info[f""{gpu_key}_memory_used""] = [] self.info[f""{gpu_key}_name""] = gpu_info[""name""] self.info[f""{gpu_key}_total_memory""] = gpu_info[""total_memory""] self.info[f""{gpu_key}_driver_version""] = gpu_info[""driver_version""] self.info[f""{gpu_key}_cuda_version""] = gpu_info[""cuda_version""] # recording in microseconds to be in line with torch profiler time recording. self.info[""start_time""] = time.perf_counter_ns() / 1000 ",Populate the report with static software and hardware information.,9,77,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _populate_static_information(self) -> None: self.info[""ludwig_version""] = LUDWIG_VERSION self.info[""start_disk_usage""] = shutil.disk_usage(os.path.expanduser(""~"")).used # CPU information cpu_info = get_my_cpu_info() self.info[""cpu_architecture""] = cpu_info[""arch""] self.info[""num_cpu""] = psutil.cpu_count() self.info[""cpu_name""] = cpu_info[""brand_raw""] self.info[""total_cpu_memory_size""] = psutil.virtual_memory().total # GPU information if self.cuda_is_available: gpu_infos = get_gpu_info() for i, gpu_info in enumerate(gpu_infos): gpu_key = f""cuda_{i}"" self.info[f""{gpu_key}_memory_used""] = [] self.info[f""{gpu_key}_name""] = gpu_info[""name""] self.info[f""{gpu_key}_total_memory""] = gpu_info[""total_memory""] self.info[f""{gpu_key}_driver_version""] = gpu_info[""driver_version""] self.info[f""{gpu_key}_cuda_version""] = gpu_info[""cuda_version""] # recording in microseconds to be in line with torch profiler time recording. self.info[""start_time""] = time.perf_counter_ns() / 1000 ``` ###Assistant : Populate the report with static software and hardware information. " 943,"def assertCanNotCreateAt(self, parent_model, child_model, msg=None): if self._testCanCreateAt(parent_model, child_model): msg = self._formatMessage( msg, ""Can create a %s.%s under a %s.%s"" % ( child_model._meta.app_label, child_model._meta.model_name, parent_model._meta.app_label, parent_model._meta.model_name, ), ) raise self.failureException(msg) "," Assert a particular child Page type can not be created under a parent Page type. ``parent_model`` and ``child_model`` should be the Page classes being tested. ",25,29,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def assertCanNotCreateAt(self, parent_model, child_model, msg=None): if self._testCanCreateAt(parent_model, child_model): msg = self._formatMessage( msg, ""Can create a %s.%s under a %s.%s"" % ( child_model._meta.app_label, child_model._meta.model_name, parent_model._meta.app_label, parent_model._meta.model_name, ), ) raise self.failureException(msg) ``` ###Assistant : Assert a particular child Page type can not be created under a parent Page type. ``parent_model`` and ``child_model`` should be the Page classes being tested. " 944,"def get_feature_objects(self) -> Mapping[Project, Feature]: cls = self._manager._get_feature_class(self.feature_name) return {obj: cls(self.feature_name, obj) for obj in self.objects} "," Iterate over individual Feature objects. This is a fallback mode for applying a FeatureHandler that doesn't support checking the entire batch at once. ",23,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_feature_objects(self) -> Mapping[Project, Feature]: cls = self._manager._get_feature_class(self.feature_name) return {obj: cls(self.feature_name, obj) for obj in self.objects} ``` ###Assistant : Iterate over individual Feature objects. This is a fallback mode for applying a FeatureHandler that doesn't support checking the entire batch at once. " 945,"def require_ffmpeg(test_case): import subprocess try: subprocess.check_output([""ffmpeg"", ""-h""], stderr=subprocess.DEVNULL) return test_case except Exception: return unittest.skip(""test requires ffmpeg"")(test_case) "," Decorator marking a test that requires FFmpeg. These tests are skipped when FFmpeg isn't installed. ",15,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def require_ffmpeg(test_case): import subprocess try: subprocess.check_output([""ffmpeg"", ""-h""], stderr=subprocess.DEVNULL) return test_case except Exception: return unittest.skip(""test requires ffmpeg"")(test_case) ``` ###Assistant : Decorator marking a test that requires FFmpeg. These tests are skipped when FFmpeg isn't installed. 
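A minimal, self-contained usage sketch of this decorator follows; it restates the helper with single-quoted strings, and the test class and its body are hypothetical, added only for illustration:

```Python
# Usage sketch: the decorator mirrors the snippet above; AudioPipelineTest is invented.
import subprocess
import unittest


def require_ffmpeg(test_case):
    try:
        subprocess.check_output(['ffmpeg', '-h'], stderr=subprocess.DEVNULL)
        return test_case
    except Exception:
        return unittest.skip('test requires ffmpeg')(test_case)


@require_ffmpeg
class AudioPipelineTest(unittest.TestCase):
    def test_ffmpeg_is_callable(self):
        # Only runs when ffmpeg is installed; otherwise the whole class is skipped.
        subprocess.check_call(['ffmpeg', '-version'], stdout=subprocess.DEVNULL)


if __name__ == '__main__':
    unittest.main()
```

Applying the decorator to the class skips every test in it when the `ffmpeg` binary is missing.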
" 946,"def generate_random_string(): import random import string return """".join(random.choices(string.ascii_uppercase + string.digits, k=8)) random_string = generate_random_string() # [START create_queue] create_queue = CloudTasksQueueCreateOperator( location=LOCATION, task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=0.5)), queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", retry=Retry(maximum=10.0), timeout=5, task_id=""create_queue"", ) # [END create_queue] # [START delete_queue] delete_queue = CloudTasksQueueDeleteOperator( location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", task_id=""delete_queue"", ) # [END delete_queue] delete_queue.trigger_rule = TriggerRule.ALL_DONE # [START resume_queue] resume_queue = CloudTasksQueueResumeOperator( location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", task_id=""resume_queue"", ) # [END resume_queue] # [START pause_queue] pause_queue = CloudTasksQueuePauseOperator( location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", task_id=""pause_queue"", ) # [END pause_queue] # [START purge_queue] purge_queue = CloudTasksQueuePurgeOperator( location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", task_id=""purge_queue"", ) # [END purge_queue] # [START get_queue] get_queue = CloudTasksQueueGetOperator( location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", task_id=""get_queue"", ) get_queue_result = BashOperator( task_id=""get_queue_result"", bash_command=f""echo {get_queue.output}"", ) # [END get_queue] # [START update_queue] update_queue = CloudTasksQueueUpdateOperator( task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=1)), location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", update_mask=FieldMask(paths=[""stackdriver_logging_config.sampling_ratio""]), task_id=""update_queue"", ) # [END update_queue] # [START list_queue] list_queue = CloudTasksQueuesListOperator(location=LOCATION, task_id=""list_queue"") # [END list_queue] chain( random_string, create_queue, update_queue, pause_queue, resume_queue, purge_queue, get_queue, get_queue_result, list_queue, delete_queue, ) from tests.system.utils.watcher import watcher # This test needs watcher in order to properly mark success/failure # when ""tearDown"" task with trigger rule is part of the DAG list(dag.tasks) >> watcher() from tests.system.utils import get_test_run # noqa: E402 # Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest) test_run = get_test_run(dag) "," Generate random string for queue and task names. Queue name cannot be repeated in preceding 7 days and task name in the last 1 hour. ",25,221,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def generate_random_string(): import random import string return """".join(random.choices(string.ascii_uppercase + string.digits, k=8)) random_string = generate_random_string() # [START create_queue] create_queue = CloudTasksQueueCreateOperator( location=LOCATION, task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=0.5)), queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", retry=Retry(maximum=10.0), timeout=5, task_id=""create_queue"", ) # [END create_queue] # [START delete_queue] delete_queue = CloudTasksQueueDeleteOperator( location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", task_id=""delete_queue"", ) # [END delete_queue] delete_queue.trigger_rule = TriggerRule.ALL_DONE # [START resume_queue] resume_queue = CloudTasksQueueResumeOperator( location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", task_id=""resume_queue"", ) # [END resume_queue] # [START pause_queue] pause_queue = CloudTasksQueuePauseOperator( location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", task_id=""pause_queue"", ) # [END pause_queue] # [START purge_queue] purge_queue = CloudTasksQueuePurgeOperator( location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", task_id=""purge_queue"", ) # [END purge_queue] # [START get_queue] get_queue = CloudTasksQueueGetOperator( location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", task_id=""get_queue"", ) get_queue_result = BashOperator( task_id=""get_queue_result"", bash_command=f""echo {get_queue.output}"", ) # [END get_queue] # [START update_queue] update_queue = CloudTasksQueueUpdateOperator( task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=1)), location=LOCATION, queue_name=QUEUE_ID + ""{{ task_instance.xcom_pull(task_ids='random_string') }}"", update_mask=FieldMask(paths=[""stackdriver_logging_config.sampling_ratio""]), task_id=""update_queue"", ) # [END update_queue] # [START list_queue] list_queue = CloudTasksQueuesListOperator(location=LOCATION, task_id=""list_queue"") # [END list_queue] chain( random_string, create_queue, update_queue, pause_queue, resume_queue, purge_queue, get_queue, get_queue_result, list_queue, delete_queue, ) from tests.system.utils.watcher import watcher # This test needs watcher in order to properly mark success/failure # when ""tearDown"" task with trigger rule is part of the DAG list(dag.tasks) >> watcher() from tests.system.utils import get_test_run # noqa: E402 # Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest) test_run = get_test_run(dag) ``` ###Assistant : Generate random string for queue and task names. Queue name cannot be repeated in preceding 7 days and task name in the last 1 hour. " 947,"def make_gradient_clipvalue_fn(clipvalue): if clipvalue is None: return lambda grads_and_vars: grads_and_vars ",Creates a gradient transformation function for clipping by value.,9,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_gradient_clipvalue_fn(clipvalue): if clipvalue is None: return lambda grads_and_vars: grads_and_vars ``` ###Assistant : Creates a gradient transformation function for clipping by value. 
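Only the `clipvalue is None` branch of this helper is shown above. As a loose illustration of the pattern it implements (returning a function that maps a list of `(gradient, variable)` pairs to a transformed list), here is a plain-Python stand-in; it is not the Keras implementation, and the clamping logic below is an assumption for demonstration only:

```Python
# Plain-Python sketch of a 'clip gradients by value' transformation; the real
# helper operates on TensorFlow tensors, which this deliberately avoids.
def make_gradient_clipvalue_fn(clipvalue):
    if clipvalue is None:
        return lambda grads_and_vars: grads_and_vars

    def clipvalue_fn(grads_and_vars):
        # Clamp every gradient into [-clipvalue, clipvalue]; variables pass through.
        return [(max(-clipvalue, min(clipvalue, g)), v) for g, v in grads_and_vars]

    return clipvalue_fn


pairs = [(5.0, 'kernel'), (-3.0, 'bias')]        # (gradient, variable-name) pairs
clip_fn = make_gradient_clipvalue_fn(2.0)
print(clip_fn(pairs))                            # [(2.0, 'kernel'), (-2.0, 'bias')]
print(make_gradient_clipvalue_fn(None)(pairs))   # unchanged identity transform
```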
" 948,"def odd_ext(x, n, axis=-1): if n < 1: return x if n > x.shape[axis] - 1: raise ValueError( f""The extension length n ({n}) is too big. "" f""It must not exceed x.shape[axis]-1, which is {x.shape[axis] - 1}."") left_end = lax.slice_in_dim(x, 0, 1, axis=axis) left_ext = jnp.flip(lax.slice_in_dim(x, 1, n + 1, axis=axis), axis=axis) right_end = lax.slice_in_dim(x, -1, None, axis=axis) right_ext = jnp.flip(lax.slice_in_dim(x, -(n + 1), -1, axis=axis), axis=axis) ext = jnp.concatenate((2 * left_end - left_ext, x, 2 * right_end - right_ext), axis=axis) return ext ","Extends `x` along with `axis` by odd-extension. This function was previously a part of ""scipy.signal.signaltools"" but is no longer exposed. Args: x : input array n : the number of points to be added to the both end axis: the axis to be extended ",44,83,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def odd_ext(x, n, axis=-1): if n < 1: return x if n > x.shape[axis] - 1: raise ValueError( f""The extension length n ({n}) is too big. "" f""It must not exceed x.shape[axis]-1, which is {x.shape[axis] - 1}."") left_end = lax.slice_in_dim(x, 0, 1, axis=axis) left_ext = jnp.flip(lax.slice_in_dim(x, 1, n + 1, axis=axis), axis=axis) right_end = lax.slice_in_dim(x, -1, None, axis=axis) right_ext = jnp.flip(lax.slice_in_dim(x, -(n + 1), -1, axis=axis), axis=axis) ext = jnp.concatenate((2 * left_end - left_ext, x, 2 * right_end - right_ext), axis=axis) return ext ``` ###Assistant : Extends `x` along with `axis` by odd-extension. This function was previously a part of ""scipy.signal.signaltools"" but is no longer exposed. Args: x : input array n : the number of points to be added to the both end axis: the axis to be extended " 949,"def filter_on_submodules(all_modules, submodule): filtered_modules = [ mod for mod in all_modules if PACKAGE + submodule in mod.__name__ ] return filtered_modules ","Filters all the modules based on the module flag. The module flag has to be relative to the core package imported. For example, if `submodule=keras.layers` then, this function will return all the modules in the submodule. Args: all_modules: All the modules in the core package. submodule: Submodule to filter from all the modules. Returns: All the modules in the submodule. ",60,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def filter_on_submodules(all_modules, submodule): filtered_modules = [ mod for mod in all_modules if PACKAGE + submodule in mod.__name__ ] return filtered_modules ``` ###Assistant : Filters all the modules based on the module flag. The module flag has to be relative to the core package imported. For example, if `submodule=keras.layers` then, this function will return all the modules in the submodule. Args: all_modules: All the modules in the core package. submodule: Submodule to filter from all the modules. Returns: All the modules in the submodule. 
" 950,"def get_pe_matching_query(amount_condition, account_from_to, transaction): # get matching payment entries query from_date = frappe.db.get_single_value(""Bank Reconciliation Tool"", ""bank_statement_from_date"") to_date = frappe.db.get_single_value(""Bank Reconciliation Tool"", ""bank_statement_to_date"") from_reference_date = frappe.db.get_single_value( ""Bank Reconciliation Tool"", ""from_reference_date"" ) to_reference_date = frappe.db.get_single_value(""Bank Reconciliation Tool"", ""to_reference_date"") filtered_by_reference_date = frappe.db.get_single_value( ""Bank Reconciliation Tool"", ""filtered_by_reference_date"" ) if transaction.deposit > 0: currency_field = ""paid_to_account_currency as currency"" else: currency_field = ""paid_from_account_currency as currency"" cond_filtered_from_ref_date = """" cond_filtered_to_ref_date = """" cond_filtered_from_posting_date = """" cond_filtered_to_posting_date = """" from_ref_date ="""" to_ref_date ="""" from_post_date = """" to_post_date = """" if(filtered_by_reference_date): cond_filtered_from_ref_date = "" AND reference_date >="" cond_filtered_to_ref_date = "" AND reference_date <="" from_ref_date = from_reference_date to_ref_date = to_reference_date elif(not filtered_by_reference_date): cond_filtered_from_posting_date = "" AND posting_date >="" cond_filtered_to_posting_date = "" AND posting_date <="" from_post_date = from_date to_post_date = to_date pe_data= f return pe_data "," SELECT (CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END + CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END + 1 ) AS rank, 'Payment Entry' as doctype, name, paid_amount, reference_no, reference_date, party, party_type, posting_date, {currency_field} FROM `tabPayment Entry` WHERE paid_amount {amount_condition} %(amount)s AND docstatus = 1 AND payment_type IN (%(payment_type)s, 'Internal Transfer') AND ifnull(clearance_date, '') = """" AND {account_from_to} = %(bank_account)s AND reference_no = '{transaction.reference_number}' {cond_filtered_from_ref_date} ""{from_ref_date}"" {cond_filtered_to_ref_date} ""{to_ref_date}"" {cond_filtered_from_posting_date} ""{from_post_date}"" {cond_filtered_to_posting_date} ""{to_post_date}"" ",80,124,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_pe_matching_query(amount_condition, account_from_to, transaction): # get matching payment entries query from_date = frappe.db.get_single_value(""Bank Reconciliation Tool"", ""bank_statement_from_date"") to_date = frappe.db.get_single_value(""Bank Reconciliation Tool"", ""bank_statement_to_date"") from_reference_date = frappe.db.get_single_value( ""Bank Reconciliation Tool"", ""from_reference_date"" ) to_reference_date = frappe.db.get_single_value(""Bank Reconciliation Tool"", ""to_reference_date"") filtered_by_reference_date = frappe.db.get_single_value( ""Bank Reconciliation Tool"", ""filtered_by_reference_date"" ) if transaction.deposit > 0: currency_field = ""paid_to_account_currency as currency"" else: currency_field = ""paid_from_account_currency as currency"" cond_filtered_from_ref_date = """" cond_filtered_to_ref_date = """" cond_filtered_from_posting_date = """" cond_filtered_to_posting_date = """" from_ref_date ="""" to_ref_date ="""" from_post_date = """" to_post_date = """" if(filtered_by_reference_date): cond_filtered_from_ref_date = "" AND reference_date >="" cond_filtered_to_ref_date = "" AND reference_date <="" from_ref_date = from_reference_date to_ref_date = to_reference_date elif(not filtered_by_reference_date): cond_filtered_from_posting_date = "" AND posting_date >="" cond_filtered_to_posting_date = "" AND posting_date <="" from_post_date = from_date to_post_date = to_date pe_data= f return pe_data ``` ###Assistant : SELECT (CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END + CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END + 1 ) AS rank, 'Payment Entry' as doctype, name, paid_amount, reference_no, reference_date, party, party_type, posting_date, {currency_field} FROM `tabPayment Entry` WHERE paid_amount {amount_condition} %(amount)s AND docstatus = 1 AND payment_type IN (%(payment_type)s, 'Internal Transfer') AND ifnull(clearance_date, '') = """" AND {account_from_to} = %(bank_account)s AND reference_no = '{transaction.reference_number}' {cond_filtered_from_ref_date} ""{from_ref_date}"" {cond_filtered_to_ref_date} ""{to_ref_date}"" {cond_filtered_from_posting_date} ""{from_post_date}"" {cond_filtered_to_posting_date} ""{to_post_date}"" " 951,"def closed(self) -> IntervalInclusiveType: warnings.warn( ""Attribute `closed` is deprecated in favor of `inclusive`."", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) return self.dtype.inclusive _interval_shared_docs[""set_closed""] = textwrap.dedent( ) "," String describing the inclusive side the intervals. Either ``left``, ``right``, ``both`` or ``neither`. Return an identical %(klass)s closed on the specified side. .. deprecated:: 1.5.0 Parameters ---------- closed : {'left', 'right', 'both', 'neither'} Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- new_index : %(klass)s %(examples)s\ ",51,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def closed(self) -> IntervalInclusiveType: warnings.warn( ""Attribute `closed` is deprecated in favor of `inclusive`."", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) return self.dtype.inclusive _interval_shared_docs[""set_closed""] = textwrap.dedent( ) ``` ###Assistant : String describing the inclusive side the intervals. Either ``left``, ``right``, ``both`` or ``neither`. 
Return an identical %(klass)s closed on the specified side. .. deprecated:: 1.5.0 Parameters ---------- closed : {'left', 'right', 'both', 'neither'} Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- new_index : %(klass)s %(examples)s\ " 952,"def as_real_imag(self, deep=True, **hints): from sympy.functions.elementary.trigonometric import cos, sin re, im = self.args[0].as_real_imag() if deep: re = re.expand(deep, **hints) im = im.expand(deep, **hints) cos, sin = cos(im), sin(im) return (exp(re)*cos, exp(re)*sin) "," Returns this function as a 2-tuple representing a complex number. Examples ======== >>> from sympy import I, exp >>> from sympy.abc import x >>> exp(x).as_real_imag() (exp(re(x))*cos(im(x)), exp(re(x))*sin(im(x))) >>> exp(1).as_real_imag() (E, 0) >>> exp(I).as_real_imag() (cos(1), sin(1)) >>> exp(1+I).as_real_imag() (E*cos(1), E*sin(1)) See Also ======== sympy.functions.elementary.complexes.re sympy.functions.elementary.complexes.im ",44,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def as_real_imag(self, deep=True, **hints): from sympy.functions.elementary.trigonometric import cos, sin re, im = self.args[0].as_real_imag() if deep: re = re.expand(deep, **hints) im = im.expand(deep, **hints) cos, sin = cos(im), sin(im) return (exp(re)*cos, exp(re)*sin) ``` ###Assistant : Returns this function as a 2-tuple representing a complex number. Examples ======== >>> from sympy import I, exp >>> from sympy.abc import x >>> exp(x).as_real_imag() (exp(re(x))*cos(im(x)), exp(re(x))*sin(im(x))) >>> exp(1).as_real_imag() (E, 0) >>> exp(I).as_real_imag() (cos(1), sin(1)) >>> exp(1+I).as_real_imag() (E*cos(1), E*sin(1)) See Also ======== sympy.functions.elementary.complexes.re sympy.functions.elementary.complexes.im " 953,"async def _get_conversation_ids_to_process(self) -> Set[Text]: conversation_ids_in_tracker_store = ( await self._get_conversation_ids_in_tracker() ) if not self.requested_conversation_ids: return conversation_ids_in_tracker_store self._validate_all_requested_ids_exist(conversation_ids_in_tracker_store) conversation_ids_to_process = conversation_ids_in_tracker_store & set( self.requested_conversation_ids ) if not conversation_ids_to_process: raise NoEventsToMigrateError( ""Could not find an overlap between the requested "" ""conversation IDs and those found in the tracker store. Exiting."" ) return conversation_ids_to_process ","Get conversation IDs that are good for processing. Finds the intersection of events that are contained in the tracker store with those events requested as a command-line argument. Returns: Conversation IDs that are both requested and contained in the tracker store. If no conversation IDs are requested, all conversation IDs in the tracker store are returned. ",56,51,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def _get_conversation_ids_to_process(self) -> Set[Text]: conversation_ids_in_tracker_store = ( await self._get_conversation_ids_in_tracker() ) if not self.requested_conversation_ids: return conversation_ids_in_tracker_store self._validate_all_requested_ids_exist(conversation_ids_in_tracker_store) conversation_ids_to_process = conversation_ids_in_tracker_store & set( self.requested_conversation_ids ) if not conversation_ids_to_process: raise NoEventsToMigrateError( ""Could not find an overlap between the requested "" ""conversation IDs and those found in the tracker store. Exiting."" ) return conversation_ids_to_process ``` ###Assistant : Get conversation IDs that are good for processing. Finds the intersection of events that are contained in the tracker store with those events requested as a command-line argument. Returns: Conversation IDs that are both requested and contained in the tracker store. If no conversation IDs are requested, all conversation IDs in the tracker store are returned. " 954,"def call(self, inputs, training=None, mask=None): raise NotImplementedError( ""Unimplemented `tf.keras.Model.call()`: if you "" ""intend to create a `Model` with the Functional "" ""API, please provide `inputs` and `outputs` "" ""arguments. Otherwise, subclass `Model` with an "" ""overridden `call()` method."" ) ","Calls the model on new inputs and returns the outputs as tensors. In this case `call()` just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs). Note: This method should not be called directly. It is only meant to be overridden when subclassing `tf.keras.Model`. To call a model on an input, always use the `__call__()` method, i.e. `model(inputs)`, which relies on the underlying `call()` method. Args: inputs: Input tensor, or dict/list/tuple of input tensors. training: Boolean or boolean scalar tensor, indicating whether to run the `Network` in training mode or inference mode. mask: A mask or list of masks. A mask can be either a boolean tensor or None (no mask). For more details, check the guide [here](https://www.tensorflow.org/guide/keras/masking_and_padding). Returns: A tensor if there is a single output, or a list of tensors if there are more than one outputs. ",150,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call(self, inputs, training=None, mask=None): raise NotImplementedError( ""Unimplemented `tf.keras.Model.call()`: if you "" ""intend to create a `Model` with the Functional "" ""API, please provide `inputs` and `outputs` "" ""arguments. Otherwise, subclass `Model` with an "" ""overridden `call()` method."" ) ``` ###Assistant : Calls the model on new inputs and returns the outputs as tensors. In this case `call()` just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs). Note: This method should not be called directly. It is only meant to be overridden when subclassing `tf.keras.Model`. To call a model on an input, always use the `__call__()` method, i.e. `model(inputs)`, which relies on the underlying `call()` method. Args: inputs: Input tensor, or dict/list/tuple of input tensors. training: Boolean or boolean scalar tensor, indicating whether to run the `Network` in training mode or inference mode. mask: A mask or list of masks. A mask can be either a boolean tensor or None (no mask). 
For more details, check the guide [here](https://www.tensorflow.org/guide/keras/masking_and_padding). Returns: A tensor if there is a single output, or a list of tensors if there are more than one outputs. " 955,"def _read(cls, path_or_buf, **kwargs): path_or_buf = cls.get_path_or_buffer(path_or_buf) if isinstance(path_or_buf, str): if not cls.file_exists(path_or_buf): return cls.single_worker_read(path_or_buf, **kwargs) path_or_buf = cls.get_path(path_or_buf) elif not cls.pathlib_or_pypath(path_or_buf): return cls.single_worker_read(path_or_buf, **kwargs) if not kwargs.get(""lines"", False): return cls.single_worker_read(path_or_buf, **kwargs) with OpenFile(path_or_buf, ""rb"") as f: columns = pandas.read_json(BytesIO(b"""" + f.readline()), lines=True).columns kwargs[""columns""] = columns empty_pd_df = pandas.DataFrame(columns=columns) with OpenFile(path_or_buf, ""rb"", kwargs.get(""compression"", ""infer"")) as f: partition_ids = [] index_ids = [] dtypes_ids = [] column_widths, num_splits = cls._define_metadata(empty_pd_df, columns) args = {""fname"": path_or_buf, ""num_splits"": num_splits, **kwargs} splits = cls.partitioned_file( f, num_partitions=NPartitions.get(), ) for start, end in splits: args.update({""start"": start, ""end"": end}) partition_id = cls.deploy(cls.parse, num_returns=num_splits + 3, **args) partition_ids.append(partition_id[:-3]) index_ids.append(partition_id[-3]) dtypes_ids.append(partition_id[-2]) # partition_id[-1] contains the columns for each partition, which will be useful # for implementing when `lines=False`. row_lengths = cls.materialize(index_ids) new_index = pandas.RangeIndex(sum(row_lengths)) dtypes = cls.get_dtypes(dtypes_ids) partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths) if isinstance(dtypes, pandas.Series): dtypes.index = columns else: dtypes = pandas.Series(dtypes, index=columns) new_frame = cls.frame_cls( np.array(partition_ids), new_index, columns, row_lengths, column_widths, dtypes=dtypes, ) new_frame.synchronize_labels(axis=0) return cls.query_compiler_cls(new_frame) "," Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters. Parameters ---------- path_or_buf : str, path object or file-like object `path_or_buf` parameter of `read_json` function. **kwargs : dict Parameters of `read_json` function. Returns ------- BaseQueryCompiler Query compiler with imported data for further processing. ",44,157,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _read(cls, path_or_buf, **kwargs): path_or_buf = cls.get_path_or_buffer(path_or_buf) if isinstance(path_or_buf, str): if not cls.file_exists(path_or_buf): return cls.single_worker_read(path_or_buf, **kwargs) path_or_buf = cls.get_path(path_or_buf) elif not cls.pathlib_or_pypath(path_or_buf): return cls.single_worker_read(path_or_buf, **kwargs) if not kwargs.get(""lines"", False): return cls.single_worker_read(path_or_buf, **kwargs) with OpenFile(path_or_buf, ""rb"") as f: columns = pandas.read_json(BytesIO(b"""" + f.readline()), lines=True).columns kwargs[""columns""] = columns empty_pd_df = pandas.DataFrame(columns=columns) with OpenFile(path_or_buf, ""rb"", kwargs.get(""compression"", ""infer"")) as f: partition_ids = [] index_ids = [] dtypes_ids = [] column_widths, num_splits = cls._define_metadata(empty_pd_df, columns) args = {""fname"": path_or_buf, ""num_splits"": num_splits, **kwargs} splits = cls.partitioned_file( f, num_partitions=NPartitions.get(), ) for start, end in splits: args.update({""start"": start, ""end"": end}) partition_id = cls.deploy(cls.parse, num_returns=num_splits + 3, **args) partition_ids.append(partition_id[:-3]) index_ids.append(partition_id[-3]) dtypes_ids.append(partition_id[-2]) # partition_id[-1] contains the columns for each partition, which will be useful # for implementing when `lines=False`. row_lengths = cls.materialize(index_ids) new_index = pandas.RangeIndex(sum(row_lengths)) dtypes = cls.get_dtypes(dtypes_ids) partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths) if isinstance(dtypes, pandas.Series): dtypes.index = columns else: dtypes = pandas.Series(dtypes, index=columns) new_frame = cls.frame_cls( np.array(partition_ids), new_index, columns, row_lengths, column_widths, dtypes=dtypes, ) new_frame.synchronize_labels(axis=0) return cls.query_compiler_cls(new_frame) ``` ###Assistant : Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters. Parameters ---------- path_or_buf : str, path object or file-like object `path_or_buf` parameter of `read_json` function. **kwargs : dict Parameters of `read_json` function. Returns ------- BaseQueryCompiler Query compiler with imported data for further processing. " 956,"def stop_ambient_camera_rotation(self, about=""theta""): about: str = about.lower() try: if config.renderer == RendererType.CAIRO: trackers = { ""theta"": self.camera.theta_tracker, ""phi"": self.camera.phi_tracker, ""gamma"": self.camera.gamma_tracker, } x: ValueTracker = trackers[about] x.clear_updaters() self.remove(x) elif config.renderer == RendererType.OPENGL: self.camera.clear_updaters() except Exception: raise ValueError(""Invalid ambient rotation angle."") "," This method stops all ambient camera rotation. ",7,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def stop_ambient_camera_rotation(self, about=""theta""): about: str = about.lower() try: if config.renderer == RendererType.CAIRO: trackers = { ""theta"": self.camera.theta_tracker, ""phi"": self.camera.phi_tracker, ""gamma"": self.camera.gamma_tracker, } x: ValueTracker = trackers[about] x.clear_updaters() self.remove(x) elif config.renderer == RendererType.OPENGL: self.camera.clear_updaters() except Exception: raise ValueError(""Invalid ambient rotation angle."") ``` ###Assistant : This method stops all ambient camera rotation. 
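A hedged usage sketch (not from the original file) showing how the method is typically paired with `begin_ambient_camera_rotation` inside a `ThreeDScene`; it assumes the community `manim` package is installed, and the scene name and timings are illustrative:

```Python
from manim import DEGREES, Sphere, ThreeDScene


class AmbientRotationDemo(ThreeDScene):  # hypothetical scene name
    def construct(self):
        self.set_camera_orientation(phi=75 * DEGREES, theta=30 * DEGREES)
        self.add(Sphere())
        # Spin the camera about theta for two seconds, then halt the rotation.
        self.begin_ambient_camera_rotation(rate=0.2)
        self.wait(2)
        self.stop_ambient_camera_rotation(about='theta')
        self.wait(1)
```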
" 957,"def _get_bundled_aggregations(self) -> JsonDict: # Fetch the bundled aggregations of the event. channel = self.make_request( ""GET"", f""/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}"", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) return channel.json_body[""unsigned""].get(""m.relations"", {}) "," Requests /event on the parent ID and returns the m.relations field (from unsigned), if it exists. ",16,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_bundled_aggregations(self) -> JsonDict: # Fetch the bundled aggregations of the event. channel = self.make_request( ""GET"", f""/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}"", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) return channel.json_body[""unsigned""].get(""m.relations"", {}) ``` ###Assistant : Requests /event on the parent ID and returns the m.relations field (from unsigned), if it exists. " 958,"def get_memos(self) -> Dict[bytes32, List[bytes]]: memos: Dict[bytes32, List[bytes]] = {} for coin_spend in self.coin_spends: result = Program.from_bytes(bytes(coin_spend.puzzle_reveal)).run( Program.from_bytes(bytes(coin_spend.solution)) ) for condition in result.as_python(): if condition[0] == ConditionOpcode.CREATE_COIN and len(condition) >= 4: # If only 3 elements (opcode + 2 args), there is no memo, this is ph, amount coin_added = Coin(coin_spend.coin.name(), bytes32(condition[1]), int_from_bytes(condition[2])) if type(condition[3]) != list: # If it's not a list, it's not the correct format continue memos[coin_added.name()] = condition[3] return memos # Note that `coin_spends` used to have the bad name `coin_solutions`. # Some API still expects this name. For now, we accept both names. # # TODO: continue this deprecation. Eventually, all code below here should be removed. # 1. set `exclude_modern_keys` to `False` (and manually set to `True` where necessary) # 2. set `include_legacy_keys` to `False` (and manually set to `False` where necessary) # 3. remove all references to `include_legacy_keys=True` # 4. remove all code below this point "," Retrieves the memos for additions in this spend_bundle, which are formatted as a list in the 3rd parameter of CREATE_COIN. If there are no memos, the addition coin_id is not included. If they are not formatted as a list of bytes, they are not included. This is expensive to call, it should not be used in full node code. ",59,153,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_memos(self) -> Dict[bytes32, List[bytes]]: memos: Dict[bytes32, List[bytes]] = {} for coin_spend in self.coin_spends: result = Program.from_bytes(bytes(coin_spend.puzzle_reveal)).run( Program.from_bytes(bytes(coin_spend.solution)) ) for condition in result.as_python(): if condition[0] == ConditionOpcode.CREATE_COIN and len(condition) >= 4: # If only 3 elements (opcode + 2 args), there is no memo, this is ph, amount coin_added = Coin(coin_spend.coin.name(), bytes32(condition[1]), int_from_bytes(condition[2])) if type(condition[3]) != list: # If it's not a list, it's not the correct format continue memos[coin_added.name()] = condition[3] return memos # Note that `coin_spends` used to have the bad name `coin_solutions`. # Some API still expects this name. For now, we accept both names. # # TODO: continue this deprecation. 
Eventually, all code below here should be removed. # 1. set `exclude_modern_keys` to `False` (and manually set to `True` where necessary) # 2. set `include_legacy_keys` to `False` (and manually set to `False` where necessary) # 3. remove all references to `include_legacy_keys=True` # 4. remove all code below this point ``` ###Assistant : Retrieves the memos for additions in this spend_bundle, which are formatted as a list in the 3rd parameter of CREATE_COIN. If there are no memos, the addition coin_id is not included. If they are not formatted as a list of bytes, they are not included. This is expensive to call, it should not be used in full node code. " 959,"def _clean_url_path_part(part): # type: (str) -> str # We unquote prior to quoting to make sure nothing is double quoted. return urllib.parse.quote(urllib.parse.unquote(part)) "," Clean a ""part"" of a URL path (i.e. after splitting on ""@"" characters). ",13,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _clean_url_path_part(part): # type: (str) -> str # We unquote prior to quoting to make sure nothing is double quoted. return urllib.parse.quote(urllib.parse.unquote(part)) ``` ###Assistant : Clean a ""part"" of a URL path (i.e. after splitting on ""@"" characters). " 960,"def CheckAltTokens(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Avoid preprocessor lines if Match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. This will not help # if the comment started before the current line or ended after the # current line, but it catches most of the false positives. At least, # it provides a way to workaround this warning for people who use # multi-line comments in preprocessor macros. # # TODO(unknown): remove this once cpplint has better support for # multi-line comments. if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % ( _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) ","Check alternative keywords being used in boolean expressions. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. ",40,114,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def CheckAltTokens(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Avoid preprocessor lines if Match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. This will not help # if the comment started before the current line or ended after the # current line, but it catches most of the false positives. At least, # it provides a way to workaround this warning for people who use # multi-line comments in preprocessor macros. # # TODO(unknown): remove this once cpplint has better support for # multi-line comments. if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % ( _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) ``` ###Assistant : Check alternative keywords being used in boolean expressions. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. 
linenum: The number of the line to check. error: The function to call with any errors found. " 961,"def register_ray(): try: from ray.util.joblib.ray_backend import RayBackend register_parallel_backend(""ray"", RayBackend) except ImportError: msg = ( ""To use the ray backend you must install ray."" ""Try running 'pip install ray'."" ""See https://docs.ray.io/en/master/installation.html"" ""for more information."" ) raise ImportError(msg) __all__ = [""register_ray""] ","Register Ray Backend to be called with parallel_backend(""ray"").",8,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def register_ray(): try: from ray.util.joblib.ray_backend import RayBackend register_parallel_backend(""ray"", RayBackend) except ImportError: msg = ( ""To use the ray backend you must install ray."" ""Try running 'pip install ray'."" ""See https://docs.ray.io/en/master/installation.html"" ""for more information."" ) raise ImportError(msg) __all__ = [""register_ray""] ``` ###Assistant : Register Ray Backend to be called with parallel_backend(""ray""). " 962,"def get_group_permissions(self, user_obj, obj=None): return self._get_permissions(user_obj, obj, ""group"") "," Return a set of permission strings the user `user_obj` has from the groups they belong. ",15,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_group_permissions(self, user_obj, obj=None): return self._get_permissions(user_obj, obj, ""group"") ``` ###Assistant : Return a set of permission strings the user `user_obj` has from the groups they belong. " 963,"def get_evaluation_sets(self) -> List[dict]: return self.evaluation_set_client.get_evaluation_sets() "," Returns a list of uploaded evaluation sets to deepset cloud. :return: list of evaluation sets as dicts These contain (""name"", ""evaluation_set_id"", ""created_at"", ""matched_labels"", ""total_labels"") as fields. ",26,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_evaluation_sets(self) -> List[dict]: return self.evaluation_set_client.get_evaluation_sets() ``` ###Assistant : Returns a list of uploaded evaluation sets to deepset cloud. :return: list of evaluation sets as dicts These contain (""name"", ""evaluation_set_id"", ""created_at"", ""matched_labels"", ""total_labels"") as fields. " 964,"def allowlist_svg(dirty_xml): from lxml.html import clean allow_tags = [ 'xml', 'svg', 'circle', 'ellipse', 'line', 'path', 'polygon', 'polyline', 'rect' ] cleaner = clean.Cleaner( allow_tags=allow_tags, style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=False, remove_unknown_tags=False) clean_xml = cleaner.clean_html(dirty_xml) return clean_xml ","Filter out malicious/harmful content from SVG files by defining allowed tags ",11,34,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def allowlist_svg(dirty_xml): from lxml.html import clean allow_tags = [ 'xml', 'svg', 'circle', 'ellipse', 'line', 'path', 'polygon', 'polyline', 'rect' ] cleaner = clean.Cleaner( allow_tags=allow_tags, style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=False, remove_unknown_tags=False) clean_xml = cleaner.clean_html(dirty_xml) return clean_xml ``` ###Assistant : Filter out malicious/harmful content from SVG files by defining allowed tags " 965,"def _sort_filelist(self) -> None: for filename, image, alignments in self._iterator(): self.score_image(filename, image, alignments) self.sort() logger.debug(""sorted list: %s"", [r[0] if isinstance(r, (tuple, list)) else r for r in self._result]) "," Call the sort method's logic to populate the :attr:`_results` attribute. Put logic for scoring an individual frame in in :attr:`score_image` of the child Returns ------- list The sorted file. A list of tuples with the filename in the first position and score in the second position ",46,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _sort_filelist(self) -> None: for filename, image, alignments in self._iterator(): self.score_image(filename, image, alignments) self.sort() logger.debug(""sorted list: %s"", [r[0] if isinstance(r, (tuple, list)) else r for r in self._result]) ``` ###Assistant : Call the sort method's logic to populate the :attr:`_results` attribute. Put logic for scoring an individual frame in in :attr:`score_image` of the child Returns ------- list The sorted file. A list of tuples with the filename in the first position and score in the second position " 966,"def finalize_variable_values(self, var_list): if self.use_ema: # If the optimizer uses EMA, then when finalizing, we replace the model # variable value with its moving average stored inside optimizer. self._overwrite_model_variables_with_average_value(var_list) ","Set the final value of model's trainable variables. Sometimes there are some extra steps before ending the variable updates, such as overriding the model variables with its average value. Args: var_list: list of model variables. ",35,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def finalize_variable_values(self, var_list): if self.use_ema: # If the optimizer uses EMA, then when finalizing, we replace the model # variable value with its moving average stored inside optimizer. self._overwrite_model_variables_with_average_value(var_list) ``` ###Assistant : Set the final value of model's trainable variables. Sometimes there are some extra steps before ending the variable updates, such as overriding the model variables with its average value. Args: var_list: list of model variables. " 967,"async def wait_floating_requests_end(self): while self.total_num_floating_tasks_alive > 0: await asyncio.sleep(0) "," Await this coroutine to make sure that all the floating tasks that the request handler may bring are properly consumed ",20,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def wait_floating_requests_end(self): while self.total_num_floating_tasks_alive > 0: await asyncio.sleep(0) ``` ###Assistant : Await this coroutine to make sure that all the floating tasks that the request handler may bring are properly consumed " 968,"def _lu_impl(A, pivot=True, get_infos=False, out=None): # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor] r # If get_infos is True, then we don't need to check for errors and vice versa return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos)) if TYPE_CHECKING: _ListOrSeq = Sequence[Tensor] else: _ListOrSeq = List[Tensor] ","Computes the LU factorization of a matrix or batches of matrices :attr:`A`. Returns a tuple containing the LU factorization and pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to ``True``. .. note:: * The returned permutation matrix for every matrix in the batch is represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``. ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm, the ``i``-th row was permuted with the ``j-1``-th row. * LU factorization with :attr:`pivot` = ``False`` is not available for CPU, and attempting to do so will throw an error. However, LU factorization with :attr:`pivot` = ``False`` is available for CUDA. * This function does not check if the factorization was successful or not if :attr:`get_infos` is ``True`` since the status of the factorization is present in the third element of the return tuple. * In the case of batches of square matrices with size less or equal to 32 on a CUDA device, the LU factorization is repeated for singular matrices due to the bug in the MAGMA library (see magma issue 13). * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`. .. warning:: The gradients of this function will only be finite when :attr:`A` is full rank. This is because the LU decomposition is just differentiable at full rank matrices. Furthermore, if :attr:`A` is close to not being full rank, the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`. Args: A (Tensor): the tensor to factor of size :math:`(*, m, n)` pivot (bool, optional): controls whether pivoting is done. Default: ``True`` get_infos (bool, optional): if set to ``True``, returns an info IntTensor. Default: ``False`` out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``, then the elements in the tuple are Tensor, IntTensor, and IntTensor. If :attr:`get_infos` is ``False``, then the elements in the tuple are Tensor, IntTensor. Default: ``None`` Returns: (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)` - **pivots** (*IntTensor*): the pivots of size :math:`(*, \text{min}(m, n))`. ``pivots`` stores all the intermediate transpositions of rows. The final permutation ``perm`` could be reconstructed by applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``, where ``perm`` is initially the identity permutation of :math:`m` elements (essentially this is what :func:`torch.lu_unpack` is doing). 
- **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of size :math:`(*)` where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed Example:: >>> A = torch.randn(2, 3, 3) >>> A_LU, pivots = torch.lu(A) >>> A_LU tensor([[[ 1.3506, 2.5558, -0.0816], [ 0.1684, 1.1551, 0.1940], [ 0.1193, 0.6189, -0.5497]], [[ 0.4526, 1.2526, -0.3285], [-0.7988, 0.7175, -0.9701], [ 0.2634, -0.9255, -0.3459]]]) >>> pivots tensor([[ 3, 3, 3], [ 3, 3, 3]], dtype=torch.int32) >>> A_LU, pivots, info = torch.lu(A, get_infos=True) >>> if info.nonzero().size(0) == 0: ... print('LU factorization succeeded for all samples!') LU factorization succeeded for all samples! ",497,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _lu_impl(A, pivot=True, get_infos=False, out=None): # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor] r # If get_infos is True, then we don't need to check for errors and vice versa return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos)) if TYPE_CHECKING: _ListOrSeq = Sequence[Tensor] else: _ListOrSeq = List[Tensor] ``` ###Assistant : Computes the LU factorization of a matrix or batches of matrices :attr:`A`. Returns a tuple containing the LU factorization and pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to ``True``. .. note:: * The returned permutation matrix for every matrix in the batch is represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``. ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm, the ``i``-th row was permuted with the ``j-1``-th row. * LU factorization with :attr:`pivot` = ``False`` is not available for CPU, and attempting to do so will throw an error. However, LU factorization with :attr:`pivot` = ``False`` is available for CUDA. * This function does not check if the factorization was successful or not if :attr:`get_infos` is ``True`` since the status of the factorization is present in the third element of the return tuple. * In the case of batches of square matrices with size less or equal to 32 on a CUDA device, the LU factorization is repeated for singular matrices due to the bug in the MAGMA library (see magma issue 13). * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`. .. warning:: The gradients of this function will only be finite when :attr:`A` is full rank. This is because the LU decomposition is just differentiable at full rank matrices. Furthermore, if :attr:`A` is close to not being full rank, the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`. Args: A (Tensor): the tensor to factor of size :math:`(*, m, n)` pivot (bool, optional): controls whether pivoting is done. Default: ``True`` get_infos (bool, optional): if set to ``True``, returns an info IntTensor. Default: ``False`` out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``, then the elements in the tuple are Tensor, IntTensor, and IntTensor. If :attr:`get_infos` is ``False``, then the elements in the tuple are Tensor, IntTensor. Default: ``None`` Returns: (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)` - **pivots** (*IntTensor*): the pivots of size :math:`(*, \text{min}(m, n))`. ``pivots`` stores all the intermediate transpositions of rows. 
The final permutation ``perm`` could be reconstructed by applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``, where ``perm`` is initially the identity permutation of :math:`m` elements (essentially this is what :func:`torch.lu_unpack` is doing). - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of size :math:`(*)` where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed Example:: >>> A = torch.randn(2, 3, 3) >>> A_LU, pivots = torch.lu(A) >>> A_LU tensor([[[ 1.3506, 2.5558, -0.0816], [ 0.1684, 1.1551, 0.1940], [ 0.1193, 0.6189, -0.5497]], [[ 0.4526, 1.2526, -0.3285], [-0.7988, 0.7175, -0.9701], [ 0.2634, -0.9255, -0.3459]]]) >>> pivots tensor([[ 3, 3, 3], [ 3, 3, 3]], dtype=torch.int32) >>> A_LU, pivots, info = torch.lu(A, get_infos=True) >>> if info.nonzero().size(0) == 0: ... print('LU factorization succeeded for all samples!') LU factorization succeeded for all samples! " 969,"def test_new_configs_appservice_worker(self) -> None: appservice_worker_config = self._make_worker_config( worker_app=""synapse.app.generic_worker"", worker_name=""worker1"" ) self.assertTrue( appservice_worker_config._should_this_worker_perform_duty( { ""notify_appservices_from_worker"": ""worker1"", }, ""notify_appservices"", ""synapse.app.appservice"", ""notify_appservices_from_worker"", ) ) self.assertFalse( appservice_worker_config._should_this_worker_perform_duty( { ""notify_appservices_from_worker"": ""worker2"", }, ""notify_appservices"", ""synapse.app.appservice"", ""notify_appservices_from_worker"", ) ) "," Tests new config options. This is for the worker's config. ",10,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_new_configs_appservice_worker(self) -> None: appservice_worker_config = self._make_worker_config( worker_app=""synapse.app.generic_worker"", worker_name=""worker1"" ) self.assertTrue( appservice_worker_config._should_this_worker_perform_duty( { ""notify_appservices_from_worker"": ""worker1"", }, ""notify_appservices"", ""synapse.app.appservice"", ""notify_appservices_from_worker"", ) ) self.assertFalse( appservice_worker_config._should_this_worker_perform_duty( { ""notify_appservices_from_worker"": ""worker2"", }, ""notify_appservices"", ""synapse.app.appservice"", ""notify_appservices_from_worker"", ) ) ``` ###Assistant : Tests new config options. This is for the worker's config. " 970,"def E_nl(n, Z=1): n, Z = S(n), S(Z) if n.is_integer and (n < 1): raise ValueError(""'n' must be positive integer"") return -Z**2/(2*n**2) "," Returns the energy of the state (n, l) in Hartree atomic units. The energy does not depend on ""l"". Parameters ========== n : integer Principal Quantum Number which is an integer with possible values as 1, 2, 3, 4,... Z : Atomic number (1 for Hydrogen, 2 for Helium, ...) Examples ======== >>> from sympy.physics.hydrogen import E_nl >>> from sympy.abc import n, Z >>> E_nl(n, Z) -Z**2/(2*n**2) >>> E_nl(1) -1/2 >>> E_nl(2) -1/8 >>> E_nl(3) -1/18 >>> E_nl(3, 47) -2209/18 ",80,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def E_nl(n, Z=1): n, Z = S(n), S(Z) if n.is_integer and (n < 1): raise ValueError(""'n' must be positive integer"") return -Z**2/(2*n**2) ``` ###Assistant : Returns the energy of the state (n, l) in Hartree atomic units. The energy does not depend on ""l"". 
Parameters ========== n : integer Principal Quantum Number which is an integer with possible values as 1, 2, 3, 4,... Z : Atomic number (1 for Hydrogen, 2 for Helium, ...) Examples ======== >>> from sympy.physics.hydrogen import E_nl >>> from sympy.abc import n, Z >>> E_nl(n, Z) -Z**2/(2*n**2) >>> E_nl(1) -1/2 >>> E_nl(2) -1/8 >>> E_nl(3) -1/18 >>> E_nl(3, 47) -2209/18 " 971,"def test_model_checkpoint_no_extraneous_invocations(tmpdir): model = LogInTwoMethods() num_epochs = 4 model_checkpoint = ModelCheckpointTestInvocations(monitor=""early_stop_on"", expected_count=num_epochs, save_top_k=-1) trainer = Trainer( strategy=""ddp_spawn"", accelerator=""cpu"", devices=2, default_root_dir=tmpdir, callbacks=[model_checkpoint], max_epochs=num_epochs, ) trainer.fit(model) assert trainer.state.finished, f""Training failed with {trainer.state}"" ",Test to ensure that the model callback saves the checkpoints only once in distributed mode.,15,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_model_checkpoint_no_extraneous_invocations(tmpdir): model = LogInTwoMethods() num_epochs = 4 model_checkpoint = ModelCheckpointTestInvocations(monitor=""early_stop_on"", expected_count=num_epochs, save_top_k=-1) trainer = Trainer( strategy=""ddp_spawn"", accelerator=""cpu"", devices=2, default_root_dir=tmpdir, callbacks=[model_checkpoint], max_epochs=num_epochs, ) trainer.fit(model) assert trainer.state.finished, f""Training failed with {trainer.state}"" ``` ###Assistant : Test to ensure that the model callback saves the checkpoints only once in distributed mode. " 972,"def find_module(self, fullname, path): warnings.warn(""MetaPathFinder.find_module() is deprecated since Python "" ""3.4 in favor of MetaPathFinder.find_spec() and is "" ""slated for removal in Python 3.12"", DeprecationWarning, stacklevel=2) if not hasattr(self, 'find_spec'): return None found = self.find_spec(fullname, path) return found.loader if found is not None else None ","Return a loader for the module. If no module is found, return None. The fullname is a str and the path is a list of strings or None. This method is deprecated since Python 3.4 in favor of finder.find_spec(). If find_spec() exists then backwards-compatible functionality is provided for this method. ",50,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_module(self, fullname, path): warnings.warn(""MetaPathFinder.find_module() is deprecated since Python "" ""3.4 in favor of MetaPathFinder.find_spec() and is "" ""slated for removal in Python 3.12"", DeprecationWarning, stacklevel=2) if not hasattr(self, 'find_spec'): return None found = self.find_spec(fullname, path) return found.loader if found is not None else None ``` ###Assistant : Return a loader for the module. If no module is found, return None. The fullname is a str and the path is a list of strings or None. This method is deprecated since Python 3.4 in favor of finder.find_spec(). If find_spec() exists then backwards-compatible functionality is provided for this method. 
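An illustrative sketch (not part of the original snippet) of the `find_spec()`-based path that this backwards-compatibility shim delegates to; the finder class, module name, and attribute below are invented for the example:

```Python
import importlib.abc
import importlib.machinery
import sys


class DemoFinder(importlib.abc.MetaPathFinder, importlib.abc.Loader):
    def find_spec(self, fullname, path, target=None):
        if fullname != 'demo_virtual_module':
            return None  # defer to the normal import machinery
        return importlib.machinery.ModuleSpec(fullname, self)

    # Loader protocol: default module creation plus a trivial exec step.
    def create_module(self, spec):
        return None

    def exec_module(self, module):
        module.answer = 42


sys.meta_path.insert(0, DemoFinder())
import demo_virtual_module

print(demo_virtual_module.answer)  # 42

# On Pythons that still ship the shim (< 3.12), the legacy entry point
# delegates to find_spec() and returns the spec's loader.
finder = DemoFinder()
if hasattr(finder, 'find_module'):
    print(finder.find_module('demo_virtual_module', None))
```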
" 973,"def _generate_individual(self, parameter_id): pos = -1 for i in range(len(self.population)): if self.population[i].result is None: pos = i break if pos != -1: indiv = copy.deepcopy(self.population[pos]) self.population.pop(pos) else: random.shuffle(self.population) # avoid only 1 individual has result if len(self.population) > 1 and self.population[0].result < self.population[1].result: self.population[0] = self.population[1] # mutation on the worse individual space = json2space(self.searchspace_json, self.population[0].config) is_rand = dict() mutation_pos = space[random.randint(0, len(space)-1)] for i in range(len(self.space)): is_rand[self.space[i]] = (self.space[i] == mutation_pos) config = json2parameter( self.searchspace_json, is_rand, self.random_state, self.population[0].config) if len(self.population) > 1: self.population.pop(1) indiv = Individual(config=config) # remove ""_index"" from config and save params-id self.running_trials[parameter_id] = indiv config = split_index(indiv.config) return config "," This function will generate the config for a trial. If at the first generation, randomly generates individuals to satisfy self.population_size. Otherwise, random choose a pair of individuals and compare their fitnesses. The worst of the pair will be removed. Copy the best of the pair and mutate it to generate a new individual. Parameters ---------- parameter_id : int Returns ------- dict A group of candidate parameters that evolution tuner generated. ",70,103,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _generate_individual(self, parameter_id): pos = -1 for i in range(len(self.population)): if self.population[i].result is None: pos = i break if pos != -1: indiv = copy.deepcopy(self.population[pos]) self.population.pop(pos) else: random.shuffle(self.population) # avoid only 1 individual has result if len(self.population) > 1 and self.population[0].result < self.population[1].result: self.population[0] = self.population[1] # mutation on the worse individual space = json2space(self.searchspace_json, self.population[0].config) is_rand = dict() mutation_pos = space[random.randint(0, len(space)-1)] for i in range(len(self.space)): is_rand[self.space[i]] = (self.space[i] == mutation_pos) config = json2parameter( self.searchspace_json, is_rand, self.random_state, self.population[0].config) if len(self.population) > 1: self.population.pop(1) indiv = Individual(config=config) # remove ""_index"" from config and save params-id self.running_trials[parameter_id] = indiv config = split_index(indiv.config) return config ``` ###Assistant : This function will generate the config for a trial. If at the first generation, randomly generates individuals to satisfy self.population_size. Otherwise, random choose a pair of individuals and compare their fitnesses. The worst of the pair will be removed. Copy the best of the pair and mutate it to generate a new individual. Parameters ---------- parameter_id : int Returns ------- dict A group of candidate parameters that evolution tuner generated. " 974,"async def connect(self): connection = {""client_id"": self.client_id, ""websocket"": self.websocket} logging.info(f""Connecting WebSocket: {connection}"") await self.websocket.accept() WSProgressHandler.instances.append(self) "," Called when a new client connects to the websocket. ",9,15,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def connect(self): connection = {""client_id"": self.client_id, ""websocket"": self.websocket} logging.info(f""Connecting WebSocket: {connection}"") await self.websocket.accept() WSProgressHandler.instances.append(self) ``` ###Assistant : Called when a new client connects to the websocket. " 975,"def prepare_test_img(self, idx): img_info = self.data_infos[idx] results = dict(img_info=img_info) if self.proposals is not None: results['proposals'] = self.proposals[idx] self.pre_pipeline(results) return self.pipeline(results) ","Get testing data after pipeline. Args: idx (int): Index of data. Returns: dict: Testing data after pipeline with new keys introduced by \ pipeline. ",24,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def prepare_test_img(self, idx): img_info = self.data_infos[idx] results = dict(img_info=img_info) if self.proposals is not None: results['proposals'] = self.proposals[idx] self.pre_pipeline(results) return self.pipeline(results) ``` ###Assistant : Get testing data after pipeline. Args: idx (int): Index of data. Returns: dict: Testing data after pipeline with new keys introduced by \ pipeline. " 976,"def get_data(filters): data = [] conditions = get_conditions(filters) salary_slips = frappe.db.sql( % (conditions), as_dict=1, ) component_type_dict = frappe._dict( frappe.db.sql( ) ) if not len(component_type_dict): return [] entry = frappe.db.sql( % (conditions, "", "".join([""%s""] * len(component_type_dict))), tuple(component_type_dict.keys()), as_dict=1, ) data_list = prepare_data(entry, component_type_dict) for d in salary_slips: total = 0 if data_list.get(d.name): employee = { ""employee"": data_list.get(d.name).get(""employee""), ""employee_name"": data_list.get(d.name).get(""employee_name""), ""pf_account"": data_list.get(d.name).get(""pf_account""), } if data_list.get(d.name).get(""Provident Fund""): employee[""pf_amount""] = data_list.get(d.name).get(""Provident Fund"") total += data_list.get(d.name).get(""Provident Fund"") if data_list.get(d.name).get(""Additional Provident Fund""): employee[""additional_pf""] = data_list.get(d.name).get(""Additional Provident Fund"") total += data_list.get(d.name).get(""Additional Provident Fund"") if data_list.get(d.name).get(""Provident Fund Loan""): employee[""pf_loan""] = data_list.get(d.name).get(""Provident Fund Loan"") total += data_list.get(d.name).get(""Provident Fund Loan"") employee[""total""] = total data.append(employee) return data @frappe.whitelist()"," select sal.name from `tabSalary Slip` sal where docstatus = 1 %s select name, component_type from `tabSalary Component` where component_type in ('Provident Fund', 'Additional Provident Fund', 'Provident Fund Loan') select sal.name, sal.employee, sal.employee_name, ded.salary_component, ded.amount from `tabSalary Slip` sal, `tabSalary Detail` ded where sal.name = ded.parent and ded.parentfield = 'deductions' and ded.parenttype = 'Salary Slip' and sal.docstatus = 1 %s and ded.salary_component in (%s) ",63,107,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_data(filters): data = [] conditions = get_conditions(filters) salary_slips = frappe.db.sql( % (conditions), as_dict=1, ) component_type_dict = frappe._dict( frappe.db.sql( ) ) if not len(component_type_dict): return [] entry = frappe.db.sql( % (conditions, "", "".join([""%s""] * len(component_type_dict))), tuple(component_type_dict.keys()), as_dict=1, ) data_list = prepare_data(entry, component_type_dict) for d in salary_slips: total = 0 if data_list.get(d.name): employee = { ""employee"": data_list.get(d.name).get(""employee""), ""employee_name"": data_list.get(d.name).get(""employee_name""), ""pf_account"": data_list.get(d.name).get(""pf_account""), } if data_list.get(d.name).get(""Provident Fund""): employee[""pf_amount""] = data_list.get(d.name).get(""Provident Fund"") total += data_list.get(d.name).get(""Provident Fund"") if data_list.get(d.name).get(""Additional Provident Fund""): employee[""additional_pf""] = data_list.get(d.name).get(""Additional Provident Fund"") total += data_list.get(d.name).get(""Additional Provident Fund"") if data_list.get(d.name).get(""Provident Fund Loan""): employee[""pf_loan""] = data_list.get(d.name).get(""Provident Fund Loan"") total += data_list.get(d.name).get(""Provident Fund Loan"") employee[""total""] = total data.append(employee) return data @frappe.whitelist() ``` ###Assistant : select sal.name from `tabSalary Slip` sal where docstatus = 1 %s select name, component_type from `tabSalary Component` where component_type in ('Provident Fund', 'Additional Provident Fund', 'Provident Fund Loan') select sal.name, sal.employee, sal.employee_name, ded.salary_component, ded.amount from `tabSalary Slip` sal, `tabSalary Detail` ded where sal.name = ded.parent and ded.parentfield = 'deductions' and ded.parenttype = 'Salary Slip' and sal.docstatus = 1 %s and ded.salary_component in (%s) " 977,"def as_dict(self) -> dict[str, Any]: return { ""extended_dict"": self.as_extended_dict(), ""short_dict"": self.as_short_dict(), } ",Return an dictionary version of this ActionTrace for saving.,9,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def as_dict(self) -> dict[str, Any]: return { ""extended_dict"": self.as_extended_dict(), ""short_dict"": self.as_short_dict(), } ``` ###Assistant : Return an dictionary version of this ActionTrace for saving. " 978,"def deserialize(config, custom_objects=None, **kwargs): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) use_legacy_optimizer = kwargs.pop(""use_legacy_optimizer"", False) if len(config[""config""]) > 0: # If the optimizer config is not empty, then we use the value of # `is_legacy_optimizer` to override `use_legacy_optimizer`. If # `is_legacy_optimizer` does not exist in config, it means we are # using the legacy optimzier. 
use_legacy_optimizer = config[""config""].get(""is_legacy_optimizer"", True) if ( tf.__internal__.tf2.enabled() and tf.executing_eagerly() and not use_legacy_optimizer ): all_classes = { ""adadelta"": adadelta_experimental.Adadelta, ""adagrad"": adagrad_experimental.Adagrad, ""adam"": adam_experimental.Adam, ""adamax"": adamax_experimental.Adamax, ""experimentaladadelta"": adadelta_experimental.Adadelta, ""experimentaladagrad"": adagrad_experimental.Adagrad, ""experimentaladam"": adam_experimental.Adam, ""experimentalsgd"": sgd_experimental.SGD, ""nadam"": nadam_experimental.Nadam, ""rmsprop"": rmsprop_experimental.RMSprop, ""sgd"": sgd_experimental.SGD, ""ftrl"": ftrl_experimental.Ftrl, ""lossscaleoptimizer"": loss_scale_optimizer.LossScaleOptimizerV3, ""lossscaleoptimizerv3"": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer ""lossscaleoptimizerv1"": loss_scale_optimizer.LossScaleOptimizer, } else: all_classes = { ""adadelta"": adadelta_v2.Adadelta, ""adagrad"": adagrad_v2.Adagrad, ""adam"": adam_v2.Adam, ""adamax"": adamax_v2.Adamax, ""experimentaladadelta"": adadelta_experimental.Adadelta, ""experimentaladagrad"": adagrad_experimental.Adagrad, ""experimentaladam"": adam_experimental.Adam, ""experimentalsgd"": sgd_experimental.SGD, ""nadam"": nadam_v2.Nadam, ""rmsprop"": rmsprop_v2.RMSprop, ""sgd"": gradient_descent_v2.SGD, ""ftrl"": ftrl_v2.Ftrl, ""lossscaleoptimizer"": loss_scale_optimizer.LossScaleOptimizer, ""lossscaleoptimizerv3"": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer ""lossscaleoptimizerv1"": loss_scale_optimizer.LossScaleOptimizer, } # Make deserialization case-insensitive for built-in optimizers. if config[""class_name""].lower() in all_classes: config[""class_name""] = config[""class_name""].lower() return deserialize_keras_object( config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name=""optimizer"", ) @keras_export( ""keras.__internal__.optimizers.convert_to_legacy_optimizer"", v1=[] )","Inverse of the `serialize` function. Args: config: Optimizer configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras Optimizer instance. ",32,218,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deserialize(config, custom_objects=None, **kwargs): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) use_legacy_optimizer = kwargs.pop(""use_legacy_optimizer"", False) if len(config[""config""]) > 0: # If the optimizer config is not empty, then we use the value of # `is_legacy_optimizer` to override `use_legacy_optimizer`. If # `is_legacy_optimizer` does not exist in config, it means we are # using the legacy optimzier. 
use_legacy_optimizer = config[""config""].get(""is_legacy_optimizer"", True) if ( tf.__internal__.tf2.enabled() and tf.executing_eagerly() and not use_legacy_optimizer ): all_classes = { ""adadelta"": adadelta_experimental.Adadelta, ""adagrad"": adagrad_experimental.Adagrad, ""adam"": adam_experimental.Adam, ""adamax"": adamax_experimental.Adamax, ""experimentaladadelta"": adadelta_experimental.Adadelta, ""experimentaladagrad"": adagrad_experimental.Adagrad, ""experimentaladam"": adam_experimental.Adam, ""experimentalsgd"": sgd_experimental.SGD, ""nadam"": nadam_experimental.Nadam, ""rmsprop"": rmsprop_experimental.RMSprop, ""sgd"": sgd_experimental.SGD, ""ftrl"": ftrl_experimental.Ftrl, ""lossscaleoptimizer"": loss_scale_optimizer.LossScaleOptimizerV3, ""lossscaleoptimizerv3"": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer ""lossscaleoptimizerv1"": loss_scale_optimizer.LossScaleOptimizer, } else: all_classes = { ""adadelta"": adadelta_v2.Adadelta, ""adagrad"": adagrad_v2.Adagrad, ""adam"": adam_v2.Adam, ""adamax"": adamax_v2.Adamax, ""experimentaladadelta"": adadelta_experimental.Adadelta, ""experimentaladagrad"": adagrad_experimental.Adagrad, ""experimentaladam"": adam_experimental.Adam, ""experimentalsgd"": sgd_experimental.SGD, ""nadam"": nadam_v2.Nadam, ""rmsprop"": rmsprop_v2.RMSprop, ""sgd"": gradient_descent_v2.SGD, ""ftrl"": ftrl_v2.Ftrl, ""lossscaleoptimizer"": loss_scale_optimizer.LossScaleOptimizer, ""lossscaleoptimizerv3"": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer ""lossscaleoptimizerv1"": loss_scale_optimizer.LossScaleOptimizer, } # Make deserialization case-insensitive for built-in optimizers. if config[""class_name""].lower() in all_classes: config[""class_name""] = config[""class_name""].lower() return deserialize_keras_object( config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name=""optimizer"", ) @keras_export( ""keras.__internal__.optimizers.convert_to_legacy_optimizer"", v1=[] ) ``` ###Assistant : Inverse of the `serialize` function. Args: config: Optimizer configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras Optimizer instance. " 979,"def multiply(self, a, b): a = _convert_other(a, raiseit=True) r = a.__mul__(b, context=self) if r is NotImplemented: raise TypeError(""Unable to convert %s to Decimal"" % b) else: return r ","multiply multiplies two operands. If either operand is a special value then the general rules apply. Otherwise, the operands are multiplied together ('long multiplication'), resulting in a number which may be as long as the sum of the lengths of the two operands. 
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3')) Decimal('3.60') >>> ExtendedContext.multiply(Decimal('7'), Decimal('3')) Decimal('21') >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8')) Decimal('0.72') >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0')) Decimal('-0.0') >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321')) Decimal('4.28135971E+11') >>> ExtendedContext.multiply(7, 7) Decimal('49') >>> ExtendedContext.multiply(Decimal(7), 7) Decimal('49') >>> ExtendedContext.multiply(7, Decimal(7)) Decimal('49') ",75,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def multiply(self, a, b): a = _convert_other(a, raiseit=True) r = a.__mul__(b, context=self) if r is NotImplemented: raise TypeError(""Unable to convert %s to Decimal"" % b) else: return r ``` ###Assistant : multiply multiplies two operands. If either operand is a special value then the general rules apply. Otherwise, the operands are multiplied together ('long multiplication'), resulting in a number which may be as long as the sum of the lengths of the two operands. >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3')) Decimal('3.60') >>> ExtendedContext.multiply(Decimal('7'), Decimal('3')) Decimal('21') >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8')) Decimal('0.72') >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0')) Decimal('-0.0') >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321')) Decimal('4.28135971E+11') >>> ExtendedContext.multiply(7, 7) Decimal('49') >>> ExtendedContext.multiply(Decimal(7), 7) Decimal('49') >>> ExtendedContext.multiply(7, Decimal(7)) Decimal('49') " 980,"def test_run_cleanup_skip_archive(self, cleanup_table_mock, kwargs, should_skip): run_cleanup( clean_before_timestamp=None, table_names=['log'], dry_run=None, verbose=None, confirm=False, **kwargs, ) assert cleanup_table_mock.call_args[1]['skip_archive'] is should_skip ",test that delete confirmation input is called when appropriate,9,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_run_cleanup_skip_archive(self, cleanup_table_mock, kwargs, should_skip): run_cleanup( clean_before_timestamp=None, table_names=['log'], dry_run=None, verbose=None, confirm=False, **kwargs, ) assert cleanup_table_mock.call_args[1]['skip_archive'] is should_skip ``` ###Assistant : test that delete confirmation input is called when appropriate " 981,"def process(self) -> None: if not self.is_downloaded(): self.download() self.process_downloaded_dataset() ",Process the dataset into a dataframe and save it at self.processed_dataset_path.,11,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def process(self) -> None: if not self.is_downloaded(): self.download() self.process_downloaded_dataset() ``` ###Assistant : Process the dataset into a dataframe and save it at self.processed_dataset_path. " 982,"def test_healthy_only_works_for_list_of_functions(self): actors = [Actor.remote(i) for i in range(4)] manager = FaultTolerantActorManager(actors=actors) # Mark first and second actor as unhealthy. manager.set_actor_state(1, False) manager.set_actor_state(2, False) ",Test healthy only mode works when a list of funcs are provided.,12,24,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_healthy_only_works_for_list_of_functions(self): actors = [Actor.remote(i) for i in range(4)] manager = FaultTolerantActorManager(actors=actors) # Mark first and second actor as unhealthy. manager.set_actor_state(1, False) manager.set_actor_state(2, False) ``` ###Assistant : Test healthy only mode works when a list of funcs are provided. " 983,"def test_unassignment(self, mock_func): notification = UnassignedActivityNotification( Activity( project=self.project, group=self.group, user=self.user, type=ActivityType.ASSIGNED, data={""assignee"": """"}, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() assert text == f""Issue unassigned by {self.name}"" assert attachment[""title""] == self.group.title assert ( attachment[""footer""] == f""{self.project.slug} | "" ) "," Test that a Slack message is sent with the expected payload when an issue is unassigned ",16,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_unassignment(self, mock_func): notification = UnassignedActivityNotification( Activity( project=self.project, group=self.group, user=self.user, type=ActivityType.ASSIGNED, data={""assignee"": """"}, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() assert text == f""Issue unassigned by {self.name}"" assert attachment[""title""] == self.group.title assert ( attachment[""footer""] == f""{self.project.slug} | "" ) ``` ###Assistant : Test that a Slack message is sent with the expected payload when an issue is unassigned " 984,"def print_help(self): has_ticker_start = ""[unvl]"" if not self.ticker else """" has_ticker_end = ""[/unvl]"" if not self.ticker else """" help_text = f console.print(text=help_text, menu=""Stocks - Insider Trading"") ","Print help[cmds] view view available presets set set one of the available presets[/cmds] [param]PRESET: [/param]{self.preset}[cmds] filter filter insiders based on preset [src][Open Insider][/src] load load a specific stock ticker for analysis[/cmds] {has_ticker_start} [param]Ticker: [/param]{self.ticker} stats insider stats of the company [src][Open Insider][/src] act insider activity over time [src][Business Insider][/src] lins last insider trading of the company [src][Finviz][/src] {has_ticker_end} [info]Latest Insiders[/info] [src][Open Insider][/src][cmds] lcb latest cluster boys lpsb latest penny stock buys lit latest insider trading (all filings) lip latest insider purchases blip big latest insider purchases ($25k+) blop big latest officer purchases ($25k+) blcp big latest CEO/CFO purchases ($25k+) lis latest insider sales blis big latest insider sales ($100k+) blos big latest officer sales ($100k+) blcs big latest CEO/CFO sales ($100k+) [info]Top Insiders [src][Open Insider][/src][/info] topt top officer purchases today toppw top officer purchases past week toppm top officer purchases past month tipt top insider purchases today tippw top insider purchases past week tippm top insider purchases past month tist top insider sales today tispw top insider sales past week tispm top insider sales past month[/cmds] ",176,26,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def print_help(self): has_ticker_start = ""[unvl]"" if not self.ticker else """" has_ticker_end = ""[/unvl]"" if not self.ticker else """" help_text = f console.print(text=help_text, menu=""Stocks - Insider Trading"") ``` ###Assistant : Print help[cmds] view view available presets set set one of the available presets[/cmds] [param]PRESET: [/param]{self.preset}[cmds] filter filter insiders based on preset [src][Open Insider][/src] load load a specific stock ticker for analysis[/cmds] {has_ticker_start} [param]Ticker: [/param]{self.ticker} stats insider stats of the company [src][Open Insider][/src] act insider activity over time [src][Business Insider][/src] lins last insider trading of the company [src][Finviz][/src] {has_ticker_end} [info]Latest Insiders[/info] [src][Open Insider][/src][cmds] lcb latest cluster boys lpsb latest penny stock buys lit latest insider trading (all filings) lip latest insider purchases blip big latest insider purchases ($25k+) blop big latest officer purchases ($25k+) blcp big latest CEO/CFO purchases ($25k+) lis latest insider sales blis big latest insider sales ($100k+) blos big latest officer sales ($100k+) blcs big latest CEO/CFO sales ($100k+) [info]Top Insiders [src][Open Insider][/src][/info] topt top officer purchases today toppw top officer purchases past week toppm top officer purchases past month tipt top insider purchases today tippw top insider purchases past week tippm top insider purchases past month tist top insider sales today tispw top insider sales past week tispm top insider sales past month[/cmds] " 985,"def test_project_config_dynamic_sampling_is_none(default_project): default_project.update_option(""sentry:dynamic_sampling"", None) with Feature({""organizations:server-side-sampling"": True}): cfg = get_project_config(default_project) cfg = cfg.to_dict() dynamic_sampling = get_path(cfg, ""config"", ""dynamicSampling"") assert dynamic_sampling is None @pytest.mark.django_db"," Tests test check inc-237 that dynamic sampling is None, so it's pass when we have fix and fails when we dont ",21,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_project_config_dynamic_sampling_is_none(default_project): default_project.update_option(""sentry:dynamic_sampling"", None) with Feature({""organizations:server-side-sampling"": True}): cfg = get_project_config(default_project) cfg = cfg.to_dict() dynamic_sampling = get_path(cfg, ""config"", ""dynamicSampling"") assert dynamic_sampling is None @pytest.mark.django_db ``` ###Assistant : Tests test check inc-237 that dynamic sampling is None, so it's pass when we have fix and fails when we dont " 986,"def seek(self, offset, whence=io.SEEK_SET): self._check_can_seek() return self._buffer.seek(offset, whence) ","Change the file position. The new position is specified by offset, relative to the position indicated by whence. Values for whence are: 0: start of stream (default); offset must not be negative 1: current stream position 2: end of stream; offset must not be positive Returns the new file position. Note that seeking is emulated, so depending on the parameters, this operation may be extremely slow. ",66,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def seek(self, offset, whence=io.SEEK_SET): self._check_can_seek() return self._buffer.seek(offset, whence) ``` ###Assistant : Change the file position. 
The new position is specified by offset, relative to the position indicated by whence. Values for whence are: 0: start of stream (default); offset must not be negative 1: current stream position 2: end of stream; offset must not be positive Returns the new file position. Note that seeking is emulated, so depending on the parameters, this operation may be extremely slow. " 987,"def savepoint(self): if not self._savepoint_allowed(): return thread_ident = _thread.get_ident() tid = str(thread_ident).replace(""-"", """") self.savepoint_state += 1 sid = ""s%s_x%d"" % (tid, self.savepoint_state) self.validate_thread_sharing() self._savepoint(sid) return sid "," Create a savepoint inside the current transaction. Return an identifier for the savepoint that will be used for the subsequent rollback or commit. Do nothing if savepoints are not supported. ",30,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def savepoint(self): if not self._savepoint_allowed(): return thread_ident = _thread.get_ident() tid = str(thread_ident).replace(""-"", """") self.savepoint_state += 1 sid = ""s%s_x%d"" % (tid, self.savepoint_state) self.validate_thread_sharing() self._savepoint(sid) return sid ``` ###Assistant : Create a savepoint inside the current transaction. Return an identifier for the savepoint that will be used for the subsequent rollback or commit. Do nothing if savepoints are not supported. " 988,"def get_unclaimed_expese_claims(filters): cond = ""1=1"" if filters.get(""employee""): cond = ""ec.employee = %(employee)s"" return frappe.db.sql( .format( cond=cond ), filters, as_list=1, ) "," select ec.employee, ec.employee_name, ec.name, ec.total_sanctioned_amount, ec.total_amount_reimbursed, sum(gle.credit_in_account_currency - gle.debit_in_account_currency) as outstanding_amt from `tabExpense Claim` ec, `tabGL Entry` gle where gle.against_voucher_type = ""Expense Claim"" and gle.against_voucher = ec.name and gle.party is not null and ec.docstatus = 1 and ec.is_paid = 0 and {cond} group by ec.name having outstanding_amt > 0 ",49,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_unclaimed_expese_claims(filters): cond = ""1=1"" if filters.get(""employee""): cond = ""ec.employee = %(employee)s"" return frappe.db.sql( .format( cond=cond ), filters, as_list=1, ) ``` ###Assistant : select ec.employee, ec.employee_name, ec.name, ec.total_sanctioned_amount, ec.total_amount_reimbursed, sum(gle.credit_in_account_currency - gle.debit_in_account_currency) as outstanding_amt from `tabExpense Claim` ec, `tabGL Entry` gle where gle.against_voucher_type = ""Expense Claim"" and gle.against_voucher = ec.name and gle.party is not null and ec.docstatus = 1 and ec.is_paid = 0 and {cond} group by ec.name having outstanding_amt > 0 " 989,"def site_config_dir(self) -> str: # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False path = os.environ.get(""XDG_CONFIG_DIRS"", """") if not path.strip(): path = ""/etc/xdg"" return self._with_multi_path(path) "," :return: config directories shared by users (if `multipath ` is enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS path separator), e.g. ``/etc/xdg/$appname/$version`` ",34,27,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def site_config_dir(self) -> str: # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False path = os.environ.get(""XDG_CONFIG_DIRS"", """") if not path.strip(): path = ""/etc/xdg"" return self._with_multi_path(path) ``` ###Assistant : :return: config directories shared by users (if `multipath ` is enabled and ``XDG_DATA_DIR`` is set and a multi path the response is also a multi path separated by the OS path separator), e.g. ``/etc/xdg/$appname/$version`` " 990,"def fit_transform(self, X, y=None): self._validate_params() return self._fit_transform(X, compute_sources=True) ","Fit the model and recover the sources from X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- X_new : ndarray of shape (n_samples, n_components) Estimated sources obtained by transforming the data with the estimated unmixing matrix. ",66,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit_transform(self, X, y=None): self._validate_params() return self._fit_transform(X, compute_sources=True) ``` ###Assistant : Fit the model and recover the sources from X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- X_new : ndarray of shape (n_samples, n_components) Estimated sources obtained by transforming the data with the estimated unmixing matrix. " 991,"def __fetch_randomly_sampled_transactions(self, project, query, sample_size, query_time_range): sampling_factor = self.__generate_transactions_sampling_factor( project=project, query=query, sample_size=sample_size, query_time_range=query_time_range, ) builder = QueryBuilder( Dataset.Discover, params={ ""start"": query_time_range.start_time, ""end"": query_time_range.end_time, ""project_id"": [project.id], ""organization_id"": project.organization.id, }, query=f""{query} event.type:transaction"", selected_columns=[ ""id"", ""trace"", ""random_number() as rand_num"", f""modulo(rand_num, {sampling_factor}) as modulo_num"", ], equations=[], orderby=None, auto_fields=True, auto_aggregations=True, use_aggregate_conditions=True, functions_acl=[""random_number"", ""modulo""], limit=sample_size, offset=0, equation_config={""auto_add"": False}, ) builder.add_conditions([Condition(lhs=Column(""modulo_num""), op=Op.EQ, rhs=0)]) snuba_query = builder.get_snql_query().query snuba_query = snuba_query.set_select( snuba_query.select + [ Function( ""not"", [Function(""has"", [Column(""contexts.key""), TRACE_PARENT_SPAN_CONTEXT])], alias=""is_root"", ) ] ) snuba_query = snuba_query.set_groupby( snuba_query.groupby + [Column(""modulo_num""), Column(""contexts.key"")] ) data = raw_snql_query( SnubaRequest(dataset=Dataset.Discover.value, app_id=""default"", query=snuba_query), referrer=Referrer.DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_TRANSACTIONS.value, )[""data""] return data "," Fetches a random sample of transactions of size `sample_size` in the last period defined by `stats_period`. 
The random sample is fetched by generating a random number by for every row, and then doing a modulo operation on it, and if that number is divisible by the sampling factor then its kept, otherwise is discarded. This is an alternative to sampling the query before applying the conditions. The goal here is to fetch the transaction ids, their sample rates and their trace ids. ",82,92,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __fetch_randomly_sampled_transactions(self, project, query, sample_size, query_time_range): sampling_factor = self.__generate_transactions_sampling_factor( project=project, query=query, sample_size=sample_size, query_time_range=query_time_range, ) builder = QueryBuilder( Dataset.Discover, params={ ""start"": query_time_range.start_time, ""end"": query_time_range.end_time, ""project_id"": [project.id], ""organization_id"": project.organization.id, }, query=f""{query} event.type:transaction"", selected_columns=[ ""id"", ""trace"", ""random_number() as rand_num"", f""modulo(rand_num, {sampling_factor}) as modulo_num"", ], equations=[], orderby=None, auto_fields=True, auto_aggregations=True, use_aggregate_conditions=True, functions_acl=[""random_number"", ""modulo""], limit=sample_size, offset=0, equation_config={""auto_add"": False}, ) builder.add_conditions([Condition(lhs=Column(""modulo_num""), op=Op.EQ, rhs=0)]) snuba_query = builder.get_snql_query().query snuba_query = snuba_query.set_select( snuba_query.select + [ Function( ""not"", [Function(""has"", [Column(""contexts.key""), TRACE_PARENT_SPAN_CONTEXT])], alias=""is_root"", ) ] ) snuba_query = snuba_query.set_groupby( snuba_query.groupby + [Column(""modulo_num""), Column(""contexts.key"")] ) data = raw_snql_query( SnubaRequest(dataset=Dataset.Discover.value, app_id=""default"", query=snuba_query), referrer=Referrer.DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_TRANSACTIONS.value, )[""data""] return data ``` ###Assistant : Fetches a random sample of transactions of size `sample_size` in the last period defined by `stats_period`. The random sample is fetched by generating a random number by for every row, and then doing a modulo operation on it, and if that number is divisible by the sampling factor then its kept, otherwise is discarded. This is an alternative to sampling the query before applying the conditions. The goal here is to fetch the transaction ids, their sample rates and their trace ids. 
" 992,"def item_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False): conditions = [] if isinstance(filters, str): filters = json.loads(filters) #Get searchfields from meta and use in Item Link field query meta = frappe.get_meta(""Item"", cached=True) searchfields = meta.get_search_fields() # these are handled separately ignored_search_fields = (""item_name"", ""description"") for ignored_field in ignored_search_fields: if ignored_field in searchfields: searchfields.remove(ignored_field) columns = '' extra_searchfields = [field for field in searchfields if not field in [""name"", ""item_group"", ""description"", ""item_name""]] if extra_searchfields: columns = "", "" + "", "".join(extra_searchfields) searchfields = searchfields + [field for field in[searchfield or ""name"", ""item_code"", ""item_group"", ""item_name""] if not field in searchfields] searchfields = "" or "".join([field + "" like %(txt)s"" for field in searchfields]) if filters and isinstance(filters, dict): if filters.get('customer') or filters.get('supplier'): party = filters.get('customer') or filters.get('supplier') item_rules_list = frappe.get_all('Party Specific Item', filters = {'party': party}, fields = ['restrict_based_on', 'based_on_value']) filters_dict = {} for rule in item_rules_list: if rule['restrict_based_on'] == 'Item': rule['restrict_based_on'] = 'name' filters_dict[rule.restrict_based_on] = [] for rule in item_rules_list: filters_dict[rule.restrict_based_on].append(rule.based_on_value) for filter in filters_dict: filters[scrub(filter)] = ['in', filters_dict[filter]] if filters.get('customer'): del filters['customer'] else: del filters['supplier'] else: filters.pop('customer', None) filters.pop('supplier', None) description_cond = '' if frappe.db.count('Item', cache=True) < 50000: # scan description only if items are less than 50000 description_cond = 'or tabItem.description LIKE %(txt)s' return frappe.db.sql(.format( columns=columns, scond=searchfields, fcond=get_filters_cond(doctype, filters, conditions).replace('%', '%%'), mcond=get_match_cond(doctype).replace('%', '%%'), description_cond = description_cond), { ""today"": nowdate(), ""txt"": ""%%%s%%"" % txt, ""_txt"": txt.replace(""%"", """"), ""start"": start, ""page_len"": page_len }, as_dict=as_dict) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs","select tabItem.name, tabItem.item_name, tabItem.item_group, if(length(tabItem.description) > 40, \ concat(substr(tabItem.description, 1, 40), ""...""), description) as description {columns} from tabItem where tabItem.docstatus < 2 and tabItem.disabled=0 and tabItem.has_variants=0 and (tabItem.end_of_life > %(today)s or ifnull(tabItem.end_of_life, '0000-00-00')='0000-00-00') and ({scond} or tabItem.item_code IN (select parent from `tabItem Barcode` where barcode LIKE %(txt)s) {description_cond}) {fcond} {mcond} order by if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999), if(locate(%(_txt)s, item_name), locate(%(_txt)s, item_name), 99999), idx desc, name, item_name limit %(start)s, %(page_len)s ",69,235,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def item_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False): conditions = [] if isinstance(filters, str): filters = json.loads(filters) #Get searchfields from meta and use in Item Link field query meta = frappe.get_meta(""Item"", cached=True) searchfields = meta.get_search_fields() # these are handled separately ignored_search_fields = (""item_name"", ""description"") for ignored_field in ignored_search_fields: if ignored_field in searchfields: searchfields.remove(ignored_field) columns = '' extra_searchfields = [field for field in searchfields if not field in [""name"", ""item_group"", ""description"", ""item_name""]] if extra_searchfields: columns = "", "" + "", "".join(extra_searchfields) searchfields = searchfields + [field for field in[searchfield or ""name"", ""item_code"", ""item_group"", ""item_name""] if not field in searchfields] searchfields = "" or "".join([field + "" like %(txt)s"" for field in searchfields]) if filters and isinstance(filters, dict): if filters.get('customer') or filters.get('supplier'): party = filters.get('customer') or filters.get('supplier') item_rules_list = frappe.get_all('Party Specific Item', filters = {'party': party}, fields = ['restrict_based_on', 'based_on_value']) filters_dict = {} for rule in item_rules_list: if rule['restrict_based_on'] == 'Item': rule['restrict_based_on'] = 'name' filters_dict[rule.restrict_based_on] = [] for rule in item_rules_list: filters_dict[rule.restrict_based_on].append(rule.based_on_value) for filter in filters_dict: filters[scrub(filter)] = ['in', filters_dict[filter]] if filters.get('customer'): del filters['customer'] else: del filters['supplier'] else: filters.pop('customer', None) filters.pop('supplier', None) description_cond = '' if frappe.db.count('Item', cache=True) < 50000: # scan description only if items are less than 50000 description_cond = 'or tabItem.description LIKE %(txt)s' return frappe.db.sql(.format( columns=columns, scond=searchfields, fcond=get_filters_cond(doctype, filters, conditions).replace('%', '%%'), mcond=get_match_cond(doctype).replace('%', '%%'), description_cond = description_cond), { ""today"": nowdate(), ""txt"": ""%%%s%%"" % txt, ""_txt"": txt.replace(""%"", """"), ""start"": start, ""page_len"": page_len }, as_dict=as_dict) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs ``` ###Assistant : select tabItem.name, tabItem.item_name, tabItem.item_group, if(length(tabItem.description) > 40, \ concat(substr(tabItem.description, 1, 40), ""...""), description) as description {columns} from tabItem where tabItem.docstatus < 2 and tabItem.disabled=0 and tabItem.has_variants=0 and (tabItem.end_of_life > %(today)s or ifnull(tabItem.end_of_life, '0000-00-00')='0000-00-00') and ({scond} or tabItem.item_code IN (select parent from `tabItem Barcode` where barcode LIKE %(txt)s) {description_cond}) {fcond} {mcond} order by if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999), if(locate(%(_txt)s, item_name), locate(%(_txt)s, item_name), 99999), idx desc, name, item_name limit %(start)s, %(page_len)s " 993,"def apply_and_enforce(*args, **kwargs): func = kwargs.pop(""_func"") expected_ndim = kwargs.pop(""expected_ndim"") out = func(*args, **kwargs) if getattr(out, ""ndim"", 0) != expected_ndim: out_ndim = getattr(out, ""ndim"", 0) raise ValueError( f""Dimension mismatch: expected output of {func} "" f""to have dims = {expected_ndim}. 
Got {out_ndim} instead."" ) return out ","Apply a function, and enforce the output.ndim to match expected_ndim Ensures the output has the expected dimensionality.",17,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply_and_enforce(*args, **kwargs): func = kwargs.pop(""_func"") expected_ndim = kwargs.pop(""expected_ndim"") out = func(*args, **kwargs) if getattr(out, ""ndim"", 0) != expected_ndim: out_ndim = getattr(out, ""ndim"", 0) raise ValueError( f""Dimension mismatch: expected output of {func} "" f""to have dims = {expected_ndim}. Got {out_ndim} instead."" ) return out ``` ###Assistant : Apply a function, and enforce the output.ndim to match expected_ndim Ensures the output has the expected dimensionality. " 994,"def _sanitize_non_ordered(data) -> None: if isinstance(data, (set, frozenset)): raise TypeError(f""'{type(data).__name__}' type is unordered"") "," Raise only for unordered sets, e.g., not for dict_keys ",9,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _sanitize_non_ordered(data) -> None: if isinstance(data, (set, frozenset)): raise TypeError(f""'{type(data).__name__}' type is unordered"") ``` ###Assistant : Raise only for unordered sets, e.g., not for dict_keys " 995,"def dis(x=None, *, file=None, depth=None): if x is None: distb(file=file) return # Extract functions from methods. if hasattr(x, '__func__'): x = x.__func__ # Extract compiled code objects from... if hasattr(x, '__code__'): # ...a function, or x = x.__code__ elif hasattr(x, 'gi_code'): #...a generator object, or x = x.gi_code elif hasattr(x, 'ag_code'): #...an asynchronous generator object, or x = x.ag_code elif hasattr(x, 'cr_code'): #...a coroutine. x = x.cr_code # Perform the disassembly. if hasattr(x, '__dict__'): # Class or module items = sorted(x.__dict__.items()) for name, x1 in items: if isinstance(x1, _have_code): print(""Disassembly of %s:"" % name, file=file) try: dis(x1, file=file, depth=depth) except TypeError as msg: print(""Sorry:"", msg, file=file) print(file=file) elif hasattr(x, 'co_code'): # Code object _disassemble_recursive(x, file=file, depth=depth) elif isinstance(x, (bytes, bytearray)): # Raw bytecode _disassemble_bytes(x, file=file) elif isinstance(x, str): # Source code _disassemble_str(x, file=file, depth=depth) else: raise TypeError(""don't know how to disassemble %s objects"" % type(x).__name__) ","Disassemble classes, methods, functions, and other compiled objects. With no argument, disassemble the last traceback. Compiled objects currently include generator objects, async generator objects, and coroutine objects, all of which store their code object in a special attribute. ",38,145,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dis(x=None, *, file=None, depth=None): if x is None: distb(file=file) return # Extract functions from methods. if hasattr(x, '__func__'): x = x.__func__ # Extract compiled code objects from... if hasattr(x, '__code__'): # ...a function, or x = x.__code__ elif hasattr(x, 'gi_code'): #...a generator object, or x = x.gi_code elif hasattr(x, 'ag_code'): #...an asynchronous generator object, or x = x.ag_code elif hasattr(x, 'cr_code'): #...a coroutine. x = x.cr_code # Perform the disassembly. 
if hasattr(x, '__dict__'): # Class or module items = sorted(x.__dict__.items()) for name, x1 in items: if isinstance(x1, _have_code): print(""Disassembly of %s:"" % name, file=file) try: dis(x1, file=file, depth=depth) except TypeError as msg: print(""Sorry:"", msg, file=file) print(file=file) elif hasattr(x, 'co_code'): # Code object _disassemble_recursive(x, file=file, depth=depth) elif isinstance(x, (bytes, bytearray)): # Raw bytecode _disassemble_bytes(x, file=file) elif isinstance(x, str): # Source code _disassemble_str(x, file=file, depth=depth) else: raise TypeError(""don't know how to disassemble %s objects"" % type(x).__name__) ``` ###Assistant : Disassemble classes, methods, functions, and other compiled objects. With no argument, disassemble the last traceback. Compiled objects currently include generator objects, async generator objects, and coroutine objects, all of which store their code object in a special attribute. " 996,"def bernoulli_poly(n, x=None, polys=False): r return named_poly(n, dup_bernoulli, QQ, ""Bernoulli polynomial"", (x,), polys) ","Generates the Bernoulli polynomial `\operatorname{B}_n(x)`. `\operatorname{B}_n(x)` is the unique polynomial satisfying .. math :: \int_{x}^{x+1} \operatorname{B}_n(t) \,dt = x^n. Based on this, we have for nonnegative integer `s` and integer `a` and `b` .. math :: \sum_{k=a}^{b} k^s = \frac{\operatorname{B}_{s+1}(b+1) - \operatorname{B}_{s+1}(a)}{s+1} which is related to Jakob Bernoulli's original motivation for introducing the Bernoulli numbers, the values of these polynomials at `x = 1`. Examples ======== >>> from sympy import summation >>> from sympy.abc import x >>> from sympy.polys import bernoulli_poly >>> bernoulli_poly(5, x) x**5 - 5*x**4/2 + 5*x**3/3 - x/6 >>> def psum(p, a, b): ... return (bernoulli_poly(p+1,b+1) - bernoulli_poly(p+1,a)) / (p+1) >>> psum(4, -6, 27) 3144337 >>> summation(x**4, (x, -6, 27)) 3144337 >>> psum(1, 1, x).factor() x*(x + 1)/2 >>> psum(2, 1, x).factor() x*(x + 1)*(2*x + 1)/6 >>> psum(3, 1, x).factor() x**2*(x + 1)**2/4 Parameters ========== n : int Degree of the polynomial. x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression. References ========== .. [1] https://en.wikipedia.org/wiki/Bernoulli_polynomials ",168,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bernoulli_poly(n, x=None, polys=False): r return named_poly(n, dup_bernoulli, QQ, ""Bernoulli polynomial"", (x,), polys) ``` ###Assistant : Generates the Bernoulli polynomial `\operatorname{B}_n(x)`. `\operatorname{B}_n(x)` is the unique polynomial satisfying .. math :: \int_{x}^{x+1} \operatorname{B}_n(t) \,dt = x^n. Based on this, we have for nonnegative integer `s` and integer `a` and `b` .. math :: \sum_{k=a}^{b} k^s = \frac{\operatorname{B}_{s+1}(b+1) - \operatorname{B}_{s+1}(a)}{s+1} which is related to Jakob Bernoulli's original motivation for introducing the Bernoulli numbers, the values of these polynomials at `x = 1`. Examples ======== >>> from sympy import summation >>> from sympy.abc import x >>> from sympy.polys import bernoulli_poly >>> bernoulli_poly(5, x) x**5 - 5*x**4/2 + 5*x**3/3 - x/6 >>> def psum(p, a, b): ... 
return (bernoulli_poly(p+1,b+1) - bernoulli_poly(p+1,a)) / (p+1) >>> psum(4, -6, 27) 3144337 >>> summation(x**4, (x, -6, 27)) 3144337 >>> psum(1, 1, x).factor() x*(x + 1)/2 >>> psum(2, 1, x).factor() x*(x + 1)*(2*x + 1)/6 >>> psum(3, 1, x).factor() x**2*(x + 1)**2/4 Parameters ========== n : int Degree of the polynomial. x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression. References ========== .. [1] https://en.wikipedia.org/wiki/Bernoulli_polynomials " 997,"def after_log(logger, log_level, sec_format=""%0.3f""): log_tpl = ( ""Finished call to '%s' after "" + str(sec_format) + ""(s), "" ""this was the %s time calling it."" ) ",After call strategy that logs to some logger the finished attempt.,11,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def after_log(logger, log_level, sec_format=""%0.3f""): log_tpl = ( ""Finished call to '%s' after "" + str(sec_format) + ""(s), "" ""this was the %s time calling it."" ) ``` ###Assistant : After call strategy that logs to some logger the finished attempt. " 998,"def _compat_get_offset(meth): sigs = [lambda self, width, height, xdescent, ydescent, renderer: locals(), lambda self, bbox, renderer: locals()] "," Decorator for the get_offset method of OffsetBox and subclasses, that allows supporting both the new signature (self, bbox, renderer) and the old signature (self, width, height, xdescent, ydescent, renderer). ",29,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _compat_get_offset(meth): sigs = [lambda self, width, height, xdescent, ydescent, renderer: locals(), lambda self, bbox, renderer: locals()] ``` ###Assistant : Decorator for the get_offset method of OffsetBox and subclasses, that allows supporting both the new signature (self, bbox, renderer) and the old signature (self, width, height, xdescent, ydescent, renderer). " 999,"def sixtofour(self): if (self._ip >> 112) != 0x2002: return None return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) ","Return the IPv4 6to4 embedded address. Returns: The IPv4 6to4-embedded address if present or None if the address doesn't appear to contain a 6to4 embedded address. ",26,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sixtofour(self): if (self._ip >> 112) != 0x2002: return None return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) ``` ###Assistant : Return the IPv4 6to4 embedded address. Returns: The IPv4 6to4-embedded address if present or None if the address doesn't appear to contain a 6to4 embedded address. " 1000,"def reload_markets(self) -> None: # Check whether markets have to be reloaded if (self._last_markets_refresh > 0) and ( self._last_markets_refresh + self.markets_refresh_interval > arrow.utcnow().int_timestamp): return None logger.debug(""Performing scheduled market reload.."") try: self._markets = self._api.load_markets(reload=True) # Also reload async markets to avoid issues with newly listed pairs self._load_async_markets(reload=True) self._last_markets_refresh = arrow.utcnow().int_timestamp self.fill_leverage_tiers() except ccxt.BaseError: logger.exception(""Could not reload markets."") ",Reload markets both sync and async if refresh interval has passed ,11,56,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def reload_markets(self) -> None: # Check whether markets have to be reloaded if (self._last_markets_refresh > 0) and ( self._last_markets_refresh + self.markets_refresh_interval > arrow.utcnow().int_timestamp): return None logger.debug(""Performing scheduled market reload.."") try: self._markets = self._api.load_markets(reload=True) # Also reload async markets to avoid issues with newly listed pairs self._load_async_markets(reload=True) self._last_markets_refresh = arrow.utcnow().int_timestamp self.fill_leverage_tiers() except ccxt.BaseError: logger.exception(""Could not reload markets."") ``` ###Assistant : Reload markets both sync and async if refresh interval has passed " 1001,"def is_strongly_diagonally_dominant(self): r if not self.is_square: return False rows, cols = self.shape ","Tests if the matrix is row strongly diagonally dominant. Explanation =========== A $n, n$ matrix $A$ is row strongly diagonally dominant if .. math:: \left|A_{i, i}\right| > \sum_{j = 0, j \neq i}^{n-1} \left|A_{i, j}\right| \quad {\text{for all }} i \in \{ 0, ..., n-1 \} Examples ======== >>> from sympy import Matrix >>> A = Matrix([[3, -2, 1], [1, -3, 2], [-1, 2, 4]]) >>> A.is_strongly_diagonally_dominant False >>> A = Matrix([[-2, 2, 1], [1, 3, 2], [1, -2, 0]]) >>> A.is_strongly_diagonally_dominant False >>> A = Matrix([[-4, 2, 1], [1, 6, 2], [1, -2, 5]]) >>> A.is_strongly_diagonally_dominant True Notes ===== If you want to test whether a matrix is column diagonally dominant, you can apply the test after transposing the matrix. ",121,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_strongly_diagonally_dominant(self): r if not self.is_square: return False rows, cols = self.shape ``` ###Assistant : Tests if the matrix is row strongly diagonally dominant. Explanation =========== A $n, n$ matrix $A$ is row strongly diagonally dominant if .. math:: \left|A_{i, i}\right| > \sum_{j = 0, j \neq i}^{n-1} \left|A_{i, j}\right| \quad {\text{for all }} i \in \{ 0, ..., n-1 \} Examples ======== >>> from sympy import Matrix >>> A = Matrix([[3, -2, 1], [1, -3, 2], [-1, 2, 4]]) >>> A.is_strongly_diagonally_dominant False >>> A = Matrix([[-2, 2, 1], [1, 3, 2], [1, -2, 0]]) >>> A.is_strongly_diagonally_dominant False >>> A = Matrix([[-4, 2, 1], [1, 6, 2], [1, -2, 5]]) >>> A.is_strongly_diagonally_dominant True Notes ===== If you want to test whether a matrix is column diagonally dominant, you can apply the test after transposing the matrix. " 1002,"def create_calibration_module(self, feature) -> torch.nn.Module: if feature.get(""calibration""): calibration_cls = calibration.get_calibration_cls(BINARY, ""temperature_scaling"") return calibration_cls(binary=True) return None ","Creates the appropriate calibration module based on the feature config. Today, only one type of calibration (""temperature_scaling"") is available, but more options may be supported in the future. ",28,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_calibration_module(self, feature) -> torch.nn.Module: if feature.get(""calibration""): calibration_cls = calibration.get_calibration_cls(BINARY, ""temperature_scaling"") return calibration_cls(binary=True) return None ``` ###Assistant : Creates the appropriate calibration module based on the feature config. 
Today, only one type of calibration (""temperature_scaling"") is available, but more options may be supported in the future. " 1003,"def _download(url, path, md5sum=None): if not osp.exists(path): os.makedirs(path) fname = osp.split(url)[-1] fullname = osp.join(path, fname) retry_cnt = 0 while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum, url)): if retry_cnt < DOWNLOAD_RETRY_LIMIT: retry_cnt += 1 else: raise RuntimeError(""Download from {} failed. "" ""Retry limit reached"".format(url)) logger.info(""Downloading {} from {}"".format(fname, url)) # NOTE: windows path join may incur \, which is invalid in url if sys.platform == ""win32"": url = url.replace('\\', '/') req = requests.get(url, stream=True) if req.status_code != 200: raise RuntimeError(""Downloading from {} failed with code "" ""{}!"".format(url, req.status_code)) # For protecting download interupted, download to # tmp_fullname firstly, move tmp_fullname to fullname # after download finished tmp_fullname = fullname + ""_tmp"" total_size = req.headers.get('content-length') with open(tmp_fullname, 'wb') as f: if total_size: for chunk in tqdm.tqdm( req.iter_content(chunk_size=1024), total=(int(total_size) + 1023) // 1024, unit='KB'): f.write(chunk) else: for chunk in req.iter_content(chunk_size=1024): if chunk: f.write(chunk) shutil.move(tmp_fullname, fullname) return fullname "," Download from url, save to path. url (str): download url path (str): download to given path ",16,143,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _download(url, path, md5sum=None): if not osp.exists(path): os.makedirs(path) fname = osp.split(url)[-1] fullname = osp.join(path, fname) retry_cnt = 0 while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum, url)): if retry_cnt < DOWNLOAD_RETRY_LIMIT: retry_cnt += 1 else: raise RuntimeError(""Download from {} failed. "" ""Retry limit reached"".format(url)) logger.info(""Downloading {} from {}"".format(fname, url)) # NOTE: windows path join may incur \, which is invalid in url if sys.platform == ""win32"": url = url.replace('\\', '/') req = requests.get(url, stream=True) if req.status_code != 200: raise RuntimeError(""Downloading from {} failed with code "" ""{}!"".format(url, req.status_code)) # For protecting download interupted, download to # tmp_fullname firstly, move tmp_fullname to fullname # after download finished tmp_fullname = fullname + ""_tmp"" total_size = req.headers.get('content-length') with open(tmp_fullname, 'wb') as f: if total_size: for chunk in tqdm.tqdm( req.iter_content(chunk_size=1024), total=(int(total_size) + 1023) // 1024, unit='KB'): f.write(chunk) else: for chunk in req.iter_content(chunk_size=1024): if chunk: f.write(chunk) shutil.move(tmp_fullname, fullname) return fullname ``` ###Assistant : Download from url, save to path. url (str): download url path (str): download to given path " 1004,"def related_objects(self): all_related_fields = self._get_fields( forward=False, reverse=True, include_hidden=True ) return make_immutable_fields_list( ""related_objects"", ( obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many ), ) "," Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. 
",49,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def related_objects(self): all_related_fields = self._get_fields( forward=False, reverse=True, include_hidden=True ) return make_immutable_fields_list( ""related_objects"", ( obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many ), ) ``` ###Assistant : Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. " 1005,"def test_https_good_referer(self): req = self._get_POST_request_with_token() req._is_secure_override = True req.META[""HTTP_HOST""] = ""www.example.com"" req.META[""HTTP_REFERER""] = ""https://www.example.com/somepage"" mw = CsrfViewMiddleware(post_form_view) mw.process_request(req) resp = mw.process_view(req, post_form_view, (), {}) self.assertIsNone(resp) "," A POST HTTPS request with a good referer is accepted. ",10,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_https_good_referer(self): req = self._get_POST_request_with_token() req._is_secure_override = True req.META[""HTTP_HOST""] = ""www.example.com"" req.META[""HTTP_REFERER""] = ""https://www.example.com/somepage"" mw = CsrfViewMiddleware(post_form_view) mw.process_request(req) resp = mw.process_view(req, post_form_view, (), {}) self.assertIsNone(resp) ``` ###Assistant : A POST HTTPS request with a good referer is accepted. " 1006,"def test_state_policy(self) -> None: room_id = self.helper.create_room_as(self.user_id, tok=self.token) # Set the maximum lifetime to 35 days so that the first event gets expired but not # the second one. self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, body={""max_lifetime"": one_day_ms * 35}, tok=self.token, ) self._test_retention(room_id, expected_code_for_first_event=404) ","Tests that an event gets correctly expired if there is no default retention policy but there's a policy specific to the room. ",22,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_state_policy(self) -> None: room_id = self.helper.create_room_as(self.user_id, tok=self.token) # Set the maximum lifetime to 35 days so that the first event gets expired but not # the second one. self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, body={""max_lifetime"": one_day_ms * 35}, tok=self.token, ) self._test_retention(room_id, expected_code_for_first_event=404) ``` ###Assistant : Tests that an event gets correctly expired if there is no default retention policy but there's a policy specific to the room. " 1007,"def insertion_sort(list, n): for i in range(0, n): key = list[i] j = i - 1 # Swap elements witth key iff they are # greater than key while j >= 0 and list[j] > key: list[j + 1] = list[j] j = j - 1 list[j + 1] = key return list "," sort list in assending order INPUT: list=list of values to be sorted n=size of list that contains values to be sorted OUTPUT: list of sorted values in assending order ",29,53,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def insertion_sort(list, n): for i in range(0, n): key = list[i] j = i - 1 # Swap elements with key iff they are # greater than key while j >= 0 and list[j] > key: list[j + 1] = list[j] j = j - 1 list[j + 1] = key return list ``` ###Assistant : sort list in ascending order INPUT: list=list of values to be sorted n=size of list that contains values to be sorted OUTPUT: list of sorted values in ascending order " 1008,"def get_template_context(self): return {""name"": self.__class__.__name__.lower(), ""label"": self.label} "," :return: a dictionary with context variables for the javascript file associated with the context ",14,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_template_context(self): return {""name"": self.__class__.__name__.lower(), ""label"": self.label} ``` ###Assistant : :return: a dictionary with context variables for the javascript file associated with the context " 1009,"def connect(self): if self.is_connected is True: return self.connection connection = teradatasql.connect( **self.connection_data ) self.is_connected = True self.connection = connection return self.connection "," Handles the connection to a Teradata database instance. ",8,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def connect(self): if self.is_connected is True: return self.connection connection = teradatasql.connect( **self.connection_data ) self.is_connected = True self.connection = connection return self.connection ``` ###Assistant : Handles the connection to a Teradata database instance. " 1010,"def unregister_event_manager(self, manager): self.event_managers.remove(manager) for type_id in manager.type_ids: self.event_managers_dict[type_id].remove(manager) manager.stop() manager.window = None ","Unregister and stop an event manager previously registered with :meth:`register_event_manager`. .. versionadded:: 2.1.0 .. warning:: This is an experimental method and it remains so until this warning is present as it can be changed or removed in the next versions of Kivy. ",42,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unregister_event_manager(self, manager): self.event_managers.remove(manager) for type_id in manager.type_ids: self.event_managers_dict[type_id].remove(manager) manager.stop() manager.window = None ``` ###Assistant : Unregister and stop an event manager previously registered with :meth:`register_event_manager`. .. versionadded:: 2.1.0 .. warning:: This is an experimental method and it remains so until this warning is present as it can be changed or removed in the next versions of Kivy. " 1011,"def _c3_mro(cls, abcs=None): for i, base in enumerate(reversed(cls.__bases__)): if hasattr(base, '__abstractmethods__'): boundary = len(cls.__bases__) - i break # Bases up to the last explicit ABC are considered first. else: boundary = 0 abcs = list(abcs) if abcs else [] explicit_bases = list(cls.__bases__[:boundary]) abstract_bases = [] other_bases = list(cls.__bases__[boundary:]) for base in abcs: if issubclass(cls, base) and not any( issubclass(b, base) for b in cls.__bases__ ): # If *cls* is the class that introduces behaviour described by # an ABC *base*, insert said ABC to its MRO. 
abstract_bases.append(base) for base in abstract_bases: abcs.remove(base) explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] return _c3_merge( [[cls]] + explicit_c3_mros + abstract_c3_mros + other_c3_mros + [explicit_bases] + [abstract_bases] + [other_bases] ) ","Computes the method resolution order using extended C3 linearization. If no *abcs* are given, the algorithm works exactly like the built-in C3 linearization used for method resolution. If given, *abcs* is a list of abstract base classes that should be inserted into the resulting MRO. Unrelated ABCs are ignored and don't end up in the result. The algorithm inserts ABCs where their functionality is introduced, i.e. issubclass(cls, abc) returns True for the class itself but returns False for all its direct base classes. Implicit ABCs for a given class (either registered or inferred from the presence of a special method like __len__) are inserted directly after the last ABC explicitly listed in the MRO of said class. If two implicit ABCs end up next to each other in the resulting MRO, their ordering depends on the order of types in *abcs*. ",141,132,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _c3_mro(cls, abcs=None): for i, base in enumerate(reversed(cls.__bases__)): if hasattr(base, '__abstractmethods__'): boundary = len(cls.__bases__) - i break # Bases up to the last explicit ABC are considered first. else: boundary = 0 abcs = list(abcs) if abcs else [] explicit_bases = list(cls.__bases__[:boundary]) abstract_bases = [] other_bases = list(cls.__bases__[boundary:]) for base in abcs: if issubclass(cls, base) and not any( issubclass(b, base) for b in cls.__bases__ ): # If *cls* is the class that introduces behaviour described by # an ABC *base*, insert said ABC to its MRO. abstract_bases.append(base) for base in abstract_bases: abcs.remove(base) explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] return _c3_merge( [[cls]] + explicit_c3_mros + abstract_c3_mros + other_c3_mros + [explicit_bases] + [abstract_bases] + [other_bases] ) ``` ###Assistant : Computes the method resolution order using extended C3 linearization. If no *abcs* are given, the algorithm works exactly like the built-in C3 linearization used for method resolution. If given, *abcs* is a list of abstract base classes that should be inserted into the resulting MRO. Unrelated ABCs are ignored and don't end up in the result. The algorithm inserts ABCs where their functionality is introduced, i.e. issubclass(cls, abc) returns True for the class itself but returns False for all its direct base classes. Implicit ABCs for a given class (either registered or inferred from the presence of a special method like __len__) are inserted directly after the last ABC explicitly listed in the MRO of said class. If two implicit ABCs end up next to each other in the resulting MRO, their ordering depends on the order of types in *abcs*. " 1012,"async def async_start_charging(self) -> None: await self.hass.async_add_executor_job(self.leaf.start_charging) self.schedule_update() ",Request to start charging the car. 
Used by the button platform.,11,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_start_charging(self) -> None: await self.hass.async_add_executor_job(self.leaf.start_charging) self.schedule_update() ``` ###Assistant : Request to start charging the car. Used by the button platform. " 1013,"def test_multiple_server_connections(tctx): server1 = Placeholder(Server) server2 = Placeholder(Server) playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False) ",Test multiple requests being rewritten to different targets.,8,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_multiple_server_connections(tctx): server1 = Placeholder(Server) server2 = Placeholder(Server) playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False) ``` ###Assistant : Test multiple requests being rewritten to different targets. " 1014,"def test_overlapping_output_names(self) -> None: self._test_overlapping_names( outputs0=['o0', 'o1'], outputs1=['o1', 'o2']) "," Tests error checking when the name of the output overlaps ",10,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_overlapping_output_names(self) -> None: self._test_overlapping_names( outputs0=['o0', 'o1'], outputs1=['o1', 'o2']) ``` ###Assistant : Tests error checking when the name of the output overlaps " 1015,"def test_write_profiles_does_not_include_default(self, temporary_profiles_path): write_profiles({}) assert ""profiles.default"" not in temporary_profiles_path.read_text() "," Including the default has a tendency to bake in settings the user may not want, and can prevent them from gaining new defaults. ",23,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_write_profiles_does_not_include_default(self, temporary_profiles_path): write_profiles({}) assert ""profiles.default"" not in temporary_profiles_path.read_text() ``` ###Assistant : Including the default has a tendency to bake in settings the user may not want, and can prevent them from gaining new defaults. " 1016,"def phase_retarder(theta=0, delta=0): R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2, (1-exp(I*delta))*cos(theta)*sin(theta)], [(1-exp(I*delta))*cos(theta)*sin(theta), sin(theta)**2 + exp(I*delta)*cos(theta)**2]]) return R*exp(-I*delta/2) ","A phase retarder Jones matrix with retardance `delta` at angle `theta`. Parameters ========== theta : numeric type or SymPy Symbol The angle of the fast axis relative to the horizontal plane. delta : numeric type or SymPy Symbol The phase difference between the fast and slow axes of the transmitted light. Returns ======= SymPy Matrix : A Jones matrix representing the retarder. Examples ======== A generic retarder. >>> from sympy import pprint, symbols >>> from sympy.physics.optics.polarization import phase_retarder >>> theta, delta = symbols(""theta, delta"", real=True) >>> R = phase_retarder(theta, delta) >>> pprint(R, use_unicode=True) ⎡ -ⅈ⋅δ -ⅈ⋅δ ⎤ ⎢ ───── ───── ⎥ ⎢⎛ ⅈ⋅δ 2 2 ⎞ 2 ⎛ ⅈ⋅δ⎞ 2 ⎥ ⎢⎝ℯ ⋅sin (θ) + cos (θ)⎠⋅ℯ ⎝1 - ℯ ⎠⋅ℯ ⋅sin(θ)⋅cos(θ)⎥ ⎢ ⎥ ⎢ -ⅈ⋅δ -ⅈ⋅δ ⎥ ⎢ ───── ─────⎥ ⎢⎛ ⅈ⋅δ⎞ 2 ⎛ ⅈ⋅δ 2 2 ⎞ 2 ⎥ ⎣⎝1 - ℯ ⎠⋅ℯ ⋅sin(θ)⋅cos(θ) ⎝ℯ ⋅cos (θ) + sin (θ)⎠⋅ℯ ⎦ ",153,15,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def phase_retarder(theta=0, delta=0): R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2, (1-exp(I*delta))*cos(theta)*sin(theta)], [(1-exp(I*delta))*cos(theta)*sin(theta), sin(theta)**2 + exp(I*delta)*cos(theta)**2]]) return R*exp(-I*delta/2) ``` ###Assistant : A phase retarder Jones matrix with retardance `delta` at angle `theta`. Parameters ========== theta : numeric type or SymPy Symbol The angle of the fast axis relative to the horizontal plane. delta : numeric type or SymPy Symbol The phase difference between the fast and slow axes of the transmitted light. Returns ======= SymPy Matrix : A Jones matrix representing the retarder. Examples ======== A generic retarder. >>> from sympy import pprint, symbols >>> from sympy.physics.optics.polarization import phase_retarder >>> theta, delta = symbols(""theta, delta"", real=True) >>> R = phase_retarder(theta, delta) >>> pprint(R, use_unicode=True) ⎡ -ⅈ⋅δ -ⅈ⋅δ ⎤ ⎢ ───── ───── ⎥ ⎢⎛ ⅈ⋅δ 2 2 ⎞ 2 ⎛ ⅈ⋅δ⎞ 2 ⎥ ⎢⎝ℯ ⋅sin (θ) + cos (θ)⎠⋅ℯ ⎝1 - ℯ ⎠⋅ℯ ⋅sin(θ)⋅cos(θ)⎥ ⎢ ⎥ ⎢ -ⅈ⋅δ -ⅈ⋅δ ⎥ ⎢ ───── ─────⎥ ⎢⎛ ⅈ⋅δ⎞ 2 ⎛ ⅈ⋅δ 2 2 ⎞ 2 ⎥ ⎣⎝1 - ℯ ⎠⋅ℯ ⋅sin(θ)⋅cos(θ) ⎝ℯ ⋅cos (θ) + sin (θ)⎠⋅ℯ ⎦ " 1017,"def wrapCommandForDebuggerForExec(*args): gdb_path = getExecutablePath(""gdb"") # Windows extra ball, attempt the downloaded one. if isWin32Windows() and gdb_path is None: from nuitka.Options import assumeYesForDownloads mingw64_gcc_path = getCachedDownloadedMinGW64( target_arch=getArchitecture(), assume_yes_for_downloads=assumeYesForDownloads(), ) with withEnvironmentPathAdded(""PATH"", os.path.dirname(mingw64_gcc_path)): lldb_path = getExecutablePath(""lldb"") if gdb_path is None and lldb_path is None: lldb_path = getExecutablePath(""lldb"") if lldb_path is None: general.sysexit(""Error, no 'gdb' or 'lldb' binary found in path."") if gdb_path is not None: args = (gdb_path, ""gdb"", ""-ex=run"", ""-ex=where"", ""-ex=quit"", ""--args"") + args else: args = (lldb_path, ""lldb"", ""-o"", ""run"", ""-o"", ""bt"", ""-o"", ""quit"", ""--"") + args return args ","Wrap a command for system debugger to call exec Args: args: (list of str) args for call to be debugged Returns: args tuple with debugger command inserted Notes: Currently only gdb and lldb are supported, but adding more debuggers would be very welcome. ",43,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wrapCommandForDebuggerForExec(*args): gdb_path = getExecutablePath(""gdb"") # Windows extra ball, attempt the downloaded one. 
if isWin32Windows() and gdb_path is None: from nuitka.Options import assumeYesForDownloads mingw64_gcc_path = getCachedDownloadedMinGW64( target_arch=getArchitecture(), assume_yes_for_downloads=assumeYesForDownloads(), ) with withEnvironmentPathAdded(""PATH"", os.path.dirname(mingw64_gcc_path)): lldb_path = getExecutablePath(""lldb"") if gdb_path is None and lldb_path is None: lldb_path = getExecutablePath(""lldb"") if lldb_path is None: general.sysexit(""Error, no 'gdb' or 'lldb' binary found in path."") if gdb_path is not None: args = (gdb_path, ""gdb"", ""-ex=run"", ""-ex=where"", ""-ex=quit"", ""--args"") + args else: args = (lldb_path, ""lldb"", ""-o"", ""run"", ""-o"", ""bt"", ""-o"", ""quit"", ""--"") + args return args ``` ###Assistant : Wrap a command for system debugger to call exec Args: args: (list of str) args for call to be debugged Returns: args tuple with debugger command inserted Notes: Currently only gdb and lldb are supported, but adding more debuggers would be very welcome. " 1018,"def test_commands_with_invalid_settings(self): args = [""startproject""] out, err = self.run_django_admin(args, settings_file=""bad_settings"") self.assertNoOutput(out) self.assertOutput(err, ""You must provide a project name"", regex=True) "," Commands that don't require settings succeed if the settings file doesn't exist. ",12,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_commands_with_invalid_settings(self): args = [""startproject""] out, err = self.run_django_admin(args, settings_file=""bad_settings"") self.assertNoOutput(out) self.assertOutput(err, ""You must provide a project name"", regex=True) ``` ###Assistant : Commands that don't require settings succeed if the settings file doesn't exist. " 1019,"def read(self, size=-1): if self.closed: raise ValueError(""I/O operation on closed file"") if self.size_read >= self.chunksize: return b'' if size < 0: size = self.chunksize - self.size_read if size > self.chunksize - self.size_read: size = self.chunksize - self.size_read data = self.file.read(size) self.size_read = self.size_read + len(data) if self.size_read == self.chunksize and \ self.align and \ (self.chunksize & 1): dummy = self.file.read(1) self.size_read = self.size_read + len(dummy) return data ","Read at most size bytes from the chunk. If size is omitted or negative, read until the end of the chunk. ",21,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read(self, size=-1): if self.closed: raise ValueError(""I/O operation on closed file"") if self.size_read >= self.chunksize: return b'' if size < 0: size = self.chunksize - self.size_read if size > self.chunksize - self.size_read: size = self.chunksize - self.size_read data = self.file.read(size) self.size_read = self.size_read + len(data) if self.size_read == self.chunksize and \ self.align and \ (self.chunksize & 1): dummy = self.file.read(1) self.size_read = self.size_read + len(dummy) return data ``` ###Assistant : Read at most size bytes from the chunk. If size is omitted or negative, read until the end of the chunk. 
" 1020,"def slicing_plan(chunks, index): from dask.array.utils import asarray_safe if not is_arraylike(index): index = np.asanyarray(index) cum_chunks = cached_cumsum(chunks) cum_chunks = asarray_safe(cum_chunks, like=index) # this dispactches to the array library chunk_locations = np.searchsorted(cum_chunks, index, side=""right"") # but we need chunk_locations as python ints for getitem calls downstream chunk_locations = chunk_locations.tolist() where = np.where(np.diff(chunk_locations))[0] + 1 extra = asarray_safe([0], like=where) c_loc = asarray_safe([len(chunk_locations)], like=where) where = np.concatenate([extra, where, c_loc]) out = [] for i in range(len(where) - 1): sub_index = index[where[i] : where[i + 1]] chunk = chunk_locations[where[i]] if chunk > 0: sub_index = sub_index - cum_chunks[chunk - 1] out.append((chunk, sub_index)) return out ","Construct a plan to slice chunks with the given index Parameters ---------- chunks : Tuple[int] One dimensions worth of chunking information index : np.ndarray[int] The index passed to slice on that dimension Returns ------- out : List[Tuple[int, np.ndarray]] A list of chunk/sub-index pairs corresponding to each output chunk ",48,99,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def slicing_plan(chunks, index): from dask.array.utils import asarray_safe if not is_arraylike(index): index = np.asanyarray(index) cum_chunks = cached_cumsum(chunks) cum_chunks = asarray_safe(cum_chunks, like=index) # this dispactches to the array library chunk_locations = np.searchsorted(cum_chunks, index, side=""right"") # but we need chunk_locations as python ints for getitem calls downstream chunk_locations = chunk_locations.tolist() where = np.where(np.diff(chunk_locations))[0] + 1 extra = asarray_safe([0], like=where) c_loc = asarray_safe([len(chunk_locations)], like=where) where = np.concatenate([extra, where, c_loc]) out = [] for i in range(len(where) - 1): sub_index = index[where[i] : where[i + 1]] chunk = chunk_locations[where[i]] if chunk > 0: sub_index = sub_index - cum_chunks[chunk - 1] out.append((chunk, sub_index)) return out ``` ###Assistant : Construct a plan to slice chunks with the given index Parameters ---------- chunks : Tuple[int] One dimensions worth of chunking information index : np.ndarray[int] The index passed to slice on that dimension Returns ------- out : List[Tuple[int, np.ndarray]] A list of chunk/sub-index pairs corresponding to each output chunk " 1021,"def _add_conv_branch(self) -> None: branch_convs = ModuleList() for i in range(self.num_convs): branch_convs.append( Bottleneck( inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) return branch_convs ",Add the fc branch which consists of a sequential of conv layers.,12,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _add_conv_branch(self) -> None: branch_convs = ModuleList() for i in range(self.num_convs): branch_convs.append( Bottleneck( inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) return branch_convs ``` ###Assistant : Add the fc branch which consists of a sequential of conv layers. 
" 1022,"def match_files(patterns, files): all_files = files if isinstance(files, Collection) else list(files) return_files = set() for pattern in patterns: if pattern.include is not None: result_files = pattern.match(all_files) if pattern.include: return_files.update(result_files) else: return_files.difference_update(result_files) return return_files "," Matches the files to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the normalized file paths to be matched against *patterns*. Returns the matched files (:class:`set` of :class:`str`). ",36,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def match_files(patterns, files): all_files = files if isinstance(files, Collection) else list(files) return_files = set() for pattern in patterns: if pattern.include is not None: result_files = pattern.match(all_files) if pattern.include: return_files.update(result_files) else: return_files.difference_update(result_files) return return_files ``` ###Assistant : Matches the files to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the normalized file paths to be matched against *patterns*. Returns the matched files (:class:`set` of :class:`str`). " 1023,"def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]: if self.dtype[0] == DtypeKind.STRING: # For each string, we need to manually determine the next offset values = self._col.to_numpy() ptr = 0 offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64) for i, v in enumerate(values): # For missing values (in this case, `np.nan` values) # we don't increment the pointer if isinstance(v, str): b = v.encode(encoding=""utf-8"") ptr += len(b) offsets[i + 1] = ptr # Convert the offsets to a Pandas ""buffer"" using # the NumPy array as the backing store buffer = PandasBuffer(offsets) # Assemble the buffer dtype info dtype = ( DtypeKind.INT, 64, ArrowCTypes.INT64, Endianness.NATIVE, ) # note: currently only support native endianness else: raise NoBufferPresent( ""This column has a fixed-length dtype so "" ""it does not have an offsets buffer"" ) return buffer, dtype "," Return the buffer containing the offset values for variable-size binary data (e.g., variable-length strings) and the buffer's associated dtype. Raises NoBufferPresent if the data buffer does not have an associated offsets buffer. ",32,130,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]: if self.dtype[0] == DtypeKind.STRING: # For each string, we need to manually determine the next offset values = self._col.to_numpy() ptr = 0 offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64) for i, v in enumerate(values): # For missing values (in this case, `np.nan` values) # we don't increment the pointer if isinstance(v, str): b = v.encode(encoding=""utf-8"") ptr += len(b) offsets[i + 1] = ptr # Convert the offsets to a Pandas ""buffer"" using # the NumPy array as the backing store buffer = PandasBuffer(offsets) # Assemble the buffer dtype info dtype = ( DtypeKind.INT, 64, ArrowCTypes.INT64, Endianness.NATIVE, ) # note: currently only support native endianness else: raise NoBufferPresent( ""This column has a fixed-length dtype so "" ""it does not have an offsets buffer"" ) return buffer, dtype ``` ###Assistant : Return the buffer containing the offset values for variable-size binary data (e.g., variable-length strings) and the buffer's associated dtype. Raises NoBufferPresent if the data buffer does not have an associated offsets buffer. " 1024,"def _unschedule_refresh(self) -> None: if self._unsub_refresh: self._unsub_refresh() self._unsub_refresh = None ",Unschedule any pending refresh since there is no longer any listeners.,11,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _unschedule_refresh(self) -> None: if self._unsub_refresh: self._unsub_refresh() self._unsub_refresh = None ``` ###Assistant : Unschedule any pending refresh since there is no longer any listeners. " 1025,"def test_connect_and_rollback(self): new_connection = connection.copy() try: # Ensure the database default time zone is different than # the time zone in new_connection.settings_dict. We can # get the default time zone by reset & show. with new_connection.cursor() as cursor: cursor.execute(""RESET TIMEZONE"") cursor.execute(""SHOW TIMEZONE"") db_default_tz = cursor.fetchone()[0] new_tz = ""Europe/Paris"" if db_default_tz == ""UTC"" else ""UTC"" new_connection.close() # Invalidate timezone name cache, because the setting_changed # handler cannot know about new_connection. del new_connection.timezone_name # Fetch a new connection with the new_tz as default # time zone, run a query and rollback. with self.settings(TIME_ZONE=new_tz): new_connection.set_autocommit(False) new_connection.rollback() # Now let's see if the rollback rolled back the SET TIME ZONE. with new_connection.cursor() as cursor: cursor.execute(""SHOW TIMEZONE"") tz = cursor.fetchone()[0] self.assertEqual(new_tz, tz) finally: new_connection.close() "," PostgreSQL shouldn't roll back SET TIME ZONE, even if the first transaction is rolled back (#17062). ",16,119,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_connect_and_rollback(self): new_connection = connection.copy() try: # Ensure the database default time zone is different than # the time zone in new_connection.settings_dict. We can # get the default time zone by reset & show. with new_connection.cursor() as cursor: cursor.execute(""RESET TIMEZONE"") cursor.execute(""SHOW TIMEZONE"") db_default_tz = cursor.fetchone()[0] new_tz = ""Europe/Paris"" if db_default_tz == ""UTC"" else ""UTC"" new_connection.close() # Invalidate timezone name cache, because the setting_changed # handler cannot know about new_connection. 
del new_connection.timezone_name # Fetch a new connection with the new_tz as default # time zone, run a query and rollback. with self.settings(TIME_ZONE=new_tz): new_connection.set_autocommit(False) new_connection.rollback() # Now let's see if the rollback rolled back the SET TIME ZONE. with new_connection.cursor() as cursor: cursor.execute(""SHOW TIMEZONE"") tz = cursor.fetchone()[0] self.assertEqual(new_tz, tz) finally: new_connection.close() ``` ###Assistant : PostgreSQL shouldn't roll back SET TIME ZONE, even if the first transaction is rolled back (#17062). " 1026,"def test_readback_tfrecords(ray_start_regular_shared, tmp_path): # The dataset we will write to a .tfrecords file. ds = ray.data.from_items( [ # Row one. { ""int_item"": 1, ""int_list"": [2, 2, 3], ""float_item"": 1.0, ""float_list"": [2.0, 3.0, 4.0], ""bytes_item"": b""abc"", ""bytes_list"": [b""abc"", b""1234""], }, # Row two. { ""int_item"": 2, ""int_list"": [3, 3, 4], ""float_item"": 2.0, ""float_list"": [2.0, 2.0, 3.0], ""bytes_item"": b""def"", ""bytes_list"": [b""def"", b""1234""], }, ] ) # Write the TFRecords. ds.write_tfrecords(tmp_path) # Read the TFRecords. readback_ds = ray.data.read_tfrecords(tmp_path) assert ds.take() == readback_ds.take() "," Test reading back TFRecords written using datasets. The dataset we read back should be the same that we wrote. ",19,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_readback_tfrecords(ray_start_regular_shared, tmp_path): # The dataset we will write to a .tfrecords file. ds = ray.data.from_items( [ # Row one. { ""int_item"": 1, ""int_list"": [2, 2, 3], ""float_item"": 1.0, ""float_list"": [2.0, 3.0, 4.0], ""bytes_item"": b""abc"", ""bytes_list"": [b""abc"", b""1234""], }, # Row two. { ""int_item"": 2, ""int_list"": [3, 3, 4], ""float_item"": 2.0, ""float_list"": [2.0, 2.0, 3.0], ""bytes_item"": b""def"", ""bytes_list"": [b""def"", b""1234""], }, ] ) # Write the TFRecords. ds.write_tfrecords(tmp_path) # Read the TFRecords. readback_ds = ray.data.read_tfrecords(tmp_path) assert ds.take() == readback_ds.take() ``` ###Assistant : Test reading back TFRecords written using datasets. The dataset we read back should be the same that we wrote. " 1027,"def call(self, inputs, state): cur_state_pos = 0 cur_inp = inputs new_states = [] for i, cell in enumerate(self._cells): with tf.compat.v1.variable_scope(""cell_%d"" % i): if self._state_is_tuple: if not tf.nest.is_nested(state): raise ValueError( ""Expected state to be a tuple of length "" f""{len(self.state_size)}"" f"", but received: {state}"" ) cur_state = state[i] else: cur_state = tf.slice( state, [0, cur_state_pos], [-1, cell.state_size] ) cur_state_pos += cell.state_size cur_inp, new_state = cell(cur_inp, cur_state) new_states.append(new_state) new_states = ( tuple(new_states) if self._state_is_tuple else tf.concat(new_states, 1) ) return cur_inp, new_states ","Run this multi-layer cell on inputs, starting from state.",9,79,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def call(self, inputs, state): cur_state_pos = 0 cur_inp = inputs new_states = [] for i, cell in enumerate(self._cells): with tf.compat.v1.variable_scope(""cell_%d"" % i): if self._state_is_tuple: if not tf.nest.is_nested(state): raise ValueError( ""Expected state to be a tuple of length "" f""{len(self.state_size)}"" f"", but received: {state}"" ) cur_state = state[i] else: cur_state = tf.slice( state, [0, cur_state_pos], [-1, cell.state_size] ) cur_state_pos += cell.state_size cur_inp, new_state = cell(cur_inp, cur_state) new_states.append(new_state) new_states = ( tuple(new_states) if self._state_is_tuple else tf.concat(new_states, 1) ) return cur_inp, new_states ``` ###Assistant : Run this multi-layer cell on inputs, starting from state. " 1028,"def test_basic(push_channel): msgs = [ {""foo"": ""bar""}, {""bar"": ""baz""}, {""baz"": ""qux"", ""list"": [1, 2, 3]}, ] for msg in msgs: ret = push_channel.send(msg, timeout=5, tries=1) assert ret[""load""] == msg "," Test a variety of messages, make sure we get the expected responses ",12,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_basic(push_channel): msgs = [ {""foo"": ""bar""}, {""bar"": ""baz""}, {""baz"": ""qux"", ""list"": [1, 2, 3]}, ] for msg in msgs: ret = push_channel.send(msg, timeout=5, tries=1) assert ret[""load""] == msg ``` ###Assistant : Test a variety of messages, make sure we get the expected responses " 1029,"def test_put_global(self) -> None: self.get_success( self._module_api.account_data_manager.put_global( self.user_id, ""test.data"", {""wombat"": True} ) ) # Request that account data from the normal store; check it's as we expect. self.assertEqual( self.get_success( self._store.get_global_account_data_by_type_for_user( self.user_id, ""test.data"" ) ), {""wombat"": True}, ) "," Tests that written account data using `put_global` can be read out again later. ",13,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_put_global(self) -> None: self.get_success( self._module_api.account_data_manager.put_global( self.user_id, ""test.data"", {""wombat"": True} ) ) # Request that account data from the normal store; check it's as we expect. self.assertEqual( self.get_success( self._store.get_global_account_data_by_type_for_user( self.user_id, ""test.data"" ) ), {""wombat"": True}, ) ``` ###Assistant : Tests that written account data using `put_global` can be read out again later. " 1030,"def get_conda_environment_content(build_metadata): template = environment.from_string( .strip() ) return template.render(build_metadata=build_metadata) "," # DO NOT EDIT: this file is generated from the specification found in the # following script to centralize the configuration for all Azure CI builds: # build_tools/azure/update_environments_and_lock_files.py channels: - {{ build_metadata['channel'] }} dependencies: {% for conda_dep in build_metadata['conda_dependencies'] %} - {{ conda_dep | get_package_with_constraint(build_metadata) }} {% endfor %} {% if build_metadata['pip_dependencies'] %} - pip - pip: {% for pip_dep in build_metadata.get('pip_dependencies', []) %} - {{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }} {% endfor %} {% endif %}",77,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_conda_environment_content(build_metadata): template = environment.from_string( .strip() ) return template.render(build_metadata=build_metadata) ``` ###Assistant : # DO NOT EDIT: this file is generated from the specification found in the # following script to centralize the configuration for all Azure CI builds: # build_tools/azure/update_environments_and_lock_files.py channels: - {{ build_metadata['channel'] }} dependencies: {% for conda_dep in build_metadata['conda_dependencies'] %} - {{ conda_dep | get_package_with_constraint(build_metadata) }} {% endfor %} {% if build_metadata['pip_dependencies'] %} - pip - pip: {% for pip_dep in build_metadata.get('pip_dependencies', []) %} - {{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }} {% endfor %} {% endif %} " 1031,"def preprocess(self, image, image_format): format = self.format or image_format save_kwargs = {""format"": format} # Ensuring image is properly rotated if hasattr(image, ""_getexif""): exif_datadict = image._getexif() # returns None if no EXIF data if exif_datadict is not None: exif = dict(exif_datadict.items()) orientation = exif.get(self.EXIF_ORIENTATION_KEY, None) if orientation == 3: image = image.transpose(Image.ROTATE_180) elif orientation == 6: image = image.transpose(Image.ROTATE_270) elif orientation == 8: image = image.transpose(Image.ROTATE_90) # Ensure any embedded ICC profile is preserved save_kwargs[""icc_profile""] = image.info.get(""icc_profile"") if hasattr(self, ""preprocess_%s"" % format): image, addl_save_kwargs = getattr(self, ""preprocess_%s"" % format)( image=image ) save_kwargs.update(addl_save_kwargs) return image, save_kwargs ","Preprocess an image. An API hook for image pre-processing. Calls any image format specific pre-processors (if defined). I.E. If `image_format` is 'JPEG', this method will look for a method named `preprocess_JPEG`, if found `image` will be passed to it. Arguments: image: a PIL Image instance image_format: str, a valid PIL format (i.e. 'JPEG' or 'WEBP') Subclasses should return a 2-tuple: * [0]: A PIL Image instance. * [1]: A dictionary of additional keyword arguments to be used when the instance is saved. If no additional keyword arguments, return an empty dict ({}). ",92,94,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def preprocess(self, image, image_format): format = self.format or image_format save_kwargs = {""format"": format} # Ensuring image is properly rotated if hasattr(image, ""_getexif""): exif_datadict = image._getexif() # returns None if no EXIF data if exif_datadict is not None: exif = dict(exif_datadict.items()) orientation = exif.get(self.EXIF_ORIENTATION_KEY, None) if orientation == 3: image = image.transpose(Image.ROTATE_180) elif orientation == 6: image = image.transpose(Image.ROTATE_270) elif orientation == 8: image = image.transpose(Image.ROTATE_90) # Ensure any embedded ICC profile is preserved save_kwargs[""icc_profile""] = image.info.get(""icc_profile"") if hasattr(self, ""preprocess_%s"" % format): image, addl_save_kwargs = getattr(self, ""preprocess_%s"" % format)( image=image ) save_kwargs.update(addl_save_kwargs) return image, save_kwargs ``` ###Assistant : Preprocess an image. An API hook for image pre-processing. Calls any image format specific pre-processors (if defined). I.E. 
If `image_format` is 'JPEG', this method will look for a method named `preprocess_JPEG`, if found `image` will be passed to it. Arguments: image: a PIL Image instance image_format: str, a valid PIL format (i.e. 'JPEG' or 'WEBP') Subclasses should return a 2-tuple: * [0]: A PIL Image instance. * [1]: A dictionary of additional keyword arguments to be used when the instance is saved. If no additional keyword arguments, return an empty dict ({}). " 1032,"def get_package_paths(package): pkg_paths = get_all_package_paths(package) if not pkg_paths: raise ValueError(f""Package '{package}' does not exist or is not a package!"") if len(pkg_paths) > 1: logger.warning( ""get_package_paths - package %s has multiple paths (%r); returning only first one!"", package, pkg_paths ) pkg_dir = pkg_paths[0] pkg_base = package_base_path(pkg_dir, package) return pkg_base, pkg_dir "," Given a package, return the path to packages stored on this machine and also returns the path to this particular package. For example, if pkg.subpkg lives in /abs/path/to/python/libs, then this function returns ``(/abs/path/to/python/libs, /abs/path/to/python/libs/pkg/subpkg)``. NOTE: due to backwards compatibility, this function returns only one package path along with its base directory. In case of PEP 420 namespace package with multiple location, only first location is returned. To obtain all package paths, use the ``get_all_package_paths`` function and obtain corresponding base directories using the ``package_base_path`` helper. ",84,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_package_paths(package): pkg_paths = get_all_package_paths(package) if not pkg_paths: raise ValueError(f""Package '{package}' does not exist or is not a package!"") if len(pkg_paths) > 1: logger.warning( ""get_package_paths - package %s has multiple paths (%r); returning only first one!"", package, pkg_paths ) pkg_dir = pkg_paths[0] pkg_base = package_base_path(pkg_dir, package) return pkg_base, pkg_dir ``` ###Assistant : Given a package, return the path to packages stored on this machine and also returns the path to this particular package. For example, if pkg.subpkg lives in /abs/path/to/python/libs, then this function returns ``(/abs/path/to/python/libs, /abs/path/to/python/libs/pkg/subpkg)``. NOTE: due to backwards compatibility, this function returns only one package path along with its base directory. In case of PEP 420 namespace package with multiple location, only first location is returned. To obtain all package paths, use the ``get_all_package_paths`` function and obtain corresponding base directories using the ``package_base_path`` helper. " 1033,"def get_address_territory(address_name): territory = None if address_name: address_fields = frappe.db.get_value(""Address"", address_name, [""city"", ""state"", ""country""]) for value in address_fields: territory = frappe.db.get_value(""Territory"", value) if territory: break return territory ","Tries to match city, state and country of address to existing territory",12,27,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_address_territory(address_name): territory = None if address_name: address_fields = frappe.db.get_value(""Address"", address_name, [""city"", ""state"", ""country""]) for value in address_fields: territory = frappe.db.get_value(""Territory"", value) if territory: break return territory ``` ###Assistant : Tries to match city, state and country of address to existing territory " 1034,"def _get_device_coords(self, position, height): x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH y = RACK_ELEVATION_BORDER_WIDTH if self.rack.desc_units: y += int((position - 1) * self.unit_height) else: y += int((self.rack.u_height - position + 1) * self.unit_height) - int(height * self.unit_height) return x, y "," Return the X, Y coordinates of the top left corner for a device in the specified rack unit. ",18,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_device_coords(self, position, height): x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH y = RACK_ELEVATION_BORDER_WIDTH if self.rack.desc_units: y += int((position - 1) * self.unit_height) else: y += int((self.rack.u_height - position + 1) * self.unit_height) - int(height * self.unit_height) return x, y ``` ###Assistant : Return the X, Y coordinates of the top left corner for a device in the specified rack unit. " 1035,"def igcd(*args): if len(args) < 2: raise TypeError( 'igcd() takes at least 2 arguments (%s given)' % len(args)) args_temp = [abs(as_int(i)) for i in args] if 1 in args_temp: return 1 a = args_temp.pop() if HAS_GMPY: # Using gmpy if present to speed up. for b in args_temp: a = gmpy.gcd(a, b) if b else a return as_int(a) for b in args_temp: a = math.gcd(a, b) return a igcd2 = math.gcd ","Computes nonnegative integer greatest common divisor. Explanation =========== The algorithm is based on the well known Euclid's algorithm [1]_. To improve speed, ``igcd()`` has its own caching mechanism. Examples ======== >>> from sympy import igcd >>> igcd(2, 4) 2 >>> igcd(5, 10, 15) 5 References ========== .. [1] https://en.wikipedia.org/wiki/Euclidean_algorithm ",49,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def igcd(*args): if len(args) < 2: raise TypeError( 'igcd() takes at least 2 arguments (%s given)' % len(args)) args_temp = [abs(as_int(i)) for i in args] if 1 in args_temp: return 1 a = args_temp.pop() if HAS_GMPY: # Using gmpy if present to speed up. for b in args_temp: a = gmpy.gcd(a, b) if b else a return as_int(a) for b in args_temp: a = math.gcd(a, b) return a igcd2 = math.gcd ``` ###Assistant : Computes nonnegative integer greatest common divisor. Explanation =========== The algorithm is based on the well known Euclid's algorithm [1]_. To improve speed, ``igcd()`` has its own caching mechanism. Examples ======== >>> from sympy import igcd >>> igcd(2, 4) 2 >>> igcd(5, 10, 15) 5 References ========== .. 
[1] https://en.wikipedia.org/wiki/Euclidean_algorithm " 1036,"def _prev_next_cb(self, found, *, going_up, callback): if found: result = browsertab.SearchNavigationResult.found # Check if the match count change is opposite to the search direction if self._old_match.current > 0: if not going_up and self._old_match.current > self.match.current: result = browsertab.SearchNavigationResult.wrapped_bottom elif going_up and self._old_match.current < self.match.current: result = browsertab.SearchNavigationResult.wrapped_top else: result = browsertab.SearchNavigationResult.not_found callback(result) ",Call the prev/next callback based on the search result.,9,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _prev_next_cb(self, found, *, going_up, callback): if found: result = browsertab.SearchNavigationResult.found # Check if the match count change is opposite to the search direction if self._old_match.current > 0: if not going_up and self._old_match.current > self.match.current: result = browsertab.SearchNavigationResult.wrapped_bottom elif going_up and self._old_match.current < self.match.current: result = browsertab.SearchNavigationResult.wrapped_top else: result = browsertab.SearchNavigationResult.not_found callback(result) ``` ###Assistant : Call the prev/next callback based on the search result. " 1037,"def onModuleSourceCode(self, module_name, source_code): if module_name != ""tensorflow"": return source_code source_lines = source_code.splitlines() found_insert = False for i, l in enumerate(source_lines): if l.startswith(""def "") and ""_running_from_pip_package():"" in l: source_lines.insert(i, ""_site_packages_dirs = []"") source_lines.insert(i, ""from tensorflow.python import keras"") found_insert = True break if found_insert is True: self.info(""Patched 'running-from-pip' path magic."") else: self.sysexit(""Did not find 'running-from-pip' path magic code."") return ""\n"".join(source_lines) ","Neutralize some path magic in tensorflow. Notes: Make sure tensorflow understands, we are not running as a PIP installed application. ",20,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def onModuleSourceCode(self, module_name, source_code): if module_name != ""tensorflow"": return source_code source_lines = source_code.splitlines() found_insert = False for i, l in enumerate(source_lines): if l.startswith(""def "") and ""_running_from_pip_package():"" in l: source_lines.insert(i, ""_site_packages_dirs = []"") source_lines.insert(i, ""from tensorflow.python import keras"") found_insert = True break if found_insert is True: self.info(""Patched 'running-from-pip' path magic."") else: self.sysexit(""Did not find 'running-from-pip' path magic code."") return ""\n"".join(source_lines) ``` ###Assistant : Neutralize some path magic in tensorflow. Notes: Make sure tensorflow understands, we are not running as a PIP installed application. " 1038,"def cast_scalar_indexer(val): # assumes lib.is_scalar(val) if lib.is_float(val) and val.is_integer(): raise IndexError( # GH#34193 ""Indexing with a float is no longer supported. Manually convert "" ""to an integer key instead."" ) return val "," Disallow indexing with a float key, even if that key is a round number. Parameters ---------- val : scalar Returns ------- outval : scalar ",24,32,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def cast_scalar_indexer(val): # assumes lib.is_scalar(val) if lib.is_float(val) and val.is_integer(): raise IndexError( # GH#34193 ""Indexing with a float is no longer supported. Manually convert "" ""to an integer key instead."" ) return val ``` ###Assistant : Disallow indexing with a float key, even if that key is a round number. Parameters ---------- val : scalar Returns ------- outval : scalar " 1039,"def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None plugins: t.Dict[str, t.Type[C]] = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) for plugin in plugins: database[plugin] = plugins[plugin] "," Load plugins of the specified type and track them in the specified database. Only plugins which have already been imported will be loaded. ",23,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None plugins: t.Dict[str, t.Type[C]] = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) for plugin in plugins: database[plugin] = plugins[plugin] ``` ###Assistant : Load plugins of the specified type and track them in the specified database. Only plugins which have already been imported will be loaded. " 1040,"def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero): r a = Wild('a', exclude=[x]) b = Wild('b', exclude=[x]) expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1) retdict = {} "," Returns a trial function match if undetermined coefficients can be applied to ``expr``, and ``None`` otherwise. A trial expression can be found for an expression for use with the method of undetermined coefficients if the expression is an additive/multiplicative combination of constants, polynomials in `x` (the independent variable of expr), `\sin(a x + b)`, `\cos(a x + b)`, and `e^{a x}` terms (in other words, it has a finite number of linearly independent derivatives). Note that you may still need to multiply each term returned here by sufficient `x` to make it linearly independent with the solutions to the homogeneous equation. This is intended for internal use by ``undetermined_coefficients`` hints. SymPy currently has no way to convert `\sin^n(x) \cos^m(y)` into a sum of only `\sin(a x)` and `\cos(b x)` terms, so these are not implemented. So, for example, you will need to manually convert `\sin^2(x)` into `[1 + \cos(2 x)]/2` to properly apply the method of undetermined coefficients on it. Examples ======== >>> from sympy import log, exp >>> from sympy.solvers.ode.nonhomogeneous import _undetermined_coefficients_match >>> from sympy.abc import x >>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x) {'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}} >>> _undetermined_coefficients_match(log(x), x) {'test': False} ",194,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero): r a = Wild('a', exclude=[x]) b = Wild('b', exclude=[x]) expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1) retdict = {} ``` ###Assistant : Returns a trial function match if undetermined coefficients can be applied to ``expr``, and ``None`` otherwise. 
A trial expression can be found for an expression for use with the method of undetermined coefficients if the expression is an additive/multiplicative combination of constants, polynomials in `x` (the independent variable of expr), `\sin(a x + b)`, `\cos(a x + b)`, and `e^{a x}` terms (in other words, it has a finite number of linearly independent derivatives). Note that you may still need to multiply each term returned here by sufficient `x` to make it linearly independent with the solutions to the homogeneous equation. This is intended for internal use by ``undetermined_coefficients`` hints. SymPy currently has no way to convert `\sin^n(x) \cos^m(y)` into a sum of only `\sin(a x)` and `\cos(b x)` terms, so these are not implemented. So, for example, you will need to manually convert `\sin^2(x)` into `[1 + \cos(2 x)]/2` to properly apply the method of undetermined coefficients on it. Examples ======== >>> from sympy import log, exp >>> from sympy.solvers.ode.nonhomogeneous import _undetermined_coefficients_match >>> from sympy.abc import x >>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x) {'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}} >>> _undetermined_coefficients_match(log(x), x) {'test': False} " 1041,"def _remove_raw(self) -> None: if ""raw"" in self._selections: return logger.debug(""Removing Raw Data from output"") for key in list(self._stats.keys()): if key.startswith(""raw""): del self._stats[key] logger.debug(""Removed Raw Data from output"") ", Remove raw values from :attr:`stats` if they are not requested. ,10,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _remove_raw(self) -> None: if ""raw"" in self._selections: return logger.debug(""Removing Raw Data from output"") for key in list(self._stats.keys()): if key.startswith(""raw""): del self._stats[key] logger.debug(""Removed Raw Data from output"") ``` ###Assistant : Remove raw values from :attr:`stats` if they are not requested. " 1042,"def get_keras_blocks(keras_weight_names): # example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a' keras_blocks = {x.split(""_"")[0] for x in keras_weight_names if ""block"" in x} return sorted(keras_blocks) ",Extract the block names from list of full weight names.,10,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_keras_blocks(keras_weight_names): # example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a' keras_blocks = {x.split(""_"")[0] for x in keras_weight_names if ""block"" in x} return sorted(keras_blocks) ``` ###Assistant : Extract the block names from list of full weight names. 
" 1043,"def get_dashboard_info(party_type, party, loyalty_program=None): current_fiscal_year = get_fiscal_year(nowdate(), as_dict=True) doctype = ""Sales Invoice"" if party_type == ""Customer"" else ""Purchase Invoice"" companies = frappe.get_all( doctype, filters={""docstatus"": 1, party_type.lower(): party}, distinct=1, fields=[""company""] ) company_wise_info = [] company_wise_grand_total = frappe.get_all( doctype, filters={ ""docstatus"": 1, party_type.lower(): party, ""posting_date"": ( ""between"", [current_fiscal_year.year_start_date, current_fiscal_year.year_end_date], ), }, group_by=""company"", fields=[ ""company"", ""sum(grand_total) as grand_total"", ""sum(base_grand_total) as base_grand_total"", ], ) loyalty_point_details = [] if party_type == ""Customer"": loyalty_point_details = frappe._dict( frappe.get_all( ""Loyalty Point Entry"", filters={ ""customer"": party, ""expiry_date"": ("">="", getdate()), }, group_by=""company"", fields=[""company"", ""sum(loyalty_points) as loyalty_points""], as_list=1, ) ) company_wise_billing_this_year = frappe._dict() for d in company_wise_grand_total: company_wise_billing_this_year.setdefault( d.company, {""grand_total"": d.grand_total, ""base_grand_total"": d.base_grand_total} ) company_wise_total_unpaid = frappe._dict( frappe.db.sql( , (party_type, party), ) ) for d in companies: company_default_currency = frappe.db.get_value(""Company"", d.company, ""default_currency"") party_account_currency = get_party_account_currency(party_type, party, d.company) if party_account_currency == company_default_currency: billing_this_year = flt( company_wise_billing_this_year.get(d.company, {}).get(""base_grand_total"") ) else: billing_this_year = flt(company_wise_billing_this_year.get(d.company, {}).get(""grand_total"")) total_unpaid = flt(company_wise_total_unpaid.get(d.company)) if loyalty_point_details: loyalty_points = loyalty_point_details.get(d.company) info = {} info[""billing_this_year""] = flt(billing_this_year) if billing_this_year else 0 info[""currency""] = party_account_currency info[""total_unpaid""] = flt(total_unpaid) if total_unpaid else 0 info[""company""] = d.company if party_type == ""Customer"" and loyalty_point_details: info[""loyalty_points""] = loyalty_points if party_type == ""Supplier"": info[""total_unpaid""] = -1 * info[""total_unpaid""] company_wise_info.append(info) return company_wise_info "," select company, sum(debit_in_account_currency) - sum(credit_in_account_currency) from `tabGL Entry` where party_type = %s and party=%s and is_cancelled = 0 group by company",21,193,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_dashboard_info(party_type, party, loyalty_program=None): current_fiscal_year = get_fiscal_year(nowdate(), as_dict=True) doctype = ""Sales Invoice"" if party_type == ""Customer"" else ""Purchase Invoice"" companies = frappe.get_all( doctype, filters={""docstatus"": 1, party_type.lower(): party}, distinct=1, fields=[""company""] ) company_wise_info = [] company_wise_grand_total = frappe.get_all( doctype, filters={ ""docstatus"": 1, party_type.lower(): party, ""posting_date"": ( ""between"", [current_fiscal_year.year_start_date, current_fiscal_year.year_end_date], ), }, group_by=""company"", fields=[ ""company"", ""sum(grand_total) as grand_total"", ""sum(base_grand_total) as base_grand_total"", ], ) loyalty_point_details = [] if party_type == ""Customer"": loyalty_point_details = frappe._dict( frappe.get_all( ""Loyalty Point Entry"", filters={ ""customer"": party, ""expiry_date"": ("">="", getdate()), }, group_by=""company"", fields=[""company"", ""sum(loyalty_points) as loyalty_points""], as_list=1, ) ) company_wise_billing_this_year = frappe._dict() for d in company_wise_grand_total: company_wise_billing_this_year.setdefault( d.company, {""grand_total"": d.grand_total, ""base_grand_total"": d.base_grand_total} ) company_wise_total_unpaid = frappe._dict( frappe.db.sql( , (party_type, party), ) ) for d in companies: company_default_currency = frappe.db.get_value(""Company"", d.company, ""default_currency"") party_account_currency = get_party_account_currency(party_type, party, d.company) if party_account_currency == company_default_currency: billing_this_year = flt( company_wise_billing_this_year.get(d.company, {}).get(""base_grand_total"") ) else: billing_this_year = flt(company_wise_billing_this_year.get(d.company, {}).get(""grand_total"")) total_unpaid = flt(company_wise_total_unpaid.get(d.company)) if loyalty_point_details: loyalty_points = loyalty_point_details.get(d.company) info = {} info[""billing_this_year""] = flt(billing_this_year) if billing_this_year else 0 info[""currency""] = party_account_currency info[""total_unpaid""] = flt(total_unpaid) if total_unpaid else 0 info[""company""] = d.company if party_type == ""Customer"" and loyalty_point_details: info[""loyalty_points""] = loyalty_points if party_type == ""Supplier"": info[""total_unpaid""] = -1 * info[""total_unpaid""] company_wise_info.append(info) return company_wise_info ``` ###Assistant : select company, sum(debit_in_account_currency) - sum(credit_in_account_currency) from `tabGL Entry` where party_type = %s and party=%s and is_cancelled = 0 group by company " 1044,"def print_help(self): help_text = console.print(text=help_text, menu=""Portfolio - Brokers - Robinhood"") ","Print help[cmds] login login to robinhood holdings show account holdings in stocks history show equity history of your account [/cmds]",20,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def print_help(self): help_text = console.print(text=help_text, menu=""Portfolio - Brokers - Robinhood"") ``` ###Assistant : Print help[cmds] login login to robinhood holdings show account holdings in stocks history show equity history of your account [/cmds] " 1045,"def test_page_with_inline_model_with_tabbed_panel_only(self): EventPageSpeaker.settings_panels = [ FieldPanel(""first_name""), FieldPanel(""last_name""), ] warning = checks.Warning( ""EventPageSpeaker.settings_panels will have no effect on InlinePanel model editing"", hint=, obj=EventPageSpeaker, id=self.warning_id, ) checks_results = self.get_checks_result() self.assertIn(warning, checks_results) delattr(EventPageSpeaker, ""settings_panels"") ","Test that checks will warn against setting single tabbed panel on InlinePanel modelEnsure that EventPageSpeaker uses `panels` instead of `settings_panels`. There are no tabs on non-Page model editing within InlinePanels.",30,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_page_with_inline_model_with_tabbed_panel_only(self): EventPageSpeaker.settings_panels = [ FieldPanel(""first_name""), FieldPanel(""last_name""), ] warning = checks.Warning( ""EventPageSpeaker.settings_panels will have no effect on InlinePanel model editing"", hint=, obj=EventPageSpeaker, id=self.warning_id, ) checks_results = self.get_checks_result() self.assertIn(warning, checks_results) delattr(EventPageSpeaker, ""settings_panels"") ``` ###Assistant : Test that checks will warn against setting single tabbed panel on InlinePanel modelEnsure that EventPageSpeaker uses `panels` instead of `settings_panels`. There are no tabs on non-Page model editing within InlinePanels. " 1046,"def to_kwargs(self): default_dict = self.__class__().to_dict() this_dict = self.to_dict() return {k: v for k, v in this_dict.items() if default_dict[k] != v} @dataclass"," Returns a dictionary containing the attributes with values different from the default of this class. ",15,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_kwargs(self): default_dict = self.__class__().to_dict() this_dict = self.to_dict() return {k: v for k, v in this_dict.items() if default_dict[k] != v} @dataclass ``` ###Assistant : Returns a dictionary containing the attributes with values different from the default of this class. " 1047,"def update_document_archive_file(document_id): document = Document.objects.get(id=document_id) mime_type = document.mime_type parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type) if not parser_class: logger.error( f""No parser found for mime type {mime_type}, cannot "" f""archive document {document} (ID: {document_id})"", ) return parser: DocumentParser = parser_class(logging_group=uuid.uuid4()) try: parser.parse(document.source_path, mime_type, document.get_public_filename()) thumbnail = parser.get_thumbnail( document.source_path, mime_type, document.get_public_filename(), ) if parser.get_archive_path(): with transaction.atomic(): with open(parser.get_archive_path(), ""rb"") as f: checksum = hashlib.md5(f.read()).hexdigest() # I'm going to save first so that in case the file move # fails, the database is rolled back. 
# We also don't use save() since that triggers the filehandling # logic, and we don't want that yet (file not yet in place) document.archive_filename = generate_unique_filename( document, archive_filename=True, ) Document.objects.filter(pk=document.pk).update( archive_checksum=checksum, content=parser.get_text(), archive_filename=document.archive_filename, ) with FileLock(settings.MEDIA_LOCK): create_source_path_directory(document.archive_path) shutil.move(parser.get_archive_path(), document.archive_path) shutil.move(thumbnail, document.thumbnail_path) with index.open_index_writer() as writer: index.update_document(writer, document) except Exception: logger.exception( f""Error while parsing document {document} "" f""(ID: {document_id})"", ) finally: parser.cleanup() "," Re-creates the archive file of a document, including new OCR content and thumbnail ",13,141,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_document_archive_file(document_id): document = Document.objects.get(id=document_id) mime_type = document.mime_type parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type) if not parser_class: logger.error( f""No parser found for mime type {mime_type}, cannot "" f""archive document {document} (ID: {document_id})"", ) return parser: DocumentParser = parser_class(logging_group=uuid.uuid4()) try: parser.parse(document.source_path, mime_type, document.get_public_filename()) thumbnail = parser.get_thumbnail( document.source_path, mime_type, document.get_public_filename(), ) if parser.get_archive_path(): with transaction.atomic(): with open(parser.get_archive_path(), ""rb"") as f: checksum = hashlib.md5(f.read()).hexdigest() # I'm going to save first so that in case the file move # fails, the database is rolled back. # We also don't use save() since that triggers the filehandling # logic, and we don't want that yet (file not yet in place) document.archive_filename = generate_unique_filename( document, archive_filename=True, ) Document.objects.filter(pk=document.pk).update( archive_checksum=checksum, content=parser.get_text(), archive_filename=document.archive_filename, ) with FileLock(settings.MEDIA_LOCK): create_source_path_directory(document.archive_path) shutil.move(parser.get_archive_path(), document.archive_path) shutil.move(thumbnail, document.thumbnail_path) with index.open_index_writer() as writer: index.update_document(writer, document) except Exception: logger.exception( f""Error while parsing document {document} "" f""(ID: {document_id})"", ) finally: parser.cleanup() ``` ###Assistant : Re-creates the archive file of a document, including new OCR content and thumbnail " 1048,"def execute(): frappe.reload_doc(""Selling"", ""doctype"", ""Customer Credit Limit"") frappe.reload_doc(""Selling"", ""doctype"", ""Customer"") frappe.reload_doc(""Setup"", ""doctype"", ""Customer Group"") if frappe.db.a_row_exists(""Customer Credit Limit""): return move_credit_limit_to_child_table() ",Move credit limit and bypass credit limit to the child table of customer credit limit,15,20,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def execute(): frappe.reload_doc(""Selling"", ""doctype"", ""Customer Credit Limit"") frappe.reload_doc(""Selling"", ""doctype"", ""Customer"") frappe.reload_doc(""Setup"", ""doctype"", ""Customer Group"") if frappe.db.a_row_exists(""Customer Credit Limit""): return move_credit_limit_to_child_table() ``` ###Assistant : Move credit limit and bypass credit limit to the child table of customer credit limit " 1049,"def forceexit(self, tradeid, ordertype=None, amount=None): return self._post(""forceexit"", data={ ""tradeid"": tradeid, ""ordertype"": ordertype, ""amount"": amount, }) ","Force-exit a trade. :param tradeid: Id of the trade (can be received via status command) :param ordertype: Order type to use (must be market or limit) :param amount: Amount to sell. Full sell if not given :return: json object ",39,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forceexit(self, tradeid, ordertype=None, amount=None): return self._post(""forceexit"", data={ ""tradeid"": tradeid, ""ordertype"": ordertype, ""amount"": amount, }) ``` ###Assistant : Force-exit a trade. :param tradeid: Id of the trade (can be received via status command) :param ordertype: Order type to use (must be market or limit) :param amount: Amount to sell. Full sell if not given :return: json object " 1050,"def _abi3_applies(python_version): # type: (PythonVersion) -> bool return len(python_version) > 1 and tuple(python_version) >= (3, 2) "," Determine if the Python version supports abi3. PEP 384 was first implemented in Python 3.2. ",15,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _abi3_applies(python_version): # type: (PythonVersion) -> bool return len(python_version) > 1 and tuple(python_version) >= (3, 2) ``` ###Assistant : Determine if the Python version supports abi3. PEP 384 was first implemented in Python 3.2. " 1051,"def variable(value, dtype=None, name=None, constraint=None): if dtype is None: dtype = floatx() if hasattr(value, ""tocoo""): sparse_coo = value.tocoo() indices = np.concatenate( ( np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1), ), 1, ) v = tf.SparseTensor( indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape, ) v._keras_shape = sparse_coo.shape return v v = tf.Variable( value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint ) if isinstance(value, np.ndarray): v._keras_shape = value.shape elif hasattr(value, ""shape""): v._keras_shape = int_shape(value) track_variable(v) return v ","Instantiates a variable and returns it. Args: value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. Returns: A variable instance (with Keras metadata included). Examples: >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val, dtype='float64', ... name='example_var') >>> tf.keras.backend.dtype(kvar) 'float64' >>> print(kvar) ",77,64,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def variable(value, dtype=None, name=None, constraint=None): if dtype is None: dtype = floatx() if hasattr(value, ""tocoo""): sparse_coo = value.tocoo() indices = np.concatenate( ( np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1), ), 1, ) v = tf.SparseTensor( indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape, ) v._keras_shape = sparse_coo.shape return v v = tf.Variable( value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint ) if isinstance(value, np.ndarray): v._keras_shape = value.shape elif hasattr(value, ""shape""): v._keras_shape = int_shape(value) track_variable(v) return v ``` ###Assistant : Instantiates a variable and returns it. Args: value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. Returns: A variable instance (with Keras metadata included). Examples: >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val, dtype='float64', ... name='example_var') >>> tf.keras.backend.dtype(kvar) 'float64' >>> print(kvar) " 1052,"def get_cost_to_borrow() -> pd.DataFrame: ftp = ftplib.FTP(""ftp3.interactivebrokers.com"", ""shortstock"") flo = BytesIO() ftp.retrbinary(""RETR usa.txt"", flo.write) flo.seek(0) data = pd.read_csv(flo, sep=""|"", skiprows=1) data = data[[""#SYM"", ""FEERATE"", ""AVAILABLE""]] data[""AVAILABLE""] = data[""AVAILABLE""].replace("">10000000"", 10000000) data.fillna(0, inplace=True) data[""AVAILABLE""] = data[""AVAILABLE""].astype(int) data.sort_values(by=[""FEERATE""], ascending=False, inplace=True) data[""FEERATE""] = data[""FEERATE""].apply(lambda x: str(x) + ""%"") data.columns = [""Symbol"", ""Fees"", ""Available""] return data ","Get stocks with highest cost to borrow [Source: Interactive Broker] Returns ------- pd.DataFrame Cost to borrow ",16,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_cost_to_borrow() -> pd.DataFrame: ftp = ftplib.FTP(""ftp3.interactivebrokers.com"", ""shortstock"") flo = BytesIO() ftp.retrbinary(""RETR usa.txt"", flo.write) flo.seek(0) data = pd.read_csv(flo, sep=""|"", skiprows=1) data = data[[""#SYM"", ""FEERATE"", ""AVAILABLE""]] data[""AVAILABLE""] = data[""AVAILABLE""].replace("">10000000"", 10000000) data.fillna(0, inplace=True) data[""AVAILABLE""] = data[""AVAILABLE""].astype(int) data.sort_values(by=[""FEERATE""], ascending=False, inplace=True) data[""FEERATE""] = data[""FEERATE""].apply(lambda x: str(x) + ""%"") data.columns = [""Symbol"", ""Fees"", ""Available""] return data ``` ###Assistant : Get stocks with highest cost to borrow [Source: Interactive Broker] Returns ------- pd.DataFrame Cost to borrow " 1053,"def _enable_task_listeners(): if get_listener_manager().has_listeners: register_task_instance_state_events() "," Check if we have any registered listeners, then register sqlalchemy hooks for TI state change if we do. ",18,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _enable_task_listeners(): if get_listener_manager().has_listeners: register_task_instance_state_events() ``` ###Assistant : Check if we have any registered listeners, then register sqlalchemy hooks for TI state change if we do. 
" 1054,"def has_computed_output(self) -> bool: return self._snapshot_blocks is not None and not self._stages_after_snapshot ","Whether this plan has a computed snapshot for the final stage, i.e. for the output of this plan. ",18,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def has_computed_output(self) -> bool: return self._snapshot_blocks is not None and not self._stages_after_snapshot ``` ###Assistant : Whether this plan has a computed snapshot for the final stage, i.e. for the output of this plan. " 1055,"def get_conditions(filters): conditions = """" accounting_dimensions = get_accounting_dimensions(as_list=False) or [] accounting_dimensions_list = [d.fieldname for d in accounting_dimensions] if filters.get(""company""): conditions += "" and company=%(company)s"" if filters.get(""customer"") and ""customer"" not in accounting_dimensions_list: conditions += "" and customer = %(customer)s"" if filters.get(""from_date""): conditions += "" and posting_date >= %(from_date)s"" if filters.get(""to_date""): conditions += "" and posting_date <= %(to_date)s"" if filters.get(""owner""): conditions += "" and owner = %(owner)s"" def get_sales_invoice_item_field_condition(field, table=""Sales Invoice Item"") -> str: if not filters.get(field) or field in accounting_dimensions_list: return """" return f conditions += get_sales_invoice_item_field_condition(""mode_of_payments"", ""Sales Invoice Payment"") conditions += get_sales_invoice_item_field_condition(""cost_center"") conditions += get_sales_invoice_item_field_condition(""warehouse"") conditions += get_sales_invoice_item_field_condition(""brand"") conditions += get_sales_invoice_item_field_condition(""item_group"") if accounting_dimensions: common_condition = for dimension in accounting_dimensions: if filters.get(dimension.fieldname): if frappe.get_cached_value(""DocType"", dimension.document_type, ""is_tree""): filters[dimension.fieldname] = get_dimension_with_children( dimension.document_type, filters.get(dimension.fieldname) ) conditions += ( common_condition + ""and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)"".format(dimension.fieldname) ) else: conditions += ( common_condition + ""and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)"".format(dimension.fieldname) ) return conditions "," and exists(select name from `tab{table}` where parent=`tabSales Invoice`.name and ifnull(`tab{table}`.{field}, '') = %({field})s) and exists(select name from `tabSales Invoice Item` where parent=`tabSales Invoice`.name ",23,150,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_conditions(filters): conditions = """" accounting_dimensions = get_accounting_dimensions(as_list=False) or [] accounting_dimensions_list = [d.fieldname for d in accounting_dimensions] if filters.get(""company""): conditions += "" and company=%(company)s"" if filters.get(""customer"") and ""customer"" not in accounting_dimensions_list: conditions += "" and customer = %(customer)s"" if filters.get(""from_date""): conditions += "" and posting_date >= %(from_date)s"" if filters.get(""to_date""): conditions += "" and posting_date <= %(to_date)s"" if filters.get(""owner""): conditions += "" and owner = %(owner)s"" def get_sales_invoice_item_field_condition(field, table=""Sales Invoice Item"") -> str: if not filters.get(field) or field in accounting_dimensions_list: return """" return f conditions += get_sales_invoice_item_field_condition(""mode_of_payments"", ""Sales Invoice Payment"") conditions += get_sales_invoice_item_field_condition(""cost_center"") conditions += get_sales_invoice_item_field_condition(""warehouse"") conditions += get_sales_invoice_item_field_condition(""brand"") conditions += get_sales_invoice_item_field_condition(""item_group"") if accounting_dimensions: common_condition = for dimension in accounting_dimensions: if filters.get(dimension.fieldname): if frappe.get_cached_value(""DocType"", dimension.document_type, ""is_tree""): filters[dimension.fieldname] = get_dimension_with_children( dimension.document_type, filters.get(dimension.fieldname) ) conditions += ( common_condition + ""and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)"".format(dimension.fieldname) ) else: conditions += ( common_condition + ""and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)"".format(dimension.fieldname) ) return conditions ``` ###Assistant : and exists(select name from `tab{table}` where parent=`tabSales Invoice`.name and ifnull(`tab{table}`.{field}, '') = %({field})s) and exists(select name from `tabSales Invoice Item` where parent=`tabSales Invoice`.name " 1056,"def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight): import sklearn if not _is_plotting_supported(): return [] "," Draw and record various common artifacts for classifier For all classifiers, we always log: (1) confusion matrix: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html For only binary classifiers, we will log: (2) precision recall curve: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html (3) roc curve: https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html Steps: 1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets. 2. If the sample_weight argument exists in fit_func (accuracy_score by default has sample_weight), extract it from fit_args or fit_kwargs as (y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput) 3. return a list of artifacts path to be logged :param fitted_estimator: The already fitted regressor :param fit_args: Positional arguments given to fit_func. :param fit_kwargs: Keyword arguments given to fit_func. :return: List of artifacts to be logged ",117,13,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight): import sklearn if not _is_plotting_supported(): return [] ``` ###Assistant : Draw and record various common artifacts for classifier For all classifiers, we always log: (1) confusion matrix: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html For only binary classifiers, we will log: (2) precision recall curve: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html (3) roc curve: https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html Steps: 1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets. 2. If the sample_weight argument exists in fit_func (accuracy_score by default has sample_weight), extract it from fit_args or fit_kwargs as (y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput) 3. return a list of artifacts path to be logged :param fitted_estimator: The already fitted regressor :param fit_args: Positional arguments given to fit_func. :param fit_kwargs: Keyword arguments given to fit_func. :return: List of artifacts to be logged " 1057,"def double_edge_swap(G, nswap=1, max_tries=100, seed=None): if G.is_directed(): raise nx.NetworkXError( ""double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead."" ) if nswap > max_tries: raise nx.NetworkXError(""Number of swaps > number of tries allowed."") if len(G) < 4: raise nx.NetworkXError(""Graph has less than four nodes."") # Instead of choosing uniformly at random from a generated edge list, # this algorithm chooses nonuniformly from the set of nodes with # probability weighted by degree. n = 0 swapcount = 0 keys, degrees = zip(*G.degree()) # keys, degree cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree discrete_sequence = nx.utils.discrete_sequence while swapcount < nswap: # if random.random() < 0.5: continue # trick to avoid periodicities? # pick two random edges without creating edge list # choose source node indices from discrete distribution (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) if ui == xi: continue # same source, skip u = keys[ui] # convert index to label x = keys[xi] # choose target uniformly from neighbors v = seed.choice(list(G[u])) y = seed.choice(list(G[x])) if v == y: continue # same target, skip if (x not in G[u]) and (y not in G[v]): # don't create parallel edges G.add_edge(u, x) G.add_edge(v, y) G.remove_edge(u, v) G.remove_edge(x, y) swapcount += 1 if n >= max_tries: e = ( f""Maximum number of swap attempts ({n}) exceeded "" f""before desired swaps achieved ({nswap})."" ) raise nx.NetworkXAlgorithmError(e) n += 1 return G @py_random_state(3)","Swap two edges in the graph while keeping the node degrees fixed. A double-edge swap removes two randomly chosen edges u-v and x-y and creates the new edges u-x and v-y:: u--v u v becomes | | x--y x y If either the edge u-x or v-y already exist no swap is performed and another attempt is made to find a suitable edge pair. Parameters ---------- G : graph An undirected graph nswap : integer (optional, default=1) Number of double-edge swaps to perform max_tries : integer (optional) Maximum number of attempts to swap edges seed : integer, random_state, or None (default) Indicator of random number generation state. See :ref:`Randomness`. Returns ------- G : graph The graph after double edge swaps. 
Notes ----- Does not enforce any connectivity constraints. The graph G is modified in place. ",135,228,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def double_edge_swap(G, nswap=1, max_tries=100, seed=None): if G.is_directed(): raise nx.NetworkXError( ""double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead."" ) if nswap > max_tries: raise nx.NetworkXError(""Number of swaps > number of tries allowed."") if len(G) < 4: raise nx.NetworkXError(""Graph has less than four nodes."") # Instead of choosing uniformly at random from a generated edge list, # this algorithm chooses nonuniformly from the set of nodes with # probability weighted by degree. n = 0 swapcount = 0 keys, degrees = zip(*G.degree()) # keys, degree cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree discrete_sequence = nx.utils.discrete_sequence while swapcount < nswap: # if random.random() < 0.5: continue # trick to avoid periodicities? # pick two random edges without creating edge list # choose source node indices from discrete distribution (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) if ui == xi: continue # same source, skip u = keys[ui] # convert index to label x = keys[xi] # choose target uniformly from neighbors v = seed.choice(list(G[u])) y = seed.choice(list(G[x])) if v == y: continue # same target, skip if (x not in G[u]) and (y not in G[v]): # don't create parallel edges G.add_edge(u, x) G.add_edge(v, y) G.remove_edge(u, v) G.remove_edge(x, y) swapcount += 1 if n >= max_tries: e = ( f""Maximum number of swap attempts ({n}) exceeded "" f""before desired swaps achieved ({nswap})."" ) raise nx.NetworkXAlgorithmError(e) n += 1 return G @py_random_state(3) ``` ###Assistant : Swap two edges in the graph while keeping the node degrees fixed. A double-edge swap removes two randomly chosen edges u-v and x-y and creates the new edges u-x and v-y:: u--v u v becomes | | x--y x y If either the edge u-x or v-y already exist no swap is performed and another attempt is made to find a suitable edge pair. Parameters ---------- G : graph An undirected graph nswap : integer (optional, default=1) Number of double-edge swaps to perform max_tries : integer (optional) Maximum number of attempts to swap edges seed : integer, random_state, or None (default) Indicator of random number generation state. See :ref:`Randomness`. Returns ------- G : graph The graph after double edge swaps. Notes ----- Does not enforce any connectivity constraints. The graph G is modified in place. " 1058,"def test_dataset_shard_with_task_parallelization(self): config = { ""input"": ""dataset"", ""input_config"": { ""format"": ""json"", ""paths"": self.dset_path, ""parallelism"": 10, }, } NUM_WORKERS = 4 _, shards = get_dataset_and_shards(config, num_workers=NUM_WORKERS) assert len(shards) == NUM_WORKERS + 1 assert shards[0] is None assert all( isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:] ) ","Tests whether the dataset_shard function works correctly with parallelism for reading the dataset.",13,44,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_dataset_shard_with_task_parallelization(self): config = { ""input"": ""dataset"", ""input_config"": { ""format"": ""json"", ""paths"": self.dset_path, ""parallelism"": 10, }, } NUM_WORKERS = 4 _, shards = get_dataset_and_shards(config, num_workers=NUM_WORKERS) assert len(shards) == NUM_WORKERS + 1 assert shards[0] is None assert all( isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:] ) ``` ###Assistant : Tests whether the dataset_shard function works correctly with parallelism for reading the dataset. " 1059,"def test_image_comparison_expect_rms(im1, im2, tol, expect_rms): baseline_dir, result_dir = map(Path, _image_directories(lambda: ""dummy"")) # Copy both ""baseline"" and ""test"" image to result_dir, so that 1) # compare_images writes the diff to result_dir, rather than to the source # tree and 2) the baseline image doesn't appear missing to triage_tests.py. uid = str(uuid.uuid4()) result_im1 = make_test_filename(result_dir / (uid + im1), ""expected"") shutil.copyfile(baseline_dir / im1, result_im1) result_im2 = result_dir / im1 shutil.copyfile(baseline_dir / im2, result_im2) results = compare_images( result_im1, result_im2, tol=tol, in_decorator=True) if expect_rms is None: assert results is None else: assert results is not None assert results['rms'] == approx(expect_rms, abs=1e-4) "," Compare two images, expecting a particular RMS error. im1 and im2 are filenames relative to the baseline_dir directory. tol is the tolerance to pass to compare_images. expect_rms is the expected RMS value, or None. If None, the test will succeed if compare_images succeeds. Otherwise, the test will succeed if compare_images fails and returns an RMS error almost equal to this value. ",61,97,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_image_comparison_expect_rms(im1, im2, tol, expect_rms): baseline_dir, result_dir = map(Path, _image_directories(lambda: ""dummy"")) # Copy both ""baseline"" and ""test"" image to result_dir, so that 1) # compare_images writes the diff to result_dir, rather than to the source # tree and 2) the baseline image doesn't appear missing to triage_tests.py. uid = str(uuid.uuid4()) result_im1 = make_test_filename(result_dir / (uid + im1), ""expected"") shutil.copyfile(baseline_dir / im1, result_im1) result_im2 = result_dir / im1 shutil.copyfile(baseline_dir / im2, result_im2) results = compare_images( result_im1, result_im2, tol=tol, in_decorator=True) if expect_rms is None: assert results is None else: assert results is not None assert results['rms'] == approx(expect_rms, abs=1e-4) ``` ###Assistant : Compare two images, expecting a particular RMS error. im1 and im2 are filenames relative to the baseline_dir directory. tol is the tolerance to pass to compare_images. expect_rms is the expected RMS value, or None. If None, the test will succeed if compare_images succeeds. Otherwise, the test will succeed if compare_images fails and returns an RMS error almost equal to this value. 
" 1060,"def test_versioned_symbols_reserialization(self): module_v2 = torch.jit.load(pytorch_test_dir + ""/jit/fixtures/_test_serialization_subcmul_v2.pt"") buffer = io.BytesIO() torch.jit.save(module_v2, buffer) buffer.seek(0) module_reserialized = torch.jit.load(buffer) subcmul_nodes = sum(""subcmul"" in n.kind() for n in module_reserialized.graph.nodes()) self.assertEqual(subcmul_nodes, 0) "," Tests that loading and saving serialized Torchscript with a versioned symbol won't persist the original function and will inline the versioned builtin. ",22,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_versioned_symbols_reserialization(self): module_v2 = torch.jit.load(pytorch_test_dir + ""/jit/fixtures/_test_serialization_subcmul_v2.pt"") buffer = io.BytesIO() torch.jit.save(module_v2, buffer) buffer.seek(0) module_reserialized = torch.jit.load(buffer) subcmul_nodes = sum(""subcmul"" in n.kind() for n in module_reserialized.graph.nodes()) self.assertEqual(subcmul_nodes, 0) ``` ###Assistant : Tests that loading and saving serialized Torchscript with a versioned symbol won't persist the original function and will inline the versioned builtin. " 1061,"def test_stylesheet_apply_takes_final_rule_in_specificity_clash(): css = "".a {background: red; color: lime;} .b {background: blue;}"" stylesheet = _make_stylesheet(css) node = DOMNode(classes=""a b"", id=""c"") stylesheet.apply(node) assert node.styles.color == Color(0, 255, 0) # color: lime assert node.styles.background == Color(0, 0, 255) # background: blue ",".a and .b both contain background and have same specificity, so .b wins since it was declared last - the background should be blue.",24,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stylesheet_apply_takes_final_rule_in_specificity_clash(): css = "".a {background: red; color: lime;} .b {background: blue;}"" stylesheet = _make_stylesheet(css) node = DOMNode(classes=""a b"", id=""c"") stylesheet.apply(node) assert node.styles.color == Color(0, 255, 0) # color: lime assert node.styles.background == Color(0, 0, 255) # background: blue ``` ###Assistant : .a and .b both contain background and have same specificity, so .b wins since it was declared last - the background should be blue. 
" 1062,"def upgrade(): conn = op.get_bind() if conn.dialect.name == 'sqlite': op.execute('PRAGMA foreign_keys=OFF') with op.batch_alter_table('ab_view_menu', schema=None) as batch_op: batch_op.create_unique_constraint(batch_op.f('ab_view_menu_name_uq'), ['name']) op.execute('PRAGMA foreign_keys=ON') elif conn.dialect.name == 'mysql': with op.batch_alter_table('ab_register_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) elif conn.dialect.name == 'mssql': with op.batch_alter_table('ab_register_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, 'ab_register_user') for k, _ in constraints.get('UNIQUE').items(): batch_op.drop_constraint(k, type_='unique') batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['username']) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, 'ab_user') for k, _ in constraints.get('UNIQUE').items(): batch_op.drop_constraint(k, type_='unique') batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['username']) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['email']) ",Apply Update migration for FAB tables to add missing constraints,10,116,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def upgrade(): conn = op.get_bind() if conn.dialect.name == 'sqlite': op.execute('PRAGMA foreign_keys=OFF') with op.batch_alter_table('ab_view_menu', schema=None) as batch_op: batch_op.create_unique_constraint(batch_op.f('ab_view_menu_name_uq'), ['name']) op.execute('PRAGMA foreign_keys=ON') elif conn.dialect.name == 'mysql': with op.batch_alter_table('ab_register_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) elif conn.dialect.name == 'mssql': with op.batch_alter_table('ab_register_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, 'ab_register_user') for k, _ in constraints.get('UNIQUE').items(): batch_op.drop_constraint(k, type_='unique') batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['username']) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, 'ab_user') for k, _ in constraints.get('UNIQUE').items(): batch_op.drop_constraint(k, type_='unique') batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['username']) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['email']) ``` ###Assistant : Apply Update migration for FAB tables to add missing constraints " 1063,"def upgrade(): op.drop_table('ai_table') conn = op.get_bind() # views was created with unnamed fk. Therefore need recreate it op.create_table( 'view_tmp', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('query', sa.String(), nullable=False), sa.Column('integration_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['integration_id'], ['integration.id'], name='fk_integration_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) conn.execute(text()) op.drop_table('view') op.rename_table('view_tmp', 'view') op.create_table( 'analysis', sa.Column('id', sa.Integer(), nullable=False), sa.Column('analysis', mindsdb.interfaces.storage.db.Json(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id') ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.add_column(sa.Column('analysis_id', sa.Integer(), nullable=True)) batch_op.create_foreign_key('fk_analysis_id', 'analysis', ['analysis_id'], ['id']) batch_op.add_column(sa.Column('ds_class', sa.String(), nullable=True)) session = sa.orm.Session(bind=conn) dsatasources = conn.execute('select id, analysis from datasource').fetchall() for row in dsatasources: if row['analysis'] is not None: # NOTE 'returning' is relatively new in sqlite, so better will be use select after insert. 
conn.execute( text(), { 'id': row['id'] } ) analysis_id = conn.execute(text()).fetchall() conn.execute( text(), { 'analysis_id': analysis_id[0][0], 'id': row['id'] } ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.drop_column('analysis') op.create_table( 'file', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('source_file_path', sa.String(), nullable=False), sa.Column('file_path', sa.String(), nullable=False), sa.Column('row_count', sa.Integer(), nullable=False), sa.Column('columns', mindsdb.interfaces.storage.db.Json(), nullable=False), # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ????? # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? erver_default=func.now() # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now), # ????? # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now, server_onupdate=datetime.datetime.now), # ????? erver_default=func.now() sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ????? sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? erver_default=func.now() sa.Column('analysis_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['analysis_id'], ['analysis.id'], name='fk_analysis_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) # delete ds where data is none dsatasources = conn.execute(text('select * from datasource')).fetchall() for ds in dsatasources: if ds['data'] is None: conn.execute(text('delete from datasource where id = :id'), {'id': ds['id']}) continue ds_data = json.loads(ds['data']) creation_info = json.loads(ds['creation_info']) datasource_name = ds_data.get('source_type') if datasource_name == 'file': created_at = None if isinstance(ds['created_at'], str): created_at = datetime.datetime.fromisoformat(ds['created_at']) elif isinstance(ds['created_at'], [float, int]): created_at = datetime.fromtimestamp(ds['created_at']) updated_at = None if isinstance(ds['updated_at'], str): updated_at = datetime.datetime.fromisoformat(ds['updated_at']) elif isinstance(ds['updated_at'], [float, int]): updated_at = datetime.fromtimestamp(ds['updated_at']) file = mindsdb.interfaces.storage.db.File( name=ds['name'], company_id=ds['company_id'], source_file_path=ds_data['source'], file_path=creation_info['args'][0], row_count=ds_data['row_count'], columns=ds_data['columns'], created_at=created_at, updated_at=updated_at, analysis_id=ds['analysis_id'] ) session.add(file) conn.execute( text(), { 'datasource_name': datasource_name, 'company_id': ds['company_id'], 'ds_class': creation_info['class'], 'id': ds['id'] } ) session.commit() op.rename_table('datasource', 'dataset') with op.batch_alter_table('dataset', schema=None) as batch_op: batch_op.create_foreign_key('fk_integration_id', 'integration', ['integration_id'], ['id']) # NOTE two different 'batch' is necessary, in other way FK is not creating with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.alter_column('datasource_id', new_column_name='dataset_id') with op.batch_alter_table('predictor', schema=None) as 
batch_op: batch_op.create_foreign_key('fk_dataset_id', 'dataset', ['dataset_id'], ['id']) with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) with op.batch_alter_table('integration', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) with op.batch_alter_table('dataset', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) "," insert into view_tmp (id, name, company_id, query, integration_id) select id, name, company_id, query, datasource_id from view; insert into analysis (analysis) select analysis from datasource where id = :id; select id from analysis order by id desc limit 1; update datasource set analysis_id = :analysis_id where id = :id update datasource set integration_id = (select id from integration where name = :datasource_name and company_id = :company_id), ds_class = :ds_class where id = :id ",72,386,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def upgrade(): op.drop_table('ai_table') conn = op.get_bind() # views was created with unnamed fk. Therefore need recreate it op.create_table( 'view_tmp', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('query', sa.String(), nullable=False), sa.Column('integration_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['integration_id'], ['integration.id'], name='fk_integration_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) conn.execute(text()) op.drop_table('view') op.rename_table('view_tmp', 'view') op.create_table( 'analysis', sa.Column('id', sa.Integer(), nullable=False), sa.Column('analysis', mindsdb.interfaces.storage.db.Json(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id') ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.add_column(sa.Column('analysis_id', sa.Integer(), nullable=True)) batch_op.create_foreign_key('fk_analysis_id', 'analysis', ['analysis_id'], ['id']) batch_op.add_column(sa.Column('ds_class', sa.String(), nullable=True)) session = sa.orm.Session(bind=conn) dsatasources = conn.execute('select id, analysis from datasource').fetchall() for row in dsatasources: if row['analysis'] is not None: # NOTE 'returning' is relatively new in sqlite, so better will be use select after insert. conn.execute( text(), { 'id': row['id'] } ) analysis_id = conn.execute(text()).fetchall() conn.execute( text(), { 'analysis_id': analysis_id[0][0], 'id': row['id'] } ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.drop_column('analysis') op.create_table( 'file', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('source_file_path', sa.String(), nullable=False), sa.Column('file_path', sa.String(), nullable=False), sa.Column('row_count', sa.Integer(), nullable=False), sa.Column('columns', mindsdb.interfaces.storage.db.Json(), nullable=False), # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ????? 
# sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? erver_default=func.now() # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now), # ????? # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now, server_onupdate=datetime.datetime.now), # ????? erver_default=func.now() sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ????? sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? erver_default=func.now() sa.Column('analysis_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['analysis_id'], ['analysis.id'], name='fk_analysis_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) # delete ds where data is none dsatasources = conn.execute(text('select * from datasource')).fetchall() for ds in dsatasources: if ds['data'] is None: conn.execute(text('delete from datasource where id = :id'), {'id': ds['id']}) continue ds_data = json.loads(ds['data']) creation_info = json.loads(ds['creation_info']) datasource_name = ds_data.get('source_type') if datasource_name == 'file': created_at = None if isinstance(ds['created_at'], str): created_at = datetime.datetime.fromisoformat(ds['created_at']) elif isinstance(ds['created_at'], [float, int]): created_at = datetime.fromtimestamp(ds['created_at']) updated_at = None if isinstance(ds['updated_at'], str): updated_at = datetime.datetime.fromisoformat(ds['updated_at']) elif isinstance(ds['updated_at'], [float, int]): updated_at = datetime.fromtimestamp(ds['updated_at']) file = mindsdb.interfaces.storage.db.File( name=ds['name'], company_id=ds['company_id'], source_file_path=ds_data['source'], file_path=creation_info['args'][0], row_count=ds_data['row_count'], columns=ds_data['columns'], created_at=created_at, updated_at=updated_at, analysis_id=ds['analysis_id'] ) session.add(file) conn.execute( text(), { 'datasource_name': datasource_name, 'company_id': ds['company_id'], 'ds_class': creation_info['class'], 'id': ds['id'] } ) session.commit() op.rename_table('datasource', 'dataset') with op.batch_alter_table('dataset', schema=None) as batch_op: batch_op.create_foreign_key('fk_integration_id', 'integration', ['integration_id'], ['id']) # NOTE two different 'batch' is necessary, in other way FK is not creating with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.alter_column('datasource_id', new_column_name='dataset_id') with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.create_foreign_key('fk_dataset_id', 'dataset', ['dataset_id'], ['id']) with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) with op.batch_alter_table('integration', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) with op.batch_alter_table('dataset', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) ``` ###Assistant : insert into view_tmp (id, name, company_id, query, integration_id) select id, name, company_id, query, datasource_id from view; insert into analysis (analysis) select analysis from datasource where id = :id; select id from analysis order by id 
desc limit 1; update datasource set analysis_id = :analysis_id where id = :id update datasource set integration_id = (select id from integration where name = :datasource_name and company_id = :company_id), ds_class = :ds_class where id = :id " 1064,"def _predict_recursive(self, X, sample_weight, cluster_node): if cluster_node.left is None: # This cluster has no subcluster. Labels are just the label of the cluster. return np.full(X.shape[0], cluster_node.label, dtype=np.int32) # Determine if data points belong to the left or right subcluster centers = np.vstack((cluster_node.left.center, cluster_node.right.center)) if hasattr(self, ""_X_mean""): centers += self._X_mean cluster_labels = _labels_inertia_threadpool_limit( X, sample_weight, centers, self._n_threads, return_inertia=False, ) mask = cluster_labels == 0 # Compute the labels for each subset of the data points. labels = np.full(X.shape[0], -1, dtype=np.int32) labels[mask] = self._predict_recursive( X[mask], sample_weight[mask], cluster_node.left ) labels[~mask] = self._predict_recursive( X[~mask], sample_weight[~mask], cluster_node.right ) return labels ","Predict recursively by going down the hierarchical tree. Parameters ---------- X : {ndarray, csr_matrix} of shape (n_samples, n_features) The data points, currently assigned to `cluster_node`, to predict between the subclusters of this node. sample_weight : ndarray of shape (n_samples,) The weights for each observation in X. cluster_node : _BisectingTree node object The cluster node of the hierarchical tree. Returns ------- labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to. ",74,95,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _predict_recursive(self, X, sample_weight, cluster_node): if cluster_node.left is None: # This cluster has no subcluster. Labels are just the label of the cluster. return np.full(X.shape[0], cluster_node.label, dtype=np.int32) # Determine if data points belong to the left or right subcluster centers = np.vstack((cluster_node.left.center, cluster_node.right.center)) if hasattr(self, ""_X_mean""): centers += self._X_mean cluster_labels = _labels_inertia_threadpool_limit( X, sample_weight, centers, self._n_threads, return_inertia=False, ) mask = cluster_labels == 0 # Compute the labels for each subset of the data points. labels = np.full(X.shape[0], -1, dtype=np.int32) labels[mask] = self._predict_recursive( X[mask], sample_weight[mask], cluster_node.left ) labels[~mask] = self._predict_recursive( X[~mask], sample_weight[~mask], cluster_node.right ) return labels ``` ###Assistant : Predict recursively by going down the hierarchical tree. Parameters ---------- X : {ndarray, csr_matrix} of shape (n_samples, n_features) The data points, currently assigned to `cluster_node`, to predict between the subclusters of this node. sample_weight : ndarray of shape (n_samples,) The weights for each observation in X. cluster_node : _BisectingTree node object The cluster node of the hierarchical tree. Returns ------- labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to. 
" 1065,"def inference_voice_conversion(self, reference_wav, speaker_id=None, d_vector=None, reference_speaker_id=None, reference_d_vector=None): # compute spectrograms y = wav_to_spec(reference_wav, self.config.audio.fft_size, self.config.audio.hop_length, self.config.audio.win_length, center=False).transpose(1, 2) y_lengths = torch.tensor([y.size(-1)]).to(y.device) speaker_cond_src = reference_speaker_id if reference_speaker_id is not None else reference_d_vector speaker_cond_tgt = speaker_id if speaker_id is not None else d_vector # print(y.shape, y_lengths.shape) wav, _, _ = self.voice_conversion(y, y_lengths, speaker_cond_src, speaker_cond_tgt) return wav ","Inference for voice conversion Args: reference_wav (Tensor): Reference wavform. Tensor of shape [B, T] speaker_id (Tensor): speaker_id of the target speaker. Tensor of shape [B] d_vector (Tensor): d_vector embedding of target speaker. Tensor of shape `[B, C]` reference_speaker_id (Tensor): speaker_id of the reference_wav speaker. Tensor of shape [B] reference_d_vector (Tensor): d_vector embedding of the reference_wav speaker. Tensor of shape `[B, C]` ",61,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inference_voice_conversion(self, reference_wav, speaker_id=None, d_vector=None, reference_speaker_id=None, reference_d_vector=None): # compute spectrograms y = wav_to_spec(reference_wav, self.config.audio.fft_size, self.config.audio.hop_length, self.config.audio.win_length, center=False).transpose(1, 2) y_lengths = torch.tensor([y.size(-1)]).to(y.device) speaker_cond_src = reference_speaker_id if reference_speaker_id is not None else reference_d_vector speaker_cond_tgt = speaker_id if speaker_id is not None else d_vector # print(y.shape, y_lengths.shape) wav, _, _ = self.voice_conversion(y, y_lengths, speaker_cond_src, speaker_cond_tgt) return wav ``` ###Assistant : Inference for voice conversion Args: reference_wav (Tensor): Reference wavform. Tensor of shape [B, T] speaker_id (Tensor): speaker_id of the target speaker. Tensor of shape [B] d_vector (Tensor): d_vector embedding of target speaker. Tensor of shape `[B, C]` reference_speaker_id (Tensor): speaker_id of the reference_wav speaker. Tensor of shape [B] reference_d_vector (Tensor): d_vector embedding of the reference_wav speaker. Tensor of shape `[B, C]` " 1066,"def test_nonconflicting_specified_basename(self): self.router.register(r'notes', NoteViewSet, basename='notes') self.router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='notes_kwduplicate') self.router.register(r'notes_duplicate', NoteViewSet, basename='notes_duplicate') "," Ensure 2 routers with the same model, and a distinct basename specified on each does not throw an exception ",19,11,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_nonconflicting_specified_basename(self): self.router.register(r'notes', NoteViewSet, basename='notes') self.router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='notes_kwduplicate') self.router.register(r'notes_duplicate', NoteViewSet, basename='notes_duplicate') ``` ###Assistant : Ensure 2 routers with the same model, and a distinct basename specified on each does not throw an exception " 1067,"async def test_state(hass, setup_comp): state = hass.states.get(COVER_GROUP) # No entity has a valid state -> group state unknown assert state.state == STATE_UNKNOWN assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME assert state.attributes[ATTR_ENTITY_ID] == [ DEMO_COVER, DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT, ] assert ATTR_ASSUMED_STATE not in state.attributes assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0 assert ATTR_CURRENT_POSITION not in state.attributes assert ATTR_CURRENT_TILT_POSITION not in state.attributes # The group state is unknown if all group members are unknown or unavailable. for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNAVAILABLE, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNKNOWN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN # At least one member opening -> group opening for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPENING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_OPENING # At least one member closing -> group closing for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSING # At least one member open -> group open for state_1 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): 
for state_3 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPEN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_OPEN # At least one member closed -> group closed for state_1 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSED, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSED # All group members removed from the state machine -> unknown hass.states.async_remove(DEMO_COVER) hass.states.async_remove(DEMO_COVER_POS) hass.states.async_remove(DEMO_COVER_TILT) hass.states.async_remove(DEMO_TILT) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN @pytest.mark.parametrize(""config_count"", [(CONFIG_ATTRIBUTES, 1)])","Test handling of state. The group state is unknown if all group members are unknown or unavailable. Otherwise, the group state is opening if at least one group member is opening. Otherwise, the group state is closing if at least one group member is closing. Otherwise, the group state is open if at least one group member is open. Otherwise, the group state is closed. ",65,389,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_state(hass, setup_comp): state = hass.states.get(COVER_GROUP) # No entity has a valid state -> group state unknown assert state.state == STATE_UNKNOWN assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME assert state.attributes[ATTR_ENTITY_ID] == [ DEMO_COVER, DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT, ] assert ATTR_ASSUMED_STATE not in state.attributes assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0 assert ATTR_CURRENT_POSITION not in state.attributes assert ATTR_CURRENT_TILT_POSITION not in state.attributes # The group state is unknown if all group members are unknown or unavailable. 
for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNAVAILABLE, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNKNOWN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN # At least one member opening -> group opening for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPENING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_OPENING # At least one member closing -> group closing for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSING # At least one member open -> group open for state_1 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPEN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_OPEN # At least one member closed -> group closed for state_1 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSED, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSED # All group members removed from the state machine -> unknown hass.states.async_remove(DEMO_COVER) hass.states.async_remove(DEMO_COVER_POS) 
hass.states.async_remove(DEMO_COVER_TILT) hass.states.async_remove(DEMO_TILT) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN @pytest.mark.parametrize(""config_count"", [(CONFIG_ATTRIBUTES, 1)]) ``` ###Assistant : Test handling of state. The group state is unknown if all group members are unknown or unavailable. Otherwise, the group state is opening if at least one group member is opening. Otherwise, the group state is closing if at least one group member is closing. Otherwise, the group state is open if at least one group member is open. Otherwise, the group state is closed. " 1068,"def connect(self, signal, func): if self._signals is not None: _api.check_in_list(self._signals, signal=signal) self._func_cid_map.setdefault(signal, {}) proxy = _weak_or_strong_ref(func, self._remove_proxy) if proxy in self._func_cid_map[signal]: return self._func_cid_map[signal][proxy] cid = next(self._cid_gen) self._func_cid_map[signal][proxy] = cid self.callbacks.setdefault(signal, {}) self.callbacks[signal][cid] = proxy return cid ",Register *func* to be called when signal *signal* is generated.,10,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def connect(self, signal, func): if self._signals is not None: _api.check_in_list(self._signals, signal=signal) self._func_cid_map.setdefault(signal, {}) proxy = _weak_or_strong_ref(func, self._remove_proxy) if proxy in self._func_cid_map[signal]: return self._func_cid_map[signal][proxy] cid = next(self._cid_gen) self._func_cid_map[signal][proxy] = cid self.callbacks.setdefault(signal, {}) self.callbacks[signal][cid] = proxy return cid ``` ###Assistant : Register *func* to be called when signal *signal* is generated. " 1069,"def get_or_create_account(company_name, account): default_root_type = ""Liability"" root_type = account.get(""root_type"", default_root_type) existing_accounts = frappe.get_all( ""Account"", filters={""company"": company_name, ""root_type"": root_type}, or_filters={ ""account_name"": account.get(""account_name""), ""account_number"": account.get(""account_number""), }, ) if existing_accounts: return frappe.get_doc(""Account"", existing_accounts[0].name) tax_group = get_or_create_tax_group(company_name, root_type) account[""doctype""] = ""Account"" account[""company""] = company_name account[""parent_account""] = tax_group account[""report_type""] = ""Balance Sheet"" account[""account_type""] = ""Tax"" account[""root_type""] = root_type account[""is_group""] = 0 doc = frappe.get_doc(account) doc.flags.ignore_links = True doc.flags.ignore_validate = True doc.insert(ignore_permissions=True, ignore_mandatory=True) return doc "," Check if account already exists. If not, create it. Return a tax account or None. ",15,69,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_or_create_account(company_name, account): default_root_type = ""Liability"" root_type = account.get(""root_type"", default_root_type) existing_accounts = frappe.get_all( ""Account"", filters={""company"": company_name, ""root_type"": root_type}, or_filters={ ""account_name"": account.get(""account_name""), ""account_number"": account.get(""account_number""), }, ) if existing_accounts: return frappe.get_doc(""Account"", existing_accounts[0].name) tax_group = get_or_create_tax_group(company_name, root_type) account[""doctype""] = ""Account"" account[""company""] = company_name account[""parent_account""] = tax_group account[""report_type""] = ""Balance Sheet"" account[""account_type""] = ""Tax"" account[""root_type""] = root_type account[""is_group""] = 0 doc = frappe.get_doc(account) doc.flags.ignore_links = True doc.flags.ignore_validate = True doc.insert(ignore_permissions=True, ignore_mandatory=True) return doc ``` ###Assistant : Check if account already exists. If not, create it. Return a tax account or None. " 1070,"def set_default_options(self) -> None: default = self.cli_opts.get_option_values() logger.debug(default) self._gui_objects.default_options = default self.project.set_default_options() "," Set the default options for :mod:`lib.gui.projects` The Default GUI options are stored on Faceswap startup. Exposed as the :attr:`_default_opts` for a project cannot be set until after the main Command Tabs have been loaded. ",34,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_default_options(self) -> None: default = self.cli_opts.get_option_values() logger.debug(default) self._gui_objects.default_options = default self.project.set_default_options() ``` ###Assistant : Set the default options for :mod:`lib.gui.projects` The Default GUI options are stored on Faceswap startup. Exposed as the :attr:`_default_opts` for a project cannot be set until after the main Command Tabs have been loaded. " 1071,"def _format_lines(self, tokensource): nocls = self.noclasses lsep = self.lineseparator tagsfile = self.tagsfile lspan = '' line = [] for ttype, value in tokensource: try: cspan = self.span_element_openers[ttype] except KeyError: title = ' title=""%s""' % '.'.join(ttype) if self.debug_token_types else '' if nocls: css_style = self._get_css_inline_styles(ttype) if css_style: css_style = self.class2style[css_style][0] cspan = '' % (css_style, title) else: cspan = '' else: css_class = self._get_css_classes(ttype) if css_class: cspan = '' % (css_class, title) else: cspan = '' self.span_element_openers[ttype] = cspan parts = self._translate_parts(value) if tagsfile and ttype in Token.Name: filename, linenumber = self._lookup_ctag(value) if linenumber: base, filename = os.path.split(filename) if base: base += '/' filename, extension = os.path.splitext(filename) url = self.tagurlformat % {'path': base, 'fname': filename, 'fext': extension} parts[0] = ""
    %s"" % \ (url, self.lineanchors, linenumber, parts[0]) parts[-1] = parts[-1] + """" # for all but the last line for part in parts[:-1]: if line: if lspan != cspan: line.extend(((lspan and ''), cspan, part, (cspan and ''), lsep)) else: # both are the same line.extend((part, (lspan and ''), lsep)) yield 1, ''.join(line) line = [] elif part: yield 1, ''.join((cspan, part, (cspan and ''), lsep)) else: yield 1, lsep # for the last line if line and parts[-1]: if lspan != cspan: line.extend(((lspan and ''), cspan, parts[-1])) lspan = cspan else: line.append(parts[-1]) elif parts[-1]: line = [cspan, parts[-1]] lspan = cspan # else we neither have to open a new span nor set lspan if line: line.extend(((lspan and ''), lsep)) yield 1, ''.join(line) "," Just format the tokens, without any wrapping tags. Yield individual lines. ",11,244,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _format_lines(self, tokensource): nocls = self.noclasses lsep = self.lineseparator tagsfile = self.tagsfile lspan = '' line = [] for ttype, value in tokensource: try: cspan = self.span_element_openers[ttype] except KeyError: title = ' title=""%s""' % '.'.join(ttype) if self.debug_token_types else '' if nocls: css_style = self._get_css_inline_styles(ttype) if css_style: css_style = self.class2style[css_style][0] cspan = '' % (css_style, title) else: cspan = '' else: css_class = self._get_css_classes(ttype) if css_class: cspan = '' % (css_class, title) else: cspan = '' self.span_element_openers[ttype] = cspan parts = self._translate_parts(value) if tagsfile and ttype in Token.Name: filename, linenumber = self._lookup_ctag(value) if linenumber: base, filename = os.path.split(filename) if base: base += '/' filename, extension = os.path.splitext(filename) url = self.tagurlformat % {'path': base, 'fname': filename, 'fext': extension} parts[0] = ""%s"" % \ (url, self.lineanchors, linenumber, parts[0]) parts[-1] = parts[-1] + """" # for all but the last line for part in parts[:-1]: if line: if lspan != cspan: line.extend(((lspan and ''), cspan, part, (cspan and ''), lsep)) else: # both are the same line.extend((part, (lspan and ''), lsep)) yield 1, ''.join(line) line = [] elif part: yield 1, ''.join((cspan, part, (cspan and ''), lsep)) else: yield 1, lsep # for the last line if line and parts[-1]: if lspan != cspan: line.extend(((lspan and ''), cspan, parts[-1])) lspan = cspan else: line.append(parts[-1]) elif parts[-1]: line = [cspan, parts[-1]] lspan = cspan # else we neither have to open a new span nor set lspan if line: line.extend(((lspan and ''), lsep)) yield 1, ''.join(line) ``` ###Assistant : Just format the tokens, without any wrapping tags. Yield individual lines. " 1072,"def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: search_results = self._search().get(f""{self.resource_type}s"", []) if len(search_results) > 1: raise DuplicateResourceError(""Two or more ressources exist with the same name."") if len(search_results) == 1: return search_results[0] else: return None ","Find the remote resource on the Airbyte instance associated with the current resource. Raises: DuplicateResourceError: raised if the search results return multiple resources. Returns: Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: The remote resource found. ",31,33,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: search_results = self._search().get(f""{self.resource_type}s"", []) if len(search_results) > 1: raise DuplicateResourceError(""Two or more ressources exist with the same name."") if len(search_results) == 1: return search_results[0] else: return None ``` ###Assistant : Find the remote resource on the Airbyte instance associated with the current resource. Raises: DuplicateResourceError: raised if the search results return multiple resources. Returns: Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: The remote resource found. " 1073,"def _get_compile_args(self, user_metrics=True): self._assert_compile_was_called() # pylint: disable=protected-access saved_metrics = self.compiled_metrics._user_metrics saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics if not user_metrics: if saved_metrics is not None: saved_metrics = self.compiled_metrics._metrics if saved_weighted_metrics is not None: saved_weighted_metrics = self.compiled_metrics._weighted_metrics compile_args = { ""optimizer"": self.optimizer, ""loss"": self.compiled_loss._user_losses, ""metrics"": saved_metrics, ""weighted_metrics"": saved_weighted_metrics, ""loss_weights"": self.compiled_loss._user_loss_weights, } # pylint: enable=protected-access return compile_args ","Used for saving or cloning a Model. Args: user_metrics: Whether to return user-supplied metrics or `Metric` objects. Defaults to returning the user-supplied metrics. Returns: Dictionary of arguments that were used when compiling the model. ",34,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_compile_args(self, user_metrics=True): self._assert_compile_was_called() # pylint: disable=protected-access saved_metrics = self.compiled_metrics._user_metrics saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics if not user_metrics: if saved_metrics is not None: saved_metrics = self.compiled_metrics._metrics if saved_weighted_metrics is not None: saved_weighted_metrics = self.compiled_metrics._weighted_metrics compile_args = { ""optimizer"": self.optimizer, ""loss"": self.compiled_loss._user_losses, ""metrics"": saved_metrics, ""weighted_metrics"": saved_weighted_metrics, ""loss_weights"": self.compiled_loss._user_loss_weights, } # pylint: enable=protected-access return compile_args ``` ###Assistant : Used for saving or cloning a Model. Args: user_metrics: Whether to return user-supplied metrics or `Metric` objects. Defaults to returning the user-supplied metrics. Returns: Dictionary of arguments that were used when compiling the model. " 1074,"def test_norestexdoc(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) mname = ""blah"" monkeypatch.setattr(sys, ""argv"", f'f2py -m {mname} {ipath} --no-rest-doc'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert ""ReST Documentation is saved to file"" not in out ","Ensures that TeX documentation is written out CLI :: --no-rest-doc ",10,34,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_norestexdoc(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) mname = ""blah"" monkeypatch.setattr(sys, ""argv"", f'f2py -m {mname} {ipath} --no-rest-doc'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert ""ReST Documentation is saved to file"" not in out ``` ###Assistant : Ensures that TeX documentation is written out CLI :: --no-rest-doc " 1075,"def set_weights(self, weights): params = self.weights if len(params) != len(weights): raise ValueError( ""Length of the specified weight list ("" + str(len(weights)) + "") does not match the number of weights "" ""of the optimizer ("" + str(len(params)) + "")"" ) weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError( ""Optimizer weight shape "" + str(pv.shape) + "" not compatible with "" ""provided weight shape "" + str(w.shape) ) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) ","Sets the weights of the optimizer, from Numpy arrays. Should only be called after computing the gradients (otherwise the optimizer has no weights). Args: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights of the optimizer (i.e. it should match the output of `get_weights`). Raises: ValueError: in case of incompatible weight shapes. ",65,82,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_weights(self, weights): params = self.weights if len(params) != len(weights): raise ValueError( ""Length of the specified weight list ("" + str(len(weights)) + "") does not match the number of weights "" ""of the optimizer ("" + str(len(params)) + "")"" ) weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError( ""Optimizer weight shape "" + str(pv.shape) + "" not compatible with "" ""provided weight shape "" + str(w.shape) ) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) ``` ###Assistant : Sets the weights of the optimizer, from Numpy arrays. Should only be called after computing the gradients (otherwise the optimizer has no weights). Args: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights of the optimizer (i.e. it should match the output of `get_weights`). Raises: ValueError: in case of incompatible weight shapes. " 1076,"def extract_tensors_from_dataset(dataset): iterator = get_iterator(dataset) inputs, targets, sample_weight = unpack_iterator_input(iterator) return inputs, targets, sample_weight ","Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset. Args: dataset: Dataset instance. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None. ",29,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extract_tensors_from_dataset(dataset): iterator = get_iterator(dataset) inputs, targets, sample_weight = unpack_iterator_input(iterator) return inputs, targets, sample_weight ``` ###Assistant : Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset. Args: dataset: Dataset instance. Returns: Tuple of tensors `x, y, weights`. 
`y` and `weights` entry may be None. " 1077,"def _set_autocommit(self, autocommit): raise NotImplementedError( ""subclasses of BaseDatabaseWrapper may require a _set_autocommit() method"" ) # ##### Generic transaction management methods ##### "," Backend-specific implementation to enable or disable autocommit. ",7,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_autocommit(self, autocommit): raise NotImplementedError( ""subclasses of BaseDatabaseWrapper may require a _set_autocommit() method"" ) # ##### Generic transaction management methods ##### ``` ###Assistant : Backend-specific implementation to enable or disable autocommit. " 1078,"def get_edit_upload_form_context_data(self): edit_form_class = self.get_edit_form_class() return { self.context_upload_name: self.upload_object, ""edit_action"": reverse( self.edit_upload_url_name, args=(self.upload_object.id,) ), ""delete_action"": reverse( self.delete_upload_url_name, args=(self.upload_object.id,) ), ""form"": edit_form_class( instance=self.object, prefix=""%s-%d"" % (self.edit_upload_form_prefix, self.upload_object.id), user=self.request.user, ), } "," Return the context data necessary for rendering the HTML form for supplying the metadata to turn an upload object into a final object ",23,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_edit_upload_form_context_data(self): edit_form_class = self.get_edit_form_class() return { self.context_upload_name: self.upload_object, ""edit_action"": reverse( self.edit_upload_url_name, args=(self.upload_object.id,) ), ""delete_action"": reverse( self.delete_upload_url_name, args=(self.upload_object.id,) ), ""form"": edit_form_class( instance=self.object, prefix=""%s-%d"" % (self.edit_upload_form_prefix, self.upload_object.id), user=self.request.user, ), } ``` ###Assistant : Return the context data necessary for rendering the HTML form for supplying the metadata to turn an upload object into a final object " 1079,"def test_overriding_has_module_permission(self): articles = Article._meta.verbose_name_plural.title() sections = Section._meta.verbose_name_plural.title() index_url = reverse(""admin7:index"") self.client.force_login(self.superuser) response = self.client.get(index_url) self.assertContains(response, sections) self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.viewuser) response = self.client.get(index_url) self.assertNotContains(response, ""admin_views"") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.adduser) response = self.client.get(index_url) self.assertNotContains(response, ""admin_views"") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.changeuser) response = self.client.get(index_url) self.assertNotContains(response, ""admin_views"") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.deleteuser) response = self.client.get(index_url) self.assertNotContains(response, articles) # The app list displays Sections but not Articles as the latter has # ModelAdmin.has_module_permission() = False. 
self.client.force_login(self.superuser) response = self.client.get(reverse(""admin7:app_list"", args=(""admin_views"",))) self.assertContains(response, sections) self.assertNotContains(response, articles) "," If has_module_permission() always returns False, the module shouldn't be displayed on the admin index page for any users. ",18,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_overriding_has_module_permission(self): articles = Article._meta.verbose_name_plural.title() sections = Section._meta.verbose_name_plural.title() index_url = reverse(""admin7:index"") self.client.force_login(self.superuser) response = self.client.get(index_url) self.assertContains(response, sections) self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.viewuser) response = self.client.get(index_url) self.assertNotContains(response, ""admin_views"") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.adduser) response = self.client.get(index_url) self.assertNotContains(response, ""admin_views"") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.changeuser) response = self.client.get(index_url) self.assertNotContains(response, ""admin_views"") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.deleteuser) response = self.client.get(index_url) self.assertNotContains(response, articles) # The app list displays Sections but not Articles as the latter has # ModelAdmin.has_module_permission() = False. self.client.force_login(self.superuser) response = self.client.get(reverse(""admin7:app_list"", args=(""admin_views"",))) self.assertContains(response, sections) self.assertNotContains(response, articles) ``` ###Assistant : If has_module_permission() always returns False, the module shouldn't be displayed on the admin index page for any users. " 1080,"def non_field_errors(self): return self.errors.get( NON_FIELD_ERRORS, self.error_class(error_class=""nonfield"", renderer=self.renderer), ) "," Return an ErrorList of errors that aren't associated with a particular field -- i.e., from Form.clean(). Return an empty ErrorList if there are none. ",24,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def non_field_errors(self): return self.errors.get( NON_FIELD_ERRORS, self.error_class(error_class=""nonfield"", renderer=self.renderer), ) ``` ###Assistant : Return an ErrorList of errors that aren't associated with a particular field -- i.e., from Form.clean(). Return an empty ErrorList if there are none. " 1081,"def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, VanModel): module.gradient_checkpointing = value VAN_START_DOCSTRING = r VAN_INPUTS_DOCSTRING = r @add_start_docstrings( ""The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding layer."", VAN_START_DOCSTRING, )"," This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`VanConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See [`AutoFeatureExtractor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all stages. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. ",128,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, VanModel): module.gradient_checkpointing = value VAN_START_DOCSTRING = r VAN_INPUTS_DOCSTRING = r @add_start_docstrings( ""The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding layer."", VAN_START_DOCSTRING, ) ``` ###Assistant : This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`VanConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See [`AutoFeatureExtractor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all stages. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. " 1082,"def update(self, dest, url, rev_options): # type: (str, HiddenText, RevOptions) -> None raise NotImplementedError "," Update an already-existing repo to the given ``rev_options``. Args: rev_options: a RevOptions object. ",13,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self, dest, url, rev_options): # type: (str, HiddenText, RevOptions) -> None raise NotImplementedError ``` ###Assistant : Update an already-existing repo to the given ``rev_options``. Args: rev_options: a RevOptions object. " 1083,"def get_nested_field(value, field): if field == '__self__': return value fields = field.split('__') for fld in fields: if isinstance(value, list): value = [getattr(v, fld) for v in value] else: value = getattr(value, fld) return value"," Get nested field from list of objects or single instance :param value: Single instance or list to look up field :param field: Field to lookup :return: List or single instance of looked up field ",34,34,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_nested_field(value, field): if field == '__self__': return value fields = field.split('__') for fld in fields: if isinstance(value, list): value = [getattr(v, fld) for v in value] else: value = getattr(value, fld) return value ``` ###Assistant : Get nested field from list of objects or single instance :param value: Single instance or list to look up field :param field: Field to lookup :return: List or single instance of looked up field " 1084,"def test_stylesheet_many_classes_dont_overrule_id(): css = ""#id {color: red;} .a.b.c.d {color: blue;}"" stylesheet = _make_stylesheet(css) node = DOMNode(classes=""a b c d"", id=""id"") stylesheet.apply(node) assert node.styles.color == Color(255, 0, 0) ","#id is further to the left in the specificity tuple than class, and a selector containing multiple classes cannot take priority over even a single class.",26,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stylesheet_many_classes_dont_overrule_id(): css = ""#id {color: red;} .a.b.c.d {color: blue;}"" stylesheet = _make_stylesheet(css) node = DOMNode(classes=""a b c d"", id=""id"") stylesheet.apply(node) assert node.styles.color == Color(255, 0, 0) ``` ###Assistant : #id is further to the left in the specificity tuple than class, and a selector containing multiple classes cannot take priority over even a single class. " 1085,"def site_data_dir(self) -> str: return self._append_app_name_and_version(""/Library/Application Support"") ",":return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``",9,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def site_data_dir(self) -> str: return self._append_app_name_and_version(""/Library/Application Support"") ``` ###Assistant : :return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version`` " 1086,"def test_warm_start(): tpot_obj = TPOTClassifier( random_state=42, population_size=1, offspring_size=2, generations=1, verbosity=0, config_dict='TPOT light', warm_start=True) tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop is not None assert tpot_obj._pareto_front is not None first_pop = tpot_obj._pop tpot_obj.random_state = 21 tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop == first_pop ",Assert that the TPOT warm_start flag stores the pop and pareto_front from the first run.,15,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_warm_start(): tpot_obj = TPOTClassifier( random_state=42, population_size=1, offspring_size=2, generations=1, verbosity=0, config_dict='TPOT light', warm_start=True) tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop is not None assert tpot_obj._pareto_front is not None first_pop = tpot_obj._pop tpot_obj.random_state = 21 tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop == first_pop ``` ###Assistant : Assert that the TPOT warm_start flag stores the pop and pareto_front from the first run. 
" 1087,"def load_data_wiki(batch_size, max_len): num_workers = d2l.get_dataloader_workers() data_dir = d2l.download_extract('wikitext-2', 'wikitext-2') paragraphs = _read_wiki(data_dir) train_set = _WikiTextDataset(paragraphs, max_len) train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True, num_workers=num_workers) return train_iter, train_set.vocab ","Load the WikiText-2 dataset. Defined in :numref:`subsec_prepare_mlm_data`",7,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_data_wiki(batch_size, max_len): num_workers = d2l.get_dataloader_workers() data_dir = d2l.download_extract('wikitext-2', 'wikitext-2') paragraphs = _read_wiki(data_dir) train_set = _WikiTextDataset(paragraphs, max_len) train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True, num_workers=num_workers) return train_iter, train_set.vocab ``` ###Assistant : Load the WikiText-2 dataset. Defined in :numref:`subsec_prepare_mlm_data` " 1088,"def save(self, loc, **kwargs) -> Plot: # TODO expose important keyword arguments in our signature? with theme_context(self._theme_with_defaults()): self._plot().save(loc, **kwargs) return self "," Compile the plot and write it to a buffer or file on disk. Parameters ---------- loc : str, path, or buffer Location on disk to save the figure, or a buffer to write into. kwargs Other keyword arguments are passed through to :meth:`matplotlib.figure.Figure.savefig`. ",43,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save(self, loc, **kwargs) -> Plot: # TODO expose important keyword arguments in our signature? with theme_context(self._theme_with_defaults()): self._plot().save(loc, **kwargs) return self ``` ###Assistant : Compile the plot and write it to a buffer or file on disk. Parameters ---------- loc : str, path, or buffer Location on disk to save the figure, or a buffer to write into. kwargs Other keyword arguments are passed through to :meth:`matplotlib.figure.Figure.savefig`. " 1089,"def test_create_realm_no_creation_key(self) -> None: email = ""user1@test.com"" with self.settings(OPEN_REALM_CREATION=False): # Create new realm with the email, but no creation key. result = self.client_post(""/new/"", {""email"": email}) self.assertEqual(result.status_code, 200) self.assert_in_response(""Organization creation link required"", result) "," Trying to create a realm without a creation_key should fail when OPEN_REALM_CREATION is false. ",14,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_create_realm_no_creation_key(self) -> None: email = ""user1@test.com"" with self.settings(OPEN_REALM_CREATION=False): # Create new realm with the email, but no creation key. result = self.client_post(""/new/"", {""email"": email}) self.assertEqual(result.status_code, 200) self.assert_in_response(""Organization creation link required"", result) ``` ###Assistant : Trying to create a realm without a creation_key should fail when OPEN_REALM_CREATION is false. " 1090,"def _get_device_names(self) -> List[str]: names = [pynvml.nvmlDeviceGetName(handle).decode(""utf-8"") for handle in self._handles] self._log(""debug"", f""GPU Devices: {names}"") return names "," Obtain the list of names of connected Nvidia GPUs as identified in :attr:`_handles`. 
Returns ------- list The list of connected Nvidia GPU names ",23,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_device_names(self) -> List[str]: names = [pynvml.nvmlDeviceGetName(handle).decode(""utf-8"") for handle in self._handles] self._log(""debug"", f""GPU Devices: {names}"") return names ``` ###Assistant : Obtain the list of names of connected Nvidia GPUs as identified in :attr:`_handles`. Returns ------- list The list of connected Nvidia GPU names " 1091,"def test_delayed_message(self) -> None: user1 = UserID.from_string(self.user_id1) # Send a message before user2 joins event_id1 = self.create_and_send_event(self.room_id, user1) # Have user2 join the room self.helper.join(self.room_id, self.user_id2, tok=self.tok2) # They start off with no notifications self.assertEqual(self.get_notif_count(self.user_id2), 0) # Send another message that references the event before the join to # simulate a ""delayed"" event self.create_and_send_event(self.room_id, user1, prev_event_ids=[event_id1]) # user2 should not be notified about it, because they can't see it. self.assertEqual(self.get_notif_count(self.user_id2), 0) ","Test that a delayed message that was from before a user joined doesn't cause a notification for the joined user. ",20,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_delayed_message(self) -> None: user1 = UserID.from_string(self.user_id1) # Send a message before user2 joins event_id1 = self.create_and_send_event(self.room_id, user1) # Have user2 join the room self.helper.join(self.room_id, self.user_id2, tok=self.tok2) # They start off with no notifications self.assertEqual(self.get_notif_count(self.user_id2), 0) # Send another message that references the event before the join to # simulate a ""delayed"" event self.create_and_send_event(self.room_id, user1, prev_event_ids=[event_id1]) # user2 should not be notified about it, because they can't see it. self.assertEqual(self.get_notif_count(self.user_id2), 0) ``` ###Assistant : Test that a delayed message that was from before a user joined doesn't cause a notification for the joined user. " 1092,"def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset): model, X, y, coef, _, _, _ = glm_dataset n_samples, n_features = X.shape alpha = 0 # unpenalized params = dict( alpha=alpha, fit_intercept=fit_intercept, # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) if fit_intercept: intercept = coef[-1] coef = coef[:-1] if n_samples > n_features: X = X[:, :-1] # remove intercept X = 0.5 * np.concatenate((X, X), axis=1) else: # To know the minimum norm solution, we keep one intercept column and do # not divide by 2. Later on, we must take special care. X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]] else: intercept = 0 X = 0.5 * np.concatenate((X, X), axis=1) assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) with warnings.catch_warnings(): if fit_intercept and n_samples <= n_features: # XXX: Investigate if the lack of convergence in this case should be # considered a bug or not. warnings.filterwarnings(""ignore"", category=ConvergenceWarning) model.fit(X, y) if fit_intercept and n_samples <= n_features: # Here we take special care. model_intercept = 2 * model.intercept_ model_coef = 2 * model.coef_[:-1] # exclude the other intercept term. 
# For minimum norm solution, we would have # assert model.intercept_ == pytest.approx(model.coef_[-1]) else: model_intercept = model.intercept_ model_coef = model.coef_ rtol = 6e-5 if n_samples > n_features: assert model_intercept == pytest.approx(intercept) assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4) else: # As it is an underdetermined problem, prediction = y. The following shows that # we get a solution, i.e. a (non-unique) minimum of the objective function ... assert_allclose(model.predict(X), y, rtol=1e-6) if fit_intercept: # Same as in test_glm_regression_unpenalized. # But it is not the minimum norm solution. Otherwise the norms would be # equal. norm_solution = np.linalg.norm( 0.5 * np.r_[intercept, intercept, coef, coef] ) norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) assert norm_model > (1 + 1e-12) * norm_solution # For minimum norm solution, we would have # assert model.intercept_ == pytest.approx(model.coef_[-1]) else: assert model_intercept == pytest.approx(intercept) assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol) @pytest.mark.parametrize(""solver"", SOLVERS) @pytest.mark.parametrize(""fit_intercept"", [True, False])","Test that unpenalized GLM converges for all solvers to correct solution. We work with a simple constructed data set with known solution. GLM fit on [X] is the same as fit on [X, X]/2. For long X, [X, X] is a singular matrix and we check against the minimum norm solution: min ||w||_2 subject to w = argmin deviance(X, y, w) ",61,314,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset): model, X, y, coef, _, _, _ = glm_dataset n_samples, n_features = X.shape alpha = 0 # unpenalized params = dict( alpha=alpha, fit_intercept=fit_intercept, # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) if fit_intercept: intercept = coef[-1] coef = coef[:-1] if n_samples > n_features: X = X[:, :-1] # remove intercept X = 0.5 * np.concatenate((X, X), axis=1) else: # To know the minimum norm solution, we keep one intercept column and do # not divide by 2. Later on, we must take special care. X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]] else: intercept = 0 X = 0.5 * np.concatenate((X, X), axis=1) assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) with warnings.catch_warnings(): if fit_intercept and n_samples <= n_features: # XXX: Investigate if the lack of convergence in this case should be # considered a bug or not. warnings.filterwarnings(""ignore"", category=ConvergenceWarning) model.fit(X, y) if fit_intercept and n_samples <= n_features: # Here we take special care. model_intercept = 2 * model.intercept_ model_coef = 2 * model.coef_[:-1] # exclude the other intercept term. # For minimum norm solution, we would have # assert model.intercept_ == pytest.approx(model.coef_[-1]) else: model_intercept = model.intercept_ model_coef = model.coef_ rtol = 6e-5 if n_samples > n_features: assert model_intercept == pytest.approx(intercept) assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4) else: # As it is an underdetermined problem, prediction = y. The following shows that # we get a solution, i.e. a (non-unique) minimum of the objective function ... assert_allclose(model.predict(X), y, rtol=1e-6) if fit_intercept: # Same as in test_glm_regression_unpenalized. # But it is not the minimum norm solution. 
Otherwise the norms would be # equal. norm_solution = np.linalg.norm( 0.5 * np.r_[intercept, intercept, coef, coef] ) norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) assert norm_model > (1 + 1e-12) * norm_solution # For minimum norm solution, we would have # assert model.intercept_ == pytest.approx(model.coef_[-1]) else: assert model_intercept == pytest.approx(intercept) assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol) @pytest.mark.parametrize(""solver"", SOLVERS) @pytest.mark.parametrize(""fit_intercept"", [True, False]) ``` ###Assistant : Test that unpenalized GLM converges for all solvers to correct solution. We work with a simple constructed data set with known solution. GLM fit on [X] is the same as fit on [X, X]/2. For long X, [X, X] is a singular matrix and we check against the minimum norm solution: min ||w||_2 subject to w = argmin deviance(X, y, w) " 1093,"def test_count_aggregation_threads(self) -> None: user_id, token, _, other_token, room_id = self._create_users_and_room() thread_id: str last_event_id: str "," This is essentially the same test as test_count_aggregation, but adds events to the main timeline and to a thread. ",19,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_count_aggregation_threads(self) -> None: user_id, token, _, other_token, room_id = self._create_users_and_room() thread_id: str last_event_id: str ``` ###Assistant : This is essentially the same test as test_count_aggregation, but adds events to the main timeline and to a thread. " 1094,"def test_global_instantiated_before_config_load(self): cache = LruCache(100) add_resizable_cache(""foo"", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 50) config = {""caches"": {""global_factor"": 4}} self.config.read_config(config, config_dir_path="""", data_dir_path="""") self.config.resize_all_caches() self.assertEqual(cache.max_size, 400) "," If a cache is instantiated before the config is read, it will be given the default cache size in the interim, and then resized to the new default cache size once the config is loaded. ",35,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_global_instantiated_before_config_load(self): cache = LruCache(100) add_resizable_cache(""foo"", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 50) config = {""caches"": {""global_factor"": 4}} self.config.read_config(config, config_dir_path="""", data_dir_path="""") self.config.resize_all_caches() self.assertEqual(cache.max_size, 400) ``` ###Assistant : If a cache is instantiated before the config is read, it will be given the default cache size in the interim, and then resized to the new default cache size once the config is loaded. " 1095,"def in_top_k(predictions, targets, k): return tf.compat.v1.math.in_top_k(predictions, targets, k) # CONVOLUTIONS ","Returns whether the `targets` are in the top `k` `predictions`. Args: predictions: A tensor of shape `(batch_size, classes)` and type `float32`. targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. k: An `int`, number of top elements to consider. Returns: A 1D tensor of length `batch_size` and type `bool`. `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k` values of `predictions[i]`. ",64,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def in_top_k(predictions, targets, k): return tf.compat.v1.math.in_top_k(predictions, targets, k) # CONVOLUTIONS ``` ###Assistant : Returns whether the `targets` are in the top `k` `predictions`. Args: predictions: A tensor of shape `(batch_size, classes)` and type `float32`. targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. k: An `int`, number of top elements to consider. Returns: A 1D tensor of length `batch_size` and type `bool`. `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k` values of `predictions[i]`. " 1096,"def test_update_job(self, parent_job, grouped_jobs, api, batch): parent_job.update_job() # assert for job in grouped_jobs: job.update_job.assert_called_once_with(batch=batch) ",Checks jobs status in advance and restart if some failed.,10,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_update_job(self, parent_job, grouped_jobs, api, batch): parent_job.update_job() # assert for job in grouped_jobs: job.update_job.assert_called_once_with(batch=batch) ``` ###Assistant : Checks jobs status in advance and restart if some failed. " 1097,"def list_master_symlinks(saltenv=None, prefix=""""): if not saltenv: saltenv = __opts__[""saltenv""] or ""base"" return _client().symlink_list(saltenv, prefix) "," .. versionchanged:: 3005 ``saltenv`` will use value from config if not explicitly set List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ",30,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list_master_symlinks(saltenv=None, prefix=""""): if not saltenv: saltenv = __opts__[""saltenv""] or ""base"" return _client().symlink_list(saltenv, prefix) ``` ###Assistant : .. versionchanged:: 3005 ``saltenv`` will use value from config if not explicitly set List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks " 1098,"def print_help(self): has_screen_tickers_start = """" if self.screen_tickers else ""[unvl]"" has_screen_tickers_end = """" if self.screen_tickers else ""[/unvl]"" help_text = f console.print(text=help_text, menu=""Stocks - Options - Screener"") ","Print help[cmds] view view available presets (or one in particular) set set one of the available presets [/cmds] [param]PRESET: [/param]{self.preset}[cmds] scr screen data from this preset[/cmds] {has_screen_tickers_start} [param]Last screened tickers: [/param]{', '.join(self.screen_tickers)}[menu] > ca take these to comparison analysis menu > po take these to portoflio optimization menu{has_screen_tickers_end} ",48,25,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def print_help(self): has_screen_tickers_start = """" if self.screen_tickers else ""[unvl]"" has_screen_tickers_end = """" if self.screen_tickers else ""[/unvl]"" help_text = f console.print(text=help_text, menu=""Stocks - Options - Screener"") ``` ###Assistant : Print help[cmds] view view available presets (or one in particular) set set one of the available presets [/cmds] [param]PRESET: [/param]{self.preset}[cmds] scr screen data from this preset[/cmds] {has_screen_tickers_start} [param]Last screened tickers: [/param]{', '.join(self.screen_tickers)}[menu] > ca take these to comparison analysis menu > po take these to portoflio optimization menu{has_screen_tickers_end} " 1099,"def _find(self, tests, obj, name, module, source_lines, globs, seen): if self._verbose: print('Finding tests in %s' % name) # If we've already processed this object, then ignore it. if id(obj) in seen: return seen[id(obj)] = 1 # Make sure we don't run doctests for classes outside of sympy, such # as in numpy or scipy. if inspect.isclass(obj): if obj.__module__.split('.')[0] != 'sympy': return # Find a test for this object, and add it to the list of tests. test = self._get_test(obj, name, module, globs, source_lines) if test is not None: tests.append(test) if not self._recurse: return # Look for tests in a module's contained objects. if inspect.ismodule(obj): for rawname, val in obj.__dict__.items(): # Recurse to functions & classes. if inspect.isfunction(val) or inspect.isclass(val): # Make sure we don't run doctests functions or classes # from different modules if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ ""%s is not in module %s (rawname %s)"" % (val, module, rawname) try: valname = '%s.%s' % (name, rawname) self._find(tests, val, valname, module, source_lines, globs, seen) except KeyboardInterrupt: raise # Look for tests in a module's __test__ dictionary. for valname, val in getattr(obj, '__test__', {}).items(): if not isinstance(valname, str): raise ValueError(""SymPyDocTestFinder.find: __test__ keys "" ""must be strings: %r"" % (type(valname),)) if not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, str)): raise ValueError(""SymPyDocTestFinder.find: __test__ values "" ""must be strings, functions, methods, "" ""classes, or modules: %r"" % (type(val),)) valname = '%s.__test__.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj): for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).__func__ # Recurse to methods, properties, and nested classes. 
if ((inspect.isfunction(unwrap(val)) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)): # Make sure we don't run doctests functions or classes # from different modules if isinstance(val, property): if hasattr(val.fget, '__module__'): if val.fget.__module__ != module.__name__: continue else: if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ ""%s is not in module %s (valname %s)"" % ( val, module, valname) valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) "," Find tests for the given object and any contained objects, and add them to ``tests``. ",15,358,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _find(self, tests, obj, name, module, source_lines, globs, seen): if self._verbose: print('Finding tests in %s' % name) # If we've already processed this object, then ignore it. if id(obj) in seen: return seen[id(obj)] = 1 # Make sure we don't run doctests for classes outside of sympy, such # as in numpy or scipy. if inspect.isclass(obj): if obj.__module__.split('.')[0] != 'sympy': return # Find a test for this object, and add it to the list of tests. test = self._get_test(obj, name, module, globs, source_lines) if test is not None: tests.append(test) if not self._recurse: return # Look for tests in a module's contained objects. if inspect.ismodule(obj): for rawname, val in obj.__dict__.items(): # Recurse to functions & classes. if inspect.isfunction(val) or inspect.isclass(val): # Make sure we don't run doctests functions or classes # from different modules if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ ""%s is not in module %s (rawname %s)"" % (val, module, rawname) try: valname = '%s.%s' % (name, rawname) self._find(tests, val, valname, module, source_lines, globs, seen) except KeyboardInterrupt: raise # Look for tests in a module's __test__ dictionary. for valname, val in getattr(obj, '__test__', {}).items(): if not isinstance(valname, str): raise ValueError(""SymPyDocTestFinder.find: __test__ keys "" ""must be strings: %r"" % (type(valname),)) if not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, str)): raise ValueError(""SymPyDocTestFinder.find: __test__ values "" ""must be strings, functions, methods, "" ""classes, or modules: %r"" % (type(val),)) valname = '%s.__test__.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj): for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).__func__ # Recurse to methods, properties, and nested classes. 
if ((inspect.isfunction(unwrap(val)) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)): # Make sure we don't run doctests functions or classes # from different modules if isinstance(val, property): if hasattr(val.fget, '__module__'): if val.fget.__module__ != module.__name__: continue else: if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ ""%s is not in module %s (valname %s)"" % ( val, module, valname) valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) ``` ###Assistant : Find tests for the given object and any contained objects, and add them to ``tests``. " 1100,"def get_mapped_key_strings_to_ints(self) -> MutableMapping[str, int]: cache_key_results: MutableMapping[str, int] = {} for org_id, result_dict in self.results.items(): for string, id in result_dict.items(): key = f""{org_id}:{string}"" if id is not None: cache_key_results[key] = id return cache_key_results "," Return the results, but formatted as the following: { ""1:a"": 10, ""1:b"": 11, ""1:c"", 12, ""2:e"": 13 } This is for when we use indexer_cache.set_many() ",25,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_mapped_key_strings_to_ints(self) -> MutableMapping[str, int]: cache_key_results: MutableMapping[str, int] = {} for org_id, result_dict in self.results.items(): for string, id in result_dict.items(): key = f""{org_id}:{string}"" if id is not None: cache_key_results[key] = id return cache_key_results ``` ###Assistant : Return the results, but formatted as the following: { ""1:a"": 10, ""1:b"": 11, ""1:c"", 12, ""2:e"": 13 } This is for when we use indexer_cache.set_many() " 1101,"def get_atext(value): m = _non_atom_end_matcher(value) if not m: raise errors.HeaderParseError( ""expected atext but found '{}'"".format(value)) atext = m.group() value = value[len(atext):] atext = ValueTerminal(atext, 'atext') _validate_xtext(atext) return atext, value ","atext = We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to the token's defects list if we find non-atext characters. ",24,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_atext(value): m = _non_atom_end_matcher(value) if not m: raise errors.HeaderParseError( ""expected atext but found '{}'"".format(value)) atext = m.group() value = value[len(atext):] atext = ValueTerminal(atext, 'atext') _validate_xtext(atext) return atext, value ``` ###Assistant : atext = We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to the token's defects list if we find non-atext characters. " 1102,"def _get_linenumber_pos(self, lineno): return (self.image_pad, self._get_line_y(lineno)) "," Get the actual position for the start of a line number. ",11,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_linenumber_pos(self, lineno): return (self.image_pad, self._get_line_y(lineno)) ``` ###Assistant : Get the actual position for the start of a line number. 
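A minimal, hedged usage sketch follows. The toy class below is an illustrative assumption (its name, the line_height value, and the _get_line_y body are not the real image formatter's implementation); only the (image_pad, _get_line_y(lineno)) composition mirrors the documented method.

```Python
# Hypothetical stand-in class: image_pad and line_height values are assumptions.
class _ToyImageFormatter:
    def __init__(self, image_pad=10, line_height=14):
        self.image_pad = image_pad      # padding around the rendered image, in pixels
        self.line_height = line_height  # assumed vertical distance between successive lines

    def _get_line_y(self, lineno):
        # Simplified assumption: lines are stacked at a fixed pitch below the top padding.
        return self.image_pad + lineno * self.line_height

    def _get_linenumber_pos(self, lineno):
        # Same shape as the documented method: x is the left padding, y comes from _get_line_y.
        return (self.image_pad, self._get_line_y(lineno))


formatter = _ToyImageFormatter()
print(formatter._get_linenumber_pos(0))  # (10, 10)
print(formatter._get_linenumber_pos(3))  # (10, 52)
```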
" 1103,"def test_image_inside_paragraph(self): # In Draftail's data model, images are block-level elements and therefore # split up preceding / following text into their own paragraphs converter = ContentstateConverter(features=[""image""]) result = json.loads( converter.from_database_format( ) ) self.assertContentStateEqual( result, { ""blocks"": [ { ""key"": ""00000"", ""inlineStyleRanges"": [], ""entityRanges"": [], ""depth"": 0, ""text"": ""before"", ""type"": ""unstyled"", }, { ""key"": ""00000"", ""inlineStyleRanges"": [], ""entityRanges"": [{""key"": 0, ""offset"": 0, ""length"": 1}], ""depth"": 0, ""text"": "" "", ""type"": ""atomic"", }, { ""key"": ""00000"", ""inlineStyleRanges"": [], ""entityRanges"": [], ""depth"": 0, ""text"": ""after"", ""type"": ""unstyled"", }, ], ""entityMap"": { ""0"": { ""data"": { ""format"": ""left"", ""alt"": ""an image"", ""id"": ""1"", ""src"": ""/media/not-found"", }, ""mutability"": ""IMMUTABLE"", ""type"": ""IMAGE"", } }, }, ) ","

    <p>before <embed embedtype=""image"" alt=""an image"" id=""1"" format=""left"" /> after</p>

    ",9,111,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_image_inside_paragraph(self): # In Draftail's data model, images are block-level elements and therefore # split up preceding / following text into their own paragraphs converter = ContentstateConverter(features=[""image""]) result = json.loads( converter.from_database_format( ) ) self.assertContentStateEqual( result, { ""blocks"": [ { ""key"": ""00000"", ""inlineStyleRanges"": [], ""entityRanges"": [], ""depth"": 0, ""text"": ""before"", ""type"": ""unstyled"", }, { ""key"": ""00000"", ""inlineStyleRanges"": [], ""entityRanges"": [{""key"": 0, ""offset"": 0, ""length"": 1}], ""depth"": 0, ""text"": "" "", ""type"": ""atomic"", }, { ""key"": ""00000"", ""inlineStyleRanges"": [], ""entityRanges"": [], ""depth"": 0, ""text"": ""after"", ""type"": ""unstyled"", }, ], ""entityMap"": { ""0"": { ""data"": { ""format"": ""left"", ""alt"": ""an image"", ""id"": ""1"", ""src"": ""/media/not-found"", }, ""mutability"": ""IMMUTABLE"", ""type"": ""IMAGE"", } }, }, ) ``` ###Assistant :

    <p>before <embed embedtype=""image"" alt=""an image"" id=""1"" format=""left"" /> after</p>

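A short, hedged usage sketch (assumes a configured Wagtail/Django environment; the import path and the printed block-type summary are illustrative assumptions, not part of the original test):

```Python
import json

# Assumed import location for ContentstateConverter; adjust if the project layout differs.
from wagtail.admin.rich_text.converters.contentstate import ContentstateConverter

converter = ContentstateConverter(features=['image'])
html = '<p>before <embed embedtype=""image"" alt=""an image"" id=""1"" format=""left"" /> after</p>'
result = json.loads(converter.from_database_format(html))

# The embed inside the paragraph is lifted out into its own atomic block,
# leaving the surrounding text as separate unstyled blocks.
print([block['type'] for block in result['blocks']])  # ['unstyled', 'atomic', 'unstyled']
```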
    " 1104,"def l1_min_c(X, y, *, loss=""squared_hinge"", fit_intercept=True, intercept_scaling=1.0): if loss not in (""squared_hinge"", ""log""): raise ValueError('loss type not in (""squared_hinge"", ""log"")') X = check_array(X, accept_sparse=""csc"") check_consistent_length(X, y) Y = LabelBinarizer(neg_label=-1).fit_transform(y).T # maximum absolute value over classes and features den = np.max(np.abs(safe_sparse_dot(Y, X))) if fit_intercept: bias = np.full( (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype ) den = max(den, abs(np.dot(Y, bias)).max()) if den == 0.0: raise ValueError( ""Ill-posed l1_min_c calculation: l1 will always "" ""select zero coefficients for this data"" ) if loss == ""squared_hinge"": return 0.5 / den else: # loss == 'log': return 2.0 / den ","Return the lowest bound for C. The lower bound for C is computed such that for C in (l1_min_C, infinity) the model is guaranteed not to be empty. This applies to l1 penalized classifiers, such as LinearSVC with penalty='l1' and linear_model.LogisticRegression with penalty='l1'. This value is valid if class_weight parameter in fit() is not set. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target vector relative to X. loss : {'squared_hinge', 'log'}, default='squared_hinge' Specifies the loss function. With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss). With 'log' it is the loss of logistic regression models. fit_intercept : bool, default=True Specifies if the intercept should be fitted by the model. It must match the fit() method parameter. intercept_scaling : float, default=1.0 When fit_intercept is True, instance vector x becomes [x, intercept_scaling], i.e. a ""synthetic"" feature with constant value equals to intercept_scaling is appended to the instance vector. It must match the fit() method parameter. Returns ------- l1_min_c : float Minimum value for C. ",190,93,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def l1_min_c(X, y, *, loss=""squared_hinge"", fit_intercept=True, intercept_scaling=1.0): if loss not in (""squared_hinge"", ""log""): raise ValueError('loss type not in (""squared_hinge"", ""log"")') X = check_array(X, accept_sparse=""csc"") check_consistent_length(X, y) Y = LabelBinarizer(neg_label=-1).fit_transform(y).T # maximum absolute value over classes and features den = np.max(np.abs(safe_sparse_dot(Y, X))) if fit_intercept: bias = np.full( (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype ) den = max(den, abs(np.dot(Y, bias)).max()) if den == 0.0: raise ValueError( ""Ill-posed l1_min_c calculation: l1 will always "" ""select zero coefficients for this data"" ) if loss == ""squared_hinge"": return 0.5 / den else: # loss == 'log': return 2.0 / den ``` ###Assistant : Return the lowest bound for C. The lower bound for C is computed such that for C in (l1_min_C, infinity) the model is guaranteed not to be empty. This applies to l1 penalized classifiers, such as LinearSVC with penalty='l1' and linear_model.LogisticRegression with penalty='l1'. This value is valid if class_weight parameter in fit() is not set. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target vector relative to X. loss : {'squared_hinge', 'log'}, default='squared_hinge' Specifies the loss function. With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss). With 'log' it is the loss of logistic regression models. fit_intercept : bool, default=True Specifies if the intercept should be fitted by the model. It must match the fit() method parameter. intercept_scaling : float, default=1.0 When fit_intercept is True, instance vector x becomes [x, intercept_scaling], i.e. a ""synthetic"" feature with constant value equals to intercept_scaling is appended to the instance vector. It must match the fit() method parameter. Returns ------- l1_min_c : float Minimum value for C. " 1105,"def _mysql_tables_where_indexes_already_present(conn): to_check = [ ('xcom', 'idx_xcom_task_instance'), ('task_reschedule', 'idx_task_reschedule_dag_run'), ('task_fail', 'idx_task_fail_task_instance'), ] tables = set() for tbl, idx in to_check: if conn.execute(f""show indexes from {tbl} where Key_name = '{idx}'"").first(): tables.add(tbl) return tables "," If user downgraded and is upgrading again, we have to check for existing indexes on mysql because we can't (and don't) drop them as part of the downgrade. ",28,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _mysql_tables_where_indexes_already_present(conn): to_check = [ ('xcom', 'idx_xcom_task_instance'), ('task_reschedule', 'idx_task_reschedule_dag_run'), ('task_fail', 'idx_task_fail_task_instance'), ] tables = set() for tbl, idx in to_check: if conn.execute(f""show indexes from {tbl} where Key_name = '{idx}'"").first(): tables.add(tbl) return tables ``` ###Assistant : If user downgraded and is upgrading again, we have to check for existing indexes on mysql because we can't (and don't) drop them as part of the downgrade. " 1106,"def test_webclient_resolves_with_client_resource(self): for resource_name_order_list in [ [""webclient"", ""client""], [""client"", ""webclient""], ]: # Create a dictionary from path regex -> resource resource_dict: Dict[str, Resource] = {} for resource_name in resource_name_order_list: resource_dict.update( SynapseHomeServer._configure_named_resource(self.hs, resource_name) ) # Create a root resource which ties the above resources together into one root_resource = Resource() create_resource_tree(resource_dict, root_resource) # Create a site configured with this resource to make HTTP requests against listener_config = ListenerConfig( port=8008, bind_addresses=[""127.0.0.1""], type=""http"", http_options=HttpListenerConfig( resources=[HttpResourceConfig(names=resource_name_order_list)] ), ) test_site = SynapseSite( logger_name=""synapse.access.http.fake"", site_tag=self.hs.config.server.server_name, config=listener_config, resource=root_resource, server_version_string=""1"", max_request_body_size=1234, reactor=self.reactor, ) # Attempt to make requests to endpoints on both the webclient and client resources # on test_site. self._request_client_and_webclient_resources(test_site) "," Tests that both client and webclient resources can be accessed simultaneously. This is a regression test created in response to https://github.com/matrix-org/synapse/issues/11763. 
",21,103,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_webclient_resolves_with_client_resource(self): for resource_name_order_list in [ [""webclient"", ""client""], [""client"", ""webclient""], ]: # Create a dictionary from path regex -> resource resource_dict: Dict[str, Resource] = {} for resource_name in resource_name_order_list: resource_dict.update( SynapseHomeServer._configure_named_resource(self.hs, resource_name) ) # Create a root resource which ties the above resources together into one root_resource = Resource() create_resource_tree(resource_dict, root_resource) # Create a site configured with this resource to make HTTP requests against listener_config = ListenerConfig( port=8008, bind_addresses=[""127.0.0.1""], type=""http"", http_options=HttpListenerConfig( resources=[HttpResourceConfig(names=resource_name_order_list)] ), ) test_site = SynapseSite( logger_name=""synapse.access.http.fake"", site_tag=self.hs.config.server.server_name, config=listener_config, resource=root_resource, server_version_string=""1"", max_request_body_size=1234, reactor=self.reactor, ) # Attempt to make requests to endpoints on both the webclient and client resources # on test_site. self._request_client_and_webclient_resources(test_site) ``` ###Assistant : Tests that both client and webclient resources can be accessed simultaneously. This is a regression test created in response to https://github.com/matrix-org/synapse/issues/11763. " 1107,"def unregister_pickle_by_value(module): if not isinstance(module, types.ModuleType): raise ValueError(f""Input should be a module object, got {str(module)} instead"") if module.__name__ not in _PICKLE_BY_VALUE_MODULES: raise ValueError(f""{module} is not registered for pickle by value"") else: _PICKLE_BY_VALUE_MODULES.remove(module.__name__) ",Unregister that the input module should be pickled by value.,10,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unregister_pickle_by_value(module): if not isinstance(module, types.ModuleType): raise ValueError(f""Input should be a module object, got {str(module)} instead"") if module.__name__ not in _PICKLE_BY_VALUE_MODULES: raise ValueError(f""{module} is not registered for pickle by value"") else: _PICKLE_BY_VALUE_MODULES.remove(module.__name__) ``` ###Assistant : Unregister that the input module should be pickled by value. " 1108,"def test_get_bad_permissions(self): # Remove privileges from user self.user.is_superuser = False self.user.user_permissions.add( Permission.objects.get( content_type__app_label=""wagtailadmin"", codename=""access_admin"" ) ) self.user.save() # Get response = self.client.get( reverse(""wagtailimages:url_generator"", args=(self.image.id,)) ) # Check response self.assertRedirects(response, reverse(""wagtailadmin_home"")) "," This tests that the view returns a ""permission denied"" redirect if a user without correct permissions attempts to access it ",20,30,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_get_bad_permissions(self): # Remove privileges from user self.user.is_superuser = False self.user.user_permissions.add( Permission.objects.get( content_type__app_label=""wagtailadmin"", codename=""access_admin"" ) ) self.user.save() # Get response = self.client.get( reverse(""wagtailimages:url_generator"", args=(self.image.id,)) ) # Check response self.assertRedirects(response, reverse(""wagtailadmin_home"")) ``` ###Assistant : This tests that the view returns a ""permission denied"" redirect if a user without correct permissions attempts to access it " 1109,"def get_archive_formats(): formats = [(name, registry[2]) for name, registry in _ARCHIVE_FORMATS.items()] formats.sort() return formats ","Returns a list of supported formats for archiving and unarchiving. Each element of the returned sequence is a tuple (name, description) ",21,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_archive_formats(): formats = [(name, registry[2]) for name, registry in _ARCHIVE_FORMATS.items()] formats.sort() return formats ``` ###Assistant : Returns a list of supported formats for archiving and unarchiving. Each element of the returned sequence is a tuple (name, description) " 1110,"def _handle_transforms(self, element, mobject): if element.hasAttribute(""x"") and element.hasAttribute(""y""): x = self._attribute_to_float(element.getAttribute(""x"")) # Flip y y = -self._attribute_to_float(element.getAttribute(""y"")) mobject.shift(x * RIGHT + y * UP) transform_attr_value = element.getAttribute(""transform"") # parse the various transforms in the attribute value transform_names = [""matrix"", ""translate"", ""scale"", ""rotate"", ""skewX"", ""skewY""] # Borrowed/Inspired from: # https://github.com/cjlano/svg/blob/3ea3384457c9780fa7d67837c9c5fd4ebc42cb3b/svg/svg.py#L75 # match any SVG transformation with its parameter (until final parenthesis) # [^)]* == anything but a closing parenthesis # '|'.join == OR-list of SVG transformations transform_regex = ""|"".join([x + r""[^)]*\)"" for x in transform_names]) transforms = re.findall(transform_regex, transform_attr_value) number_regex = r""[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?"" for t in transforms: op_name, op_args = t.split(""("") op_name = op_name.strip() op_args = [float(x) for x in re.findall(number_regex, op_args)] if op_name == ""matrix"": transform_args = np.array(op_args).reshape([3, 2]) x = transform_args[2][0] y = -transform_args[2][1] matrix = np.identity(self.dim) matrix[:2, :2] = transform_args[:2, :] matrix[1] *= -1 matrix[:, 1] *= -1 for mob in mobject.family_members_with_points(): if config[""renderer""] == ""opengl"": mob.points = np.dot(mob.points, matrix) else: mob.points = np.dot(mob.points, matrix) mobject.shift(x * RIGHT + y * UP) elif op_name == ""scale"": scale_values = op_args if len(scale_values) == 2: scale_x, scale_y = scale_values mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN) elif len(scale_values) == 1: scale = scale_values[0] mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN) elif op_name == ""translate"": if len(op_args) == 2: x, y = op_args else: x = op_args y = 0 mobject.shift(x * RIGHT + y * DOWN) else: # TODO: handle rotate, skewX and skewY # for now adding a warning message logger.warning( ""Handling of %s transform is not supported yet!"", op_name, ) ","Applies the SVG transform to the specified mobject. 
Transforms include: ``matrix``, ``translate``, and ``scale``. Parameters ---------- element : :class:`minidom.Element` The transform command to perform mobject : :class:`Mobject` The Mobject to transform. ",31,245,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _handle_transforms(self, element, mobject): if element.hasAttribute(""x"") and element.hasAttribute(""y""): x = self._attribute_to_float(element.getAttribute(""x"")) # Flip y y = -self._attribute_to_float(element.getAttribute(""y"")) mobject.shift(x * RIGHT + y * UP) transform_attr_value = element.getAttribute(""transform"") # parse the various transforms in the attribute value transform_names = [""matrix"", ""translate"", ""scale"", ""rotate"", ""skewX"", ""skewY""] # Borrowed/Inspired from: # https://github.com/cjlano/svg/blob/3ea3384457c9780fa7d67837c9c5fd4ebc42cb3b/svg/svg.py#L75 # match any SVG transformation with its parameter (until final parenthesis) # [^)]* == anything but a closing parenthesis # '|'.join == OR-list of SVG transformations transform_regex = ""|"".join([x + r""[^)]*\)"" for x in transform_names]) transforms = re.findall(transform_regex, transform_attr_value) number_regex = r""[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?"" for t in transforms: op_name, op_args = t.split(""("") op_name = op_name.strip() op_args = [float(x) for x in re.findall(number_regex, op_args)] if op_name == ""matrix"": transform_args = np.array(op_args).reshape([3, 2]) x = transform_args[2][0] y = -transform_args[2][1] matrix = np.identity(self.dim) matrix[:2, :2] = transform_args[:2, :] matrix[1] *= -1 matrix[:, 1] *= -1 for mob in mobject.family_members_with_points(): if config[""renderer""] == ""opengl"": mob.points = np.dot(mob.points, matrix) else: mob.points = np.dot(mob.points, matrix) mobject.shift(x * RIGHT + y * UP) elif op_name == ""scale"": scale_values = op_args if len(scale_values) == 2: scale_x, scale_y = scale_values mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN) elif len(scale_values) == 1: scale = scale_values[0] mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN) elif op_name == ""translate"": if len(op_args) == 2: x, y = op_args else: x = op_args y = 0 mobject.shift(x * RIGHT + y * DOWN) else: # TODO: handle rotate, skewX and skewY # for now adding a warning message logger.warning( ""Handling of %s transform is not supported yet!"", op_name, ) ``` ###Assistant : Applies the SVG transform to the specified mobject. Transforms include: ``matrix``, ``translate``, and ``scale``. Parameters ---------- element : :class:`minidom.Element` The transform command to perform mobject : :class:`Mobject` The Mobject to transform. " 1111,"def pro_data_fixture(): return json.loads(load_fixture(""data.json"", ""airvisual_pro"")) @pytest.fixture(name=""pro"")",Define an update coordinator data example for the Pro.,9,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pro_data_fixture(): return json.loads(load_fixture(""data.json"", ""airvisual_pro"")) @pytest.fixture(name=""pro"") ``` ###Assistant : Define an update coordinator data example for the Pro. " 1112,"def fetch_species_distributions(*, data_home=None, download_if_missing=True): data_home = get_data_home(data_home) if not exists(data_home): makedirs(data_home) # Define parameters for the data files. These should not be changed # unless the data model changes. 
They will be saved in the npz file # with the downloaded data. extra_params = dict( x_left_lower_corner=-94.8, Nx=1212, y_left_lower_corner=-56.05, Ny=1592, grid_size=0.05, ) dtype = np.int16 archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME) if not exists(archive_path): if not download_if_missing: raise IOError(""Data not found and `download_if_missing` is False"") logger.info(""Downloading species data from %s to %s"" % (SAMPLES.url, data_home)) samples_path = _fetch_remote(SAMPLES, dirname=data_home) with np.load(samples_path) as X: # samples.zip is a valid npz for f in X.files: fhandle = BytesIO(X[f]) if ""train"" in f: train = _load_csv(fhandle) if ""test"" in f: test = _load_csv(fhandle) remove(samples_path) logger.info( ""Downloading coverage data from %s to %s"" % (COVERAGES.url, data_home) ) coverages_path = _fetch_remote(COVERAGES, dirname=data_home) with np.load(coverages_path) as X: # coverages.zip is a valid npz coverages = [] for f in X.files: fhandle = BytesIO(X[f]) logger.debug("" - converting {}"".format(f)) coverages.append(_load_coverage(fhandle)) coverages = np.asarray(coverages, dtype=dtype) remove(coverages_path) bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params) joblib.dump(bunch, archive_path, compress=9) else: bunch = joblib.load(archive_path) return bunch ","Loader for species distribution dataset from Phillips et. al. (2006). Read more in the :ref:`User Guide `. Parameters ---------- data_home : str, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. coverages : array, shape = [14, 1592, 1212] These represent the 14 features measured at each point of the map grid. The latitude/longitude values for the grid are discussed below. Missing data is represented by the value -9999. train : record array, shape = (1624,) The training points for the data. Each point has three fields: - train['species'] is the species name - train['dd long'] is the longitude, in degrees - train['dd lat'] is the latitude, in degrees test : record array, shape = (620,) The test points for the data. Same format as the training data. Nx, Ny : integers The number of longitudes (x) and latitudes (y) in the grid x_left_lower_corner, y_left_lower_corner : floats The (x,y) position of the lower-left corner, in degrees grid_size : float The spacing between points of the grid, in degrees Notes ----- This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `""Bradypus variegatus"" `_ , the Brown-throated Sloth. - `""Microryzomys minutus"" `_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. - For an example of using this dataset with scikit-learn, see :ref:`examples/applications/plot_species_distribution_modeling.py `. References ---------- * `""Maximum entropy modeling of species geographic distributions"" `_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. ",310,179,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def fetch_species_distributions(*, data_home=None, download_if_missing=True): data_home = get_data_home(data_home) if not exists(data_home): makedirs(data_home) # Define parameters for the data files. These should not be changed # unless the data model changes. They will be saved in the npz file # with the downloaded data. extra_params = dict( x_left_lower_corner=-94.8, Nx=1212, y_left_lower_corner=-56.05, Ny=1592, grid_size=0.05, ) dtype = np.int16 archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME) if not exists(archive_path): if not download_if_missing: raise IOError(""Data not found and `download_if_missing` is False"") logger.info(""Downloading species data from %s to %s"" % (SAMPLES.url, data_home)) samples_path = _fetch_remote(SAMPLES, dirname=data_home) with np.load(samples_path) as X: # samples.zip is a valid npz for f in X.files: fhandle = BytesIO(X[f]) if ""train"" in f: train = _load_csv(fhandle) if ""test"" in f: test = _load_csv(fhandle) remove(samples_path) logger.info( ""Downloading coverage data from %s to %s"" % (COVERAGES.url, data_home) ) coverages_path = _fetch_remote(COVERAGES, dirname=data_home) with np.load(coverages_path) as X: # coverages.zip is a valid npz coverages = [] for f in X.files: fhandle = BytesIO(X[f]) logger.debug("" - converting {}"".format(f)) coverages.append(_load_coverage(fhandle)) coverages = np.asarray(coverages, dtype=dtype) remove(coverages_path) bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params) joblib.dump(bunch, archive_path, compress=9) else: bunch = joblib.load(archive_path) return bunch ``` ###Assistant : Loader for species distribution dataset from Phillips et. al. (2006). Read more in the :ref:`User Guide `. Parameters ---------- data_home : str, default=None Specify another download and cache folder for the datasets. By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing : bool, default=True If False, raise a IOError if the data is not locally available instead of trying to download the data from the source site. Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. coverages : array, shape = [14, 1592, 1212] These represent the 14 features measured at each point of the map grid. The latitude/longitude values for the grid are discussed below. Missing data is represented by the value -9999. train : record array, shape = (1624,) The training points for the data. Each point has three fields: - train['species'] is the species name - train['dd long'] is the longitude, in degrees - train['dd lat'] is the latitude, in degrees test : record array, shape = (620,) The test points for the data. Same format as the training data. Nx, Ny : integers The number of longitudes (x) and latitudes (y) in the grid x_left_lower_corner, y_left_lower_corner : floats The (x,y) position of the lower-left corner, in degrees grid_size : float The spacing between points of the grid, in degrees Notes ----- This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `""Bradypus variegatus"" `_ , the Brown-throated Sloth. - `""Microryzomys minutus"" `_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. - For an example of using this dataset with scikit-learn, see :ref:`examples/applications/plot_species_distribution_modeling.py `. 
References ---------- * `""Maximum entropy modeling of species geographic distributions"" `_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. " 1113,"def set_split_factor(factor, dev=None): assert 0 <= factor global split_factors dev = ivy.default(dev, default_device()) split_factors[dev] = factor # noinspection PyShadowingNames"," Set the global split factor for a given device, which can be used to scale batch splitting chunk sizes for the device across the codebase. :param factor: The factor to set the device-specific split factor to. :type factor: float :param dev: The device to set the split factor for. Sets the default device by default. :type dev: str, optional ",59,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_split_factor(factor, dev=None): assert 0 <= factor global split_factors dev = ivy.default(dev, default_device()) split_factors[dev] = factor # noinspection PyShadowingNames ``` ###Assistant : Set the global split factor for a given device, which can be used to scale batch splitting chunk sizes for the device across the codebase. :param factor: The factor to set the device-specific split factor to. :type factor: float :param dev: The device to set the split factor for. Sets the default device by default. :type dev: str, optional " 1114,"def run(self, test, compileflags=None, out=None, clear_globs=True): self.test = test # Remove ``` from the end of example, which may appear in Markdown # files for example in test.examples: example.want = example.want.replace('```\n', '') example.exc_msg = example.exc_msg and example.exc_msg.replace('```\n', '') if compileflags is None: compileflags = pdoctest._extract_future_flags(test.globs) save_stdout = sys.stdout if out is None: out = save_stdout.write sys.stdout = self._fakeout # Patch pdb.set_trace to restore sys.stdout during interactive # debugging (so it's not still redirected to self._fakeout). # Note that the interactive output will go to *our* # save_stdout, even if that's not the real sys.stdout; this # allows us to write test cases for the set_trace behavior. save_set_trace = pdb.set_trace self.debugger = pdoctest._OutputRedirectingPdb(save_stdout) self.debugger.reset() pdb.set_trace = self.debugger.set_trace # Patch linecache.getlines, so we can see the example's source # when we're inside the debugger. self.save_linecache_getlines = pdoctest.linecache.getlines linecache.getlines = self.__patched_linecache_getlines # Fail for deprecation warnings with raise_on_deprecated(): try: return self.__run(test, compileflags, out) finally: sys.stdout = save_stdout pdb.set_trace = save_set_trace linecache.getlines = self.save_linecache_getlines if clear_globs: test.globs.clear() # We have to override the name mangled methods. monkeypatched_methods = [ 'patched_linecache_getlines', 'run', 'record_outcome' ] for method in monkeypatched_methods: oldname = '_DocTestRunner__' + method newname = '_SymPyDocTestRunner__' + method setattr(SymPyDocTestRunner, newname, getattr(DocTestRunner, oldname)) "," Run the examples in ``test``, and display the results using the writer function ``out``. The examples are run in the namespace ``test.globs``. If ``clear_globs`` is true (the default), then this namespace will be cleared after the test runs, to help with garbage collection. If you would like to examine the namespace after the test completes, then use ``clear_globs=False``. 
``compileflags`` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to ``globs``. The output of each example is checked using ``SymPyDocTestRunner.check_output``, and the results are formatted by the ``SymPyDocTestRunner.report_*`` methods. ",111,197,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def run(self, test, compileflags=None, out=None, clear_globs=True): self.test = test # Remove ``` from the end of example, which may appear in Markdown # files for example in test.examples: example.want = example.want.replace('```\n', '') example.exc_msg = example.exc_msg and example.exc_msg.replace('```\n', '') if compileflags is None: compileflags = pdoctest._extract_future_flags(test.globs) save_stdout = sys.stdout if out is None: out = save_stdout.write sys.stdout = self._fakeout # Patch pdb.set_trace to restore sys.stdout during interactive # debugging (so it's not still redirected to self._fakeout). # Note that the interactive output will go to *our* # save_stdout, even if that's not the real sys.stdout; this # allows us to write test cases for the set_trace behavior. save_set_trace = pdb.set_trace self.debugger = pdoctest._OutputRedirectingPdb(save_stdout) self.debugger.reset() pdb.set_trace = self.debugger.set_trace # Patch linecache.getlines, so we can see the example's source # when we're inside the debugger. self.save_linecache_getlines = pdoctest.linecache.getlines linecache.getlines = self.__patched_linecache_getlines # Fail for deprecation warnings with raise_on_deprecated(): try: return self.__run(test, compileflags, out) finally: sys.stdout = save_stdout pdb.set_trace = save_set_trace linecache.getlines = self.save_linecache_getlines if clear_globs: test.globs.clear() # We have to override the name mangled methods. monkeypatched_methods = [ 'patched_linecache_getlines', 'run', 'record_outcome' ] for method in monkeypatched_methods: oldname = '_DocTestRunner__' + method newname = '_SymPyDocTestRunner__' + method setattr(SymPyDocTestRunner, newname, getattr(DocTestRunner, oldname)) ``` ###Assistant : Run the examples in ``test``, and display the results using the writer function ``out``. The examples are run in the namespace ``test.globs``. If ``clear_globs`` is true (the default), then this namespace will be cleared after the test runs, to help with garbage collection. If you would like to examine the namespace after the test completes, then use ``clear_globs=False``. ``compileflags`` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to ``globs``. The output of each example is checked using ``SymPyDocTestRunner.check_output``, and the results are formatted by the ``SymPyDocTestRunner.report_*`` methods. " 1115,"def _fetch_all_variants(client, variables={}, permissions=None): query = response = client.post_graphql( query, variables, permissions=permissions, check_no_permissions=False ) content = get_graphql_content(response) return content[""data""][""productVariants""] "," query fetchAllVariants($channel: String) { productVariants(first: 10, channel: $channel) { totalCount edges { node { id } } } } ",19,19,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _fetch_all_variants(client, variables={}, permissions=None): query = response = client.post_graphql( query, variables, permissions=permissions, check_no_permissions=False ) content = get_graphql_content(response) return content[""data""][""productVariants""] ``` ###Assistant : query fetchAllVariants($channel: String) { productVariants(first: 10, channel: $channel) { totalCount edges { node { id } } } } " 1116,"def astar_torus(grid, start_node, goal_node): colors = ['white', 'black', 'red', 'pink', 'yellow', 'green', 'orange'] levels = [0, 1, 2, 3, 4, 5, 6, 7] cmap, norm = from_levels_and_colors(levels, colors) grid[start_node] = 4 grid[goal_node] = 5 parent_map = [[() for _ in range(M)] for _ in range(M)] heuristic_map = calc_heuristic_map(M, goal_node) explored_heuristic_map = np.full((M, M), np.inf) distance_map = np.full((M, M), np.inf) explored_heuristic_map[start_node] = heuristic_map[start_node] distance_map[start_node] = 0 while True: grid[start_node] = 4 grid[goal_node] = 5 current_node = np.unravel_index( np.argmin(explored_heuristic_map, axis=None), explored_heuristic_map.shape) min_distance = np.min(explored_heuristic_map) if (current_node == goal_node) or np.isinf(min_distance): break grid[current_node] = 2 explored_heuristic_map[current_node] = np.inf i, j = current_node[0], current_node[1] neighbors = find_neighbors(i, j) for neighbor in neighbors: if grid[neighbor] == 0 or grid[neighbor] == 5: distance_map[neighbor] = distance_map[current_node] + 1 explored_heuristic_map[neighbor] = heuristic_map[neighbor] parent_map[neighbor[0]][neighbor[1]] = current_node grid[neighbor] = 3 if np.isinf(explored_heuristic_map[goal_node]): route = [] print(""No route found."") else: route = [goal_node] while parent_map[route[0][0]][route[0][1]] != (): route.insert(0, parent_map[route[0][0]][route[0][1]]) print(""The route found covers %d grid cells."" % len(route)) for i in range(1, len(route)): grid[route[i]] = 6 plt.cla() # for stopping simulation with the esc key. plt.gcf().canvas.mpl_connect('key_release_event', lambda event: [exit(0) if event.key == 'escape' else None]) plt.imshow(grid, cmap=cmap, norm=norm, interpolation=None) plt.show() plt.pause(1e-2) return route "," Finds a path between an initial and goal joint configuration using the A* Algorithm on a tororiadal grid. Args: grid: An occupancy grid (ndarray) start_node: Initial joint configuration (tuple) goal_node: Goal joint configuration (tuple) Returns: Obstacle-free route in joint space from start_node to goal_node ",44,192,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def astar_torus(grid, start_node, goal_node): colors = ['white', 'black', 'red', 'pink', 'yellow', 'green', 'orange'] levels = [0, 1, 2, 3, 4, 5, 6, 7] cmap, norm = from_levels_and_colors(levels, colors) grid[start_node] = 4 grid[goal_node] = 5 parent_map = [[() for _ in range(M)] for _ in range(M)] heuristic_map = calc_heuristic_map(M, goal_node) explored_heuristic_map = np.full((M, M), np.inf) distance_map = np.full((M, M), np.inf) explored_heuristic_map[start_node] = heuristic_map[start_node] distance_map[start_node] = 0 while True: grid[start_node] = 4 grid[goal_node] = 5 current_node = np.unravel_index( np.argmin(explored_heuristic_map, axis=None), explored_heuristic_map.shape) min_distance = np.min(explored_heuristic_map) if (current_node == goal_node) or np.isinf(min_distance): break grid[current_node] = 2 explored_heuristic_map[current_node] = np.inf i, j = current_node[0], current_node[1] neighbors = find_neighbors(i, j) for neighbor in neighbors: if grid[neighbor] == 0 or grid[neighbor] == 5: distance_map[neighbor] = distance_map[current_node] + 1 explored_heuristic_map[neighbor] = heuristic_map[neighbor] parent_map[neighbor[0]][neighbor[1]] = current_node grid[neighbor] = 3 if np.isinf(explored_heuristic_map[goal_node]): route = [] print(""No route found."") else: route = [goal_node] while parent_map[route[0][0]][route[0][1]] != (): route.insert(0, parent_map[route[0][0]][route[0][1]]) print(""The route found covers %d grid cells."" % len(route)) for i in range(1, len(route)): grid[route[i]] = 6 plt.cla() # for stopping simulation with the esc key. plt.gcf().canvas.mpl_connect('key_release_event', lambda event: [exit(0) if event.key == 'escape' else None]) plt.imshow(grid, cmap=cmap, norm=norm, interpolation=None) plt.show() plt.pause(1e-2) return route ``` ###Assistant : Finds a path between an initial and goal joint configuration using the A* Algorithm on a tororiadal grid. Args: grid: An occupancy grid (ndarray) start_node: Initial joint configuration (tuple) goal_node: Goal joint configuration (tuple) Returns: Obstacle-free route in joint space from start_node to goal_node " 1117,"def root_node(self) -> Optional[str]: if len(self.graph.nodes) < 1: return None return list(self.graph.nodes)[0] # List conversion is required, see networkx docs "," Returns the root node of the pipeline's graph. ",8,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def root_node(self) -> Optional[str]: if len(self.graph.nodes) < 1: return None return list(self.graph.nodes)[0] # List conversion is required, see networkx docs ``` ###Assistant : Returns the root node of the pipeline's graph. " 1118,"def as_coefficients_dict(self, *syms): if not syms: d = defaultdict(list) for ai in self.args: c, m = ai.as_coeff_Mul() d[m].append(c) for k, v in d.items(): if len(v) == 1: d[k] = v[0] else: d[k] = Add(*v) di = defaultdict(int) di.update(d) return di else: d = defaultdict(list) ind, dep = self.as_independent(*syms, as_Add=True) for i in Add.make_args(dep): c, x = i.as_independent(*syms, as_Add=False) d[x].append(c) d = {k: Add(*d[k]) for k in d} d.update({S.One: ind}) return d ","Return a dictionary mapping terms to their Rational coefficient. Since the dictionary is a defaultdict, inquiries about terms which were not present will return a coefficient of 0. If an expression is not an Add it is considered to have a single term. 
If symbols `syms` are provided, any multiplicative terms independent of them will be considered a coefficient and a regular dictionary of syms-dependent generators as keys and their corresponding coefficients as values will be returned. Examples ======== >>> from sympy import exp >>> from sympy.abc import a, x >>> (3*x + a*x + 4).as_coefficients_dict() {1: 4, x: 3, a*x: 1} >>> _[a] 0 >>> (3*a*x).as_coefficients_dict() {a*x: 3} >>> (3*exp(x)*x + a/x + 2).as_coefficients_dict(x) {1: 2, 1/x: a, x*exp(x): 3} ",121,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def as_coefficients_dict(self, *syms): if not syms: d = defaultdict(list) for ai in self.args: c, m = ai.as_coeff_Mul() d[m].append(c) for k, v in d.items(): if len(v) == 1: d[k] = v[0] else: d[k] = Add(*v) di = defaultdict(int) di.update(d) return di else: d = defaultdict(list) ind, dep = self.as_independent(*syms, as_Add=True) for i in Add.make_args(dep): c, x = i.as_independent(*syms, as_Add=False) d[x].append(c) d = {k: Add(*d[k]) for k in d} d.update({S.One: ind}) return d ``` ###Assistant : Return a dictionary mapping terms to their Rational coefficient. Since the dictionary is a defaultdict, inquiries about terms which were not present will return a coefficient of 0. If an expression is not an Add it is considered to have a single term. If symbols `syms` are provided, any multiplicative terms independent of them will be considered a coefficient and a regular dictionary of syms-dependent generators as keys and their corresponding coefficients as values will be returned. Examples ======== >>> from sympy import exp >>> from sympy.abc import a, x >>> (3*x + a*x + 4).as_coefficients_dict() {1: 4, x: 3, a*x: 1} >>> _[a] 0 >>> (3*a*x).as_coefficients_dict() {a*x: 3} >>> (3*exp(x)*x + a/x + 2).as_coefficients_dict(x) {1: 2, 1/x: a, x*exp(x): 3} " 1119,"def verify_emoji_code_foreign_keys(self) -> None: dct = {} for row in RealmEmoji.objects.all(): dct[row.id] = row if not dct: raise AssertionError(""test needs RealmEmoji rows"") count = 0 for row in Reaction.objects.filter(reaction_type=Reaction.REALM_EMOJI): realm_emoji_id = int(row.emoji_code) assert realm_emoji_id in dct self.assertEqual(dct[realm_emoji_id].name, row.emoji_name) self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id) count += 1 for row in UserStatus.objects.filter(reaction_type=UserStatus.REALM_EMOJI): realm_emoji_id = int(row.emoji_code) assert realm_emoji_id in dct self.assertEqual(dct[realm_emoji_id].name, row.emoji_name) self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id) count += 1 if count == 0: raise AssertionError(""test is meaningless without any pertinent rows"") "," DB tables that refer to RealmEmoji use int(emoji_code) as the foreign key. Those tables tend to de-normalize emoji_name due to our inheritance-based setup. This helper makes sure those invariants are intact, which is particularly tricky during the import/export process (or during conversions from things like Slack/RocketChat/MatterMost/etc.). ",46,73,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def verify_emoji_code_foreign_keys(self) -> None: dct = {} for row in RealmEmoji.objects.all(): dct[row.id] = row if not dct: raise AssertionError(""test needs RealmEmoji rows"") count = 0 for row in Reaction.objects.filter(reaction_type=Reaction.REALM_EMOJI): realm_emoji_id = int(row.emoji_code) assert realm_emoji_id in dct self.assertEqual(dct[realm_emoji_id].name, row.emoji_name) self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id) count += 1 for row in UserStatus.objects.filter(reaction_type=UserStatus.REALM_EMOJI): realm_emoji_id = int(row.emoji_code) assert realm_emoji_id in dct self.assertEqual(dct[realm_emoji_id].name, row.emoji_name) self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id) count += 1 if count == 0: raise AssertionError(""test is meaningless without any pertinent rows"") ``` ###Assistant : DB tables that refer to RealmEmoji use int(emoji_code) as the foreign key. Those tables tend to de-normalize emoji_name due to our inheritance-based setup. This helper makes sure those invariants are intact, which is particularly tricky during the import/export process (or during conversions from things like Slack/RocketChat/MatterMost/etc.). " 1120,"def format_string_to_json(balance_info): Working Account|KES|481000.00|481000.00|0.00|0.00 balance_dict = frappe._dict() for account_info in balance_info.split(""&""): account_info = account_info.split(""|"") balance_dict[account_info[0]] = dict( current_balance=fmt_money(account_info[2], currency=""KES""), available_balance=fmt_money(account_info[3], currency=""KES""), reserved_balance=fmt_money(account_info[4], currency=""KES""), uncleared_balance=fmt_money(account_info[5], currency=""KES""), ) return dumps(balance_dict) "," Format string to json. e.g: => {'Working Account': {'current_balance': '481000.00', 'available_balance': '481000.00', 'reserved_balance': '0.00', 'uncleared_balance': '0.00'}} ",16,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def format_string_to_json(balance_info): Working Account|KES|481000.00|481000.00|0.00|0.00 balance_dict = frappe._dict() for account_info in balance_info.split(""&""): account_info = account_info.split(""|"") balance_dict[account_info[0]] = dict( current_balance=fmt_money(account_info[2], currency=""KES""), available_balance=fmt_money(account_info[3], currency=""KES""), reserved_balance=fmt_money(account_info[4], currency=""KES""), uncleared_balance=fmt_money(account_info[5], currency=""KES""), ) return dumps(balance_dict) ``` ###Assistant : Format string to json. e.g: => {'Working Account': {'current_balance': '481000.00', 'available_balance': '481000.00', 'reserved_balance': '0.00', 'uncleared_balance': '0.00'}} " 1121,"def _validate_path(self) -> list[Any]: msg = ( ""xpath does not return any nodes or attributes. "" ""Be sure to specify in `xpath` the parent nodes of "" ""children and attributes to parse. 
"" ""If document uses namespaces denoted with "" ""xmlns, be sure to define namespaces and "" ""use them in xpath."" ) try: elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces) children = [ch for el in elems for ch in el.findall(""*"")] attrs = {k: v for el in elems for k, v in el.attrib.items()} if elems is None: raise ValueError(msg) if elems is not None: if self.elems_only and children == []: raise ValueError(msg) if self.attrs_only and attrs == {}: raise ValueError(msg) if children == [] and attrs == {}: raise ValueError(msg) except (KeyError, SyntaxError): raise SyntaxError( ""You have used an incorrect or unsupported XPath "" ""expression for etree library or you used an "" ""undeclared namespace prefix."" ) return elems "," Notes ----- `etree` supports limited XPath. If user attempts a more complex expression syntax error will raise. ",17,148,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _validate_path(self) -> list[Any]: msg = ( ""xpath does not return any nodes or attributes. "" ""Be sure to specify in `xpath` the parent nodes of "" ""children and attributes to parse. "" ""If document uses namespaces denoted with "" ""xmlns, be sure to define namespaces and "" ""use them in xpath."" ) try: elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces) children = [ch for el in elems for ch in el.findall(""*"")] attrs = {k: v for el in elems for k, v in el.attrib.items()} if elems is None: raise ValueError(msg) if elems is not None: if self.elems_only and children == []: raise ValueError(msg) if self.attrs_only and attrs == {}: raise ValueError(msg) if children == [] and attrs == {}: raise ValueError(msg) except (KeyError, SyntaxError): raise SyntaxError( ""You have used an incorrect or unsupported XPath "" ""expression for etree library or you used an "" ""undeclared namespace prefix."" ) return elems ``` ###Assistant : Notes ----- `etree` supports limited XPath. If user attempts a more complex expression syntax error will raise. " 1122,"def mask(self, row_labels, col_labels): new_obj = super().mask(row_labels, col_labels) if isinstance(row_labels, slice) and isinstance( self._length_cache, ObjectIDType ): new_obj._length_cache = compute_sliced_len.remote( row_labels, self._length_cache ) if isinstance(col_labels, slice) and isinstance( self._width_cache, ObjectIDType ): new_obj._width_cache = compute_sliced_len.remote( col_labels, self._width_cache ) return new_obj "," Lazily create a mask that extracts the indices provided. Parameters ---------- row_labels : list-like, slice or label The row labels for the rows to extract. col_labels : list-like, slice or label The column labels for the columns to extract. Returns ------- PandasOnRayDataframePartition A new ``PandasOnRayDataframePartition`` object. ",46,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mask(self, row_labels, col_labels): new_obj = super().mask(row_labels, col_labels) if isinstance(row_labels, slice) and isinstance( self._length_cache, ObjectIDType ): new_obj._length_cache = compute_sliced_len.remote( row_labels, self._length_cache ) if isinstance(col_labels, slice) and isinstance( self._width_cache, ObjectIDType ): new_obj._width_cache = compute_sliced_len.remote( col_labels, self._width_cache ) return new_obj ``` ###Assistant : Lazily create a mask that extracts the indices provided. 
Parameters ---------- row_labels : list-like, slice or label The row labels for the rows to extract. col_labels : list-like, slice or label The column labels for the columns to extract. Returns ------- PandasOnRayDataframePartition A new ``PandasOnRayDataframePartition`` object. " 1123,"def postprocess(data_out, label_list, top_k): output = [] for result in data_out: result_i = softmax(result) output_i = {} indexs = np.argsort(result_i)[::-1][0:top_k] for index in indexs: label = label_list[index].split(',')[0] output_i[label] = float(result_i[index]) output.append(output_i) return output "," Postprocess output of network, one image at a time. Args: data_out (numpy.ndarray): output data of network. label_list (list): list of label. top_k (int): Return top k results. ",27,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def postprocess(data_out, label_list, top_k): output = [] for result in data_out: result_i = softmax(result) output_i = {} indexs = np.argsort(result_i)[::-1][0:top_k] for index in indexs: label = label_list[index].split(',')[0] output_i[label] = float(result_i[index]) output.append(output_i) return output ``` ###Assistant : Postprocess output of network, one image at a time. Args: data_out (numpy.ndarray): output data of network. label_list (list): list of label. top_k (int): Return top k results. " 1124,"def requests(self): if hasattr(self, '_requests'): return self._requests else: if not hasattr(self, 'requests_by_class'): self.requests_by_class = {} if self.__class__.__name__ not in self.requests_by_class: self.requests_by_class[self.__class__.__name__] = {} # we need to copy so that different instances with different (requests) in input do not disturb one another self._requests = copy.copy(self.requests_by_class[self.__class__.__name__]) return self._requests "," Get the request dictionary corresponding to this specific class :return: Returns the requests corresponding to the specific Executor instance class ",20,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def requests(self): if hasattr(self, '_requests'): return self._requests else: if not hasattr(self, 'requests_by_class'): self.requests_by_class = {} if self.__class__.__name__ not in self.requests_by_class: self.requests_by_class[self.__class__.__name__] = {} # we need to copy so that different instances with different (requests) in input do not disturb one another self._requests = copy.copy(self.requests_by_class[self.__class__.__name__]) return self._requests ``` ###Assistant : Get the request dictionary corresponding to this specific class :return: Returns the requests corresponding to the specific Executor instance class " 1125,"def __call__(self, *args, **kwargs): r result = super().__call__(*args, **kwargs) if isinstance(args[0], list) and all(isinstance(el, str) for el in args[0]): return [res[0] for res in result] return result "," Generate the output text(s) using text(s) given as inputs. Args: args (`str` or `List[str]`): Input text for the encoder. return_tensors (`bool`, *optional*, defaults to `False`): Whether or not to include the tensors of predictions (as token indices) in the outputs. return_text (`bool`, *optional*, defaults to `True`): Whether or not to include the decoded texts in the outputs. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. 
truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`): The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE` (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's max_length instead of throwing an error down the line. generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework [here](./model#generative-models)). Return: A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: - **generated_text** (`str`, present when `return_text=True`) -- The generated text. - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token ids of the generated text. ",188,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self, *args, **kwargs): r result = super().__call__(*args, **kwargs) if isinstance(args[0], list) and all(isinstance(el, str) for el in args[0]): return [res[0] for res in result] return result ``` ###Assistant : Generate the output text(s) using text(s) given as inputs. Args: args (`str` or `List[str]`): Input text for the encoder. return_tensors (`bool`, *optional*, defaults to `False`): Whether or not to include the tensors of predictions (as token indices) in the outputs. return_text (`bool`, *optional*, defaults to `True`): Whether or not to include the decoded texts in the outputs. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the potential extra spaces in the text output. truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`): The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE` (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's max_length instead of throwing an error down the line. generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework [here](./model#generative-models)). Return: A list or a list of list of `dict`: Each result comes as a dictionary with the following keys: - **generated_text** (`str`, present when `return_text=True`) -- The generated text. - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token ids of the generated text. " 1126,"def rewind_body(prepared_request): body_seek = getattr(prepared_request.body, ""seek"", None) if body_seek is not None and isinstance( prepared_request._body_position, integer_types ): try: body_seek(prepared_request._body_position) except OSError: raise UnrewindableBodyError( ""An error occurred when rewinding request body for redirect."" ) else: raise UnrewindableBodyError(""Unable to rewind request body for redirect."") ","Move file pointer back to its recorded starting position so it can be read again on redirect. ",17,42,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def rewind_body(prepared_request): body_seek = getattr(prepared_request.body, ""seek"", None) if body_seek is not None and isinstance( prepared_request._body_position, integer_types ): try: body_seek(prepared_request._body_position) except OSError: raise UnrewindableBodyError( ""An error occurred when rewinding request body for redirect."" ) else: raise UnrewindableBodyError(""Unable to rewind request body for redirect."") ``` ###Assistant : Move file pointer back to its recorded starting position so it can be read again on redirect. " 1127,"def nested_concat(tensors, new_tensors, padding_index=-100): assert type(tensors) == type( new_tensors ), f""Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."" if isinstance(tensors, (list, tuple)): return type(tensors)(nested_concat( t, n, padding_index=padding_index) for t, n in zip(tensors, new_tensors)) elif isinstance(tensors, paddle.Tensor): return paddle_pad_and_concatenate( tensors, new_tensors, padding_index=padding_index) elif isinstance(tensors, np.ndarray): return numpy_pad_and_concatenate( tensors, new_tensors, padding_index=padding_index) else: raise TypeError( f""Unsupported type for concatenation: got {type(tensors)}"") "," Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or nested list/tuples of tensors. ",25,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def nested_concat(tensors, new_tensors, padding_index=-100): assert type(tensors) == type( new_tensors ), f""Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."" if isinstance(tensors, (list, tuple)): return type(tensors)(nested_concat( t, n, padding_index=padding_index) for t, n in zip(tensors, new_tensors)) elif isinstance(tensors, paddle.Tensor): return paddle_pad_and_concatenate( tensors, new_tensors, padding_index=padding_index) elif isinstance(tensors, np.ndarray): return numpy_pad_and_concatenate( tensors, new_tensors, padding_index=padding_index) else: raise TypeError( f""Unsupported type for concatenation: got {type(tensors)}"") ``` ###Assistant : Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or nested list/tuples of tensors. " 1128,"def kubernetes_manifest(): template = Template( (prefect.__module_path__ / ""cli"" / ""templates"" / ""kubernetes.yaml"").read_text() ) manifest = template.substitute( { ""image_name"": get_prefect_image_name(), } ) print(manifest) "," Generates a kubernetes manifest to deploy Orion to a cluster. Example: $ prefect orion kubernetes-manifest | kubectl apply -f - ",20,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def kubernetes_manifest(): template = Template( (prefect.__module_path__ / ""cli"" / ""templates"" / ""kubernetes.yaml"").read_text() ) manifest = template.substitute( { ""image_name"": get_prefect_image_name(), } ) print(manifest) ``` ###Assistant : Generates a kubernetes manifest to deploy Orion to a cluster. 
Example: $ prefect orion kubernetes-manifest | kubectl apply -f - " 1129,"def test_queries_when_requested_project_is_head_of_trace(self, mock_query, mock_querybuilder): # Case A: Head of trace project self.login_as(self.user) heart = self.create_project( name=""Heart"", slug=""heart"", teams=[self.team], fire_project_created=True ) mock_query.side_effect = [ {""data"": [{""count()"": 1000}]}, ] mock_querybuilder.side_effect = [ { ""data"": [ { ""trace"": ""6503ee33b7bc43aead1facaa625a5dba"", ""id"": ""6ddc83ee612b4e89b95b5278c8fd188f"", ""random_number() AS random_number"": 4255299100, ""is_root"": 1, }, { ""trace"": ""6503ee33b7bc43aead1facaa625a5dba"", ""id"": ""0b127a578f8440c793f9ba1de595229f"", ""random_number() AS random_number"": 3976019453, ""is_root"": 1, }, ] }, { ""data"": [ { ""project"": self.project.slug, ""project_id"": self.project.id, ""count"": 2, ""root_count"": 2, }, { ""project"": heart.slug, ""project_id"": heart.id, ""count"": 1, ""root_count"": 0, }, ] }, ] end_time = timezone.now() start_time = end_time - timedelta(hours=1) query = ""environment:dev"" requested_sample_size = 2 calls = self.generate_fetch_transactions_count_query( query, start_time, end_time, requested_sample_size ) snuba_query_random_transactions = random_transactions_snuba_query( query, requested_sample_size, start_time, end_time, self.project ) snuba_query_project_stats = project_stats_snuba_query( query, start_time, end_time, self.project, trace_ids=[""6503ee33b7bc43aead1facaa625a5dba""] * 2, ) with Feature({""organizations:server-side-sampling"": True}): response = self.client.get( f""{self.endpoint}?sampleSize={requested_sample_size}&query={query}"" ) assert response.status_code == 200 assert mock_query.mock_calls == calls assert len(mock_querybuilder.call_args_list) == 2 self.assert_mocked_query_calls( snuba_query_random_transactions, snuba_query_project_stats, mock_querybuilder ) response_data = response.json() assert response_data[""projectBreakdown""] == [ {""project_id"": self.project.id, ""project"": self.project.slug, ""count()"": 2}, {""project_id"": heart.id, ""project"": heart.slug, ""count()"": 1}, ] assert response_data[""parentProjectBreakdown""] == [ {""project"": self.project.slug, ""projectId"": self.project.id, ""percentage"": 1.0} ] "," Case A: Requesting for a project (bar) that is root but is a head of distributed traces Example of smart query response (DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_PROJECT_STATS): |---------+-------+------| | project | count | root | |---------+-------+------| | bar | 100 | 100 | | heart | 5 | 0 | |---------+-------+------| ",47,183,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_queries_when_requested_project_is_head_of_trace(self, mock_query, mock_querybuilder): # Case A: Head of trace project self.login_as(self.user) heart = self.create_project( name=""Heart"", slug=""heart"", teams=[self.team], fire_project_created=True ) mock_query.side_effect = [ {""data"": [{""count()"": 1000}]}, ] mock_querybuilder.side_effect = [ { ""data"": [ { ""trace"": ""6503ee33b7bc43aead1facaa625a5dba"", ""id"": ""6ddc83ee612b4e89b95b5278c8fd188f"", ""random_number() AS random_number"": 4255299100, ""is_root"": 1, }, { ""trace"": ""6503ee33b7bc43aead1facaa625a5dba"", ""id"": ""0b127a578f8440c793f9ba1de595229f"", ""random_number() AS random_number"": 3976019453, ""is_root"": 1, }, ] }, { ""data"": [ { ""project"": self.project.slug, ""project_id"": self.project.id, ""count"": 2, ""root_count"": 2, }, { ""project"": heart.slug, ""project_id"": heart.id, ""count"": 1, ""root_count"": 0, }, ] }, ] end_time = timezone.now() start_time = end_time - timedelta(hours=1) query = ""environment:dev"" requested_sample_size = 2 calls = self.generate_fetch_transactions_count_query( query, start_time, end_time, requested_sample_size ) snuba_query_random_transactions = random_transactions_snuba_query( query, requested_sample_size, start_time, end_time, self.project ) snuba_query_project_stats = project_stats_snuba_query( query, start_time, end_time, self.project, trace_ids=[""6503ee33b7bc43aead1facaa625a5dba""] * 2, ) with Feature({""organizations:server-side-sampling"": True}): response = self.client.get( f""{self.endpoint}?sampleSize={requested_sample_size}&query={query}"" ) assert response.status_code == 200 assert mock_query.mock_calls == calls assert len(mock_querybuilder.call_args_list) == 2 self.assert_mocked_query_calls( snuba_query_random_transactions, snuba_query_project_stats, mock_querybuilder ) response_data = response.json() assert response_data[""projectBreakdown""] == [ {""project_id"": self.project.id, ""project"": self.project.slug, ""count()"": 2}, {""project_id"": heart.id, ""project"": heart.slug, ""count()"": 1}, ] assert response_data[""parentProjectBreakdown""] == [ {""project"": self.project.slug, ""projectId"": self.project.id, ""percentage"": 1.0} ] ``` ###Assistant : Case A: Requesting for a project (bar) that is root but is a head of distributed traces Example of smart query response (DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_PROJECT_STATS): |---------+-------+------| | project | count | root | |---------+-------+------| | bar | 100 | 100 | | heart | 5 | 0 | |---------+-------+------| " 1130,"def validate_leave_allocation_against_leave_application(ledger): leave_application_records = frappe.db.sql_list( , (ledger.employee, ledger.leave_type, ledger.from_date, ledger.to_date), ) if leave_application_records: frappe.throw( _(""Leave allocation {0} is linked with the Leave Application {1}"").format( ledger.transaction_name, "", "".join(leave_application_records) ) ) ","Checks that leave allocation has no leave application against it SELECT transaction_name FROM `tabLeave Ledger Entry` WHERE employee=%s AND leave_type=%s AND transaction_type='Leave Application' AND from_date>=%s AND to_date<=%s ",27,29,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def validate_leave_allocation_against_leave_application(ledger): leave_application_records = frappe.db.sql_list( , (ledger.employee, ledger.leave_type, ledger.from_date, ledger.to_date), ) if leave_application_records: frappe.throw( _(""Leave allocation {0} is linked with the Leave Application {1}"").format( ledger.transaction_name, "", "".join(leave_application_records) ) ) ``` ###Assistant : Checks that leave allocation has no leave application against it SELECT transaction_name FROM `tabLeave Ledger Entry` WHERE employee=%s AND leave_type=%s AND transaction_type='Leave Application' AND from_date>=%s AND to_date<=%s " 1131,"def dce_rpc_endianess(pkt): try: endianness = pkt.underlayer.endian except AttributeError: # handle the case where a PNIO class is # built without its DCE-RPC under-layer # i.e there is no endianness indication return ""!"" if endianness == 0: # big endian return "">"" elif endianness == 1: # little endian return ""<"" else: return ""!"" ",determine the symbol for the endianness of the DCE/RPC,9,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dce_rpc_endianess(pkt): try: endianness = pkt.underlayer.endian except AttributeError: # handle the case where a PNIO class is # built without its DCE-RPC under-layer # i.e there is no endianness indication return ""!"" if endianness == 0: # big endian return "">"" elif endianness == 1: # little endian return ""<"" else: return ""!"" ``` ###Assistant : determine the symbol for the endianness of the DCE/RPC " 1132,"def _get_extra_hosts(self, docker_client) -> Dict[str, str]: if sys.platform == ""linux"" and ( # Do not warn if the user has specified a host manually that does not use # a local address ""PREFECT_API_URL"" not in self.env or re.search( "".*(localhost)|(127.0.0.1)|(host.docker.internal).*"", self.env[""PREFECT_API_URL""], ) ): user_version = packaging.version.parse(docker_client.version()[""Version""]) required_version = packaging.version.parse(""20.10.0"") if user_version < required_version: warnings.warn( ""`host.docker.internal` could not be automatically resolved to your "" ""local ip address. 
This feature is not supported on Docker Engine "" f""v{user_version}, upgrade to v{required_version}+ if you "" ""encounter issues."" ) return {} else: # Compatibility for linux -- https://github.com/docker/cli/issues/2290 # Only supported by Docker v20.10.0+ which is our minimum recommend version return {""host.docker.internal"": ""host-gateway""} ``` ###Assistant : A host.docker.internal -> host-gateway mapping is necessary for communicating with the API on Linux machines. Docker Desktop on macOS will automatically already have this mapping. " 1133,"def test_del_store(): with patch(""salt.modules.win_certutil.get_cert_serial"") as cert_serial_mock: cmd_mock = MagicMock( return_value=( ""CertInfo\r\n"" ""================ Certificate 0 ================\r\n"" ""Serial Number: 180720d39cd2db3244ba037417241e90\r\n"" ""OtherStuff"" ) ) cache_mock = MagicMock(return_value=""/tmp/cert.cer"") cert_serial_mock.return_value = ""ABCDEF"" with patch.dict( certutil.__salt__, {""cmd.run"": cmd_mock, ""cp.cache_file"": cache_mock} ), patch(""os.path.exists"", MagicMock(return_value=True)): certutil.del_store(""salt://path/to/file"", ""TrustedPublisher"") cmd_mock.assert_called_once_with( 'certutil.exe -delstore TrustedPublisher ""ABCDEF""' ) cache_mock.assert_called_once_with(""salt://path/to/file"", ""base"") "," Test removing a certificate to a specific store ",8,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_del_store(): with patch(""salt.modules.win_certutil.get_cert_serial"") as cert_serial_mock: cmd_mock = MagicMock( return_value=( ""CertInfo\r\n"" ""================ Certificate 0 ================\r\n"" ""Serial Number: 180720d39cd2db3244ba037417241e90\r\n"" ""OtherStuff"" ) ) cache_mock = MagicMock(return_value=""/tmp/cert.cer"") cert_serial_mock.return_value = ""ABCDEF"" with patch.dict( certutil.__salt__, {""cmd.run"": cmd_mock, ""cp.cache_file"": cache_mock} ), patch(""os.path.exists"", MagicMock(return_value=True)): certutil.del_store(""salt://path/to/file"", ""TrustedPublisher"") cmd_mock.assert_called_once_with( 'certutil.exe -delstore TrustedPublisher ""ABCDEF""' ) cache_mock.assert_called_once_with(""salt://path/to/file"", ""base"") ``` ###Assistant : Test removing a certificate to a specific store " 1134,"def _select_backend(config): backend_arg = config.getoption('--qute-backend') backend_env = os.environ.get('QUTE_TESTS_BACKEND') backend = backend_arg or backend_env or _auto_select_backend() # Fail early if selected backend is not available # pylint: disable=unused-import if backend == 'webkit': import PyQt5.QtWebKitWidgets elif backend == 'webengine': import PyQt5.QtWebEngineWidgets else: raise utils.Unreachable(backend) return backend ","Select the backend for running tests. The backend is auto-selected in the following manner: 1. Use QtWebKit if available 2. Otherwise use QtWebEngine as a fallback Auto-selection is overridden by either passing a backend via `--qute-backend=` or setting the environment variable `QUTE_TESTS_BACKEND=`. Args: config: pytest config Raises: ImportError if the selected backend is not available. Returns: The selected backend as a string (e.g. 'webkit'). ",64,44,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _select_backend(config): backend_arg = config.getoption('--qute-backend') backend_env = os.environ.get('QUTE_TESTS_BACKEND') backend = backend_arg or backend_env or _auto_select_backend() # Fail early if selected backend is not available # pylint: disable=unused-import if backend == 'webkit': import PyQt5.QtWebKitWidgets elif backend == 'webengine': import PyQt5.QtWebEngineWidgets else: raise utils.Unreachable(backend) return backend ``` ###Assistant : Select the backend for running tests. The backend is auto-selected in the following manner: 1. Use QtWebKit if available 2. Otherwise use QtWebEngine as a fallback Auto-selection is overridden by either passing a backend via `--qute-backend=` or setting the environment variable `QUTE_TESTS_BACKEND=`. Args: config: pytest config Raises: ImportError if the selected backend is not available. Returns: The selected backend as a string (e.g. 'webkit'). " 1135,"def equals(self, other): from sympy.logic.inference import satisfiable from sympy.core.relational import Relational if self.has(Relational) or other.has(Relational): raise NotImplementedError('handling of relationals') return self.atoms() == other.atoms() and \ not satisfiable(Not(Equivalent(self, other))) "," Returns True if the given formulas have the same truth table. For two formulas to be equal they must have the same literals. Examples ======== >>> from sympy.abc import A, B, C >>> from sympy import And, Or, Not >>> (A >> B).equals(~B >> ~A) True >>> Not(And(A, B, C)).equals(And(Not(A), Not(B), Not(C))) False >>> Not(And(A, Not(A))).equals(Or(B, Not(B))) False ",58,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def equals(self, other): from sympy.logic.inference import satisfiable from sympy.core.relational import Relational if self.has(Relational) or other.has(Relational): raise NotImplementedError('handling of relationals') return self.atoms() == other.atoms() and \ not satisfiable(Not(Equivalent(self, other))) ``` ###Assistant : Returns True if the given formulas have the same truth table. For two formulas to be equal they must have the same literals. Examples ======== >>> from sympy.abc import A, B, C >>> from sympy import And, Or, Not >>> (A >> B).equals(~B >> ~A) True >>> Not(And(A, B, C)).equals(And(Not(A), Not(B), Not(C))) False >>> Not(And(A, Not(A))).equals(Or(B, Not(B))) False " 1136,"def get_latest_stock_qty(item_code, warehouse=None): values, condition = [item_code], """" if warehouse: lft, rgt, is_group = frappe.db.get_value(""Warehouse"", warehouse, [""lft"", ""rgt"", ""is_group""]) if is_group: values.extend([lft, rgt]) condition += ""and exists (\ select name from `tabWarehouse` wh where wh.name = tabBin.warehouse\ and wh.lft >= %s and wh.rgt <= %s)"" else: values.append(warehouse) condition += "" AND warehouse = %s"" actual_qty = frappe.db.sql( .format( condition ), values, )[0][0] return actual_qty ","select sum(actual_qty) from tabBin where item_code=%s {0}",7,64,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_latest_stock_qty(item_code, warehouse=None): values, condition = [item_code], """" if warehouse: lft, rgt, is_group = frappe.db.get_value(""Warehouse"", warehouse, [""lft"", ""rgt"", ""is_group""]) if is_group: values.extend([lft, rgt]) condition += ""and exists (\ select name from `tabWarehouse` wh where wh.name = tabBin.warehouse\ and wh.lft >= %s and wh.rgt <= %s)"" else: values.append(warehouse) condition += "" AND warehouse = %s"" actual_qty = frappe.db.sql( .format( condition ), values, )[0][0] return actual_qty ``` ###Assistant : select sum(actual_qty) from tabBin where item_code=%s {0} " 1137,"def get_feature_names_out(self, input_features=None): input_features = _check_feature_names_in( self, input_features, generate_names=True ) est_name = self.__class__.__name__.lower() names_list = [f""{est_name}_{name}_sqrt"" for name in input_features] for j in range(1, self.sample_steps): cos_names = [f""{est_name}_{name}_cos{j}"" for name in input_features] sin_names = [f""{est_name}_{name}_sin{j}"" for name in input_features] names_list.extend(cos_names + sin_names) return np.asarray(names_list, dtype=object) ","Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Only used to validate feature names with the names seen in :meth:`fit`. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. ",39,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_feature_names_out(self, input_features=None): input_features = _check_feature_names_in( self, input_features, generate_names=True ) est_name = self.__class__.__name__.lower() names_list = [f""{est_name}_{name}_sqrt"" for name in input_features] for j in range(1, self.sample_steps): cos_names = [f""{est_name}_{name}_cos{j}"" for name in input_features] sin_names = [f""{est_name}_{name}_sin{j}"" for name in input_features] names_list.extend(cos_names + sin_names) return np.asarray(names_list, dtype=object) ``` ###Assistant : Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Only used to validate feature names with the names seen in :meth:`fit`. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. " 1138,"def test_render_valid_image_as_context_variable(self): context = {""image"": self.image, ""image_node"": ""fake value""} node = ImageNode(Variable(""image""), ""original"", ""image_node"") rendered = node.render(context) self.assertEqual(rendered, """") self.assertIsInstance(context[""image_node""], Rendition) "," Tests that an ImageNode with a valid image and a context variable name renders an empty string and puts a rendition in the context variable ",25,21,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_render_valid_image_as_context_variable(self): context = {""image"": self.image, ""image_node"": ""fake value""} node = ImageNode(Variable(""image""), ""original"", ""image_node"") rendered = node.render(context) self.assertEqual(rendered, """") self.assertIsInstance(context[""image_node""], Rendition) ``` ###Assistant : Tests that an ImageNode with a valid image and a context variable name renders an empty string and puts a rendition in the context variable " 1139,"def dup_cauchy_lower_bound(f, K): g = dup_reverse(f) if len(g) < 2: raise PolynomialError('Polynomial has no non-zero roots.') if K.is_ZZ: K = K.get_field() b = dup_cauchy_upper_bound(g, K) return K.one / b ","Compute the Cauchy lower bound on the absolute value of all non-zero roots of f, real or complex.",18,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dup_cauchy_lower_bound(f, K): g = dup_reverse(f) if len(g) < 2: raise PolynomialError('Polynomial has no non-zero roots.') if K.is_ZZ: K = K.get_field() b = dup_cauchy_upper_bound(g, K) return K.one / b ``` ###Assistant : Compute the Cauchy lower bound on the absolute value of all non-zero roots of f, real or complex. " 1140,"def call_deploy(cls, fname, col_partitions, storage_options, **kwargs): from pyarrow.parquet import ParquetFile from modin.core.storage_formats.pandas.parsers import ParquetFileToRead # If we don't have any columns to read, we should just return an empty # set of references. if len(col_partitions) == 0: return [] filesystem, parquet_files = cls.get_fsspec_files(fname, storage_options) row_groups_per_file = [] num_row_groups = 0 # Count up the total number of row groups across all files and # keep track of row groups per file to use later. for file in parquet_files: with filesystem.open(file) as f: row_groups = ParquetFile(f).num_row_groups row_groups_per_file.append(row_groups) num_row_groups += row_groups # step determines how many row groups are going to be in a partition step = compute_chunksize( num_row_groups, NPartitions.get(), min_block_size=1, ) current_partition_size = 0 file_index = 0 partition_files = [] # 2D array - each element contains list of chunks to read row_groups_used_in_current_file = 0 total_row_groups_added = 0 # On each iteration, we add a chunk of one file. That will # take us either to the end of a partition, or to the end # of a file. while total_row_groups_added < num_row_groups: if current_partition_size == 0: partition_files.append([]) partition_file = partition_files[-1] file_path = parquet_files[file_index] row_group_start = row_groups_used_in_current_file row_groups_left_in_file = ( row_groups_per_file[file_index] - row_groups_used_in_current_file ) row_groups_left_for_this_partition = step - current_partition_size if row_groups_left_for_this_partition <= row_groups_left_in_file: # File has at least what we need to finish partition # So finish this partition and start a new one. num_row_groups_to_add = row_groups_left_for_this_partition current_partition_size = 0 else: # File doesn't have enough to complete this partition. Add # it into current partition and go to next file. 
num_row_groups_to_add = row_groups_left_in_file current_partition_size += num_row_groups_to_add if num_row_groups_to_add == row_groups_left_in_file: file_index += 1 row_groups_used_in_current_file = 0 else: row_groups_used_in_current_file += num_row_groups_to_add partition_file.append( ParquetFileToRead( file_path, row_group_start, row_group_start + num_row_groups_to_add ) ) total_row_groups_added += num_row_groups_to_add assert ( total_row_groups_added == num_row_groups ), ""row groups added does not match total num of row groups across parquet files"" all_partitions = [] for files_to_read in partition_files: all_partitions.append( [ cls.deploy( cls.parse, files_for_parser=files_to_read, columns=cols, num_returns=3, storage_options=storage_options, **kwargs, ) for cols in col_partitions ] ) return all_partitions "," Deploy remote tasks to the workers with passed parameters. Parameters ---------- fname : str, path object or file-like object Name of the file to read. col_partitions : list List of arrays with columns names that should be read by each partition. storage_options : dict Parameters for specific storage engine. **kwargs : dict Parameters of deploying read_* function. Returns ------- List Array with references to the task deploy result for each partition. ",71,327,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_deploy(cls, fname, col_partitions, storage_options, **kwargs): from pyarrow.parquet import ParquetFile from modin.core.storage_formats.pandas.parsers import ParquetFileToRead # If we don't have any columns to read, we should just return an empty # set of references. if len(col_partitions) == 0: return [] filesystem, parquet_files = cls.get_fsspec_files(fname, storage_options) row_groups_per_file = [] num_row_groups = 0 # Count up the total number of row groups across all files and # keep track of row groups per file to use later. for file in parquet_files: with filesystem.open(file) as f: row_groups = ParquetFile(f).num_row_groups row_groups_per_file.append(row_groups) num_row_groups += row_groups # step determines how many row groups are going to be in a partition step = compute_chunksize( num_row_groups, NPartitions.get(), min_block_size=1, ) current_partition_size = 0 file_index = 0 partition_files = [] # 2D array - each element contains list of chunks to read row_groups_used_in_current_file = 0 total_row_groups_added = 0 # On each iteration, we add a chunk of one file. That will # take us either to the end of a partition, or to the end # of a file. while total_row_groups_added < num_row_groups: if current_partition_size == 0: partition_files.append([]) partition_file = partition_files[-1] file_path = parquet_files[file_index] row_group_start = row_groups_used_in_current_file row_groups_left_in_file = ( row_groups_per_file[file_index] - row_groups_used_in_current_file ) row_groups_left_for_this_partition = step - current_partition_size if row_groups_left_for_this_partition <= row_groups_left_in_file: # File has at least what we need to finish partition # So finish this partition and start a new one. num_row_groups_to_add = row_groups_left_for_this_partition current_partition_size = 0 else: # File doesn't have enough to complete this partition. Add # it into current partition and go to next file. 
num_row_groups_to_add = row_groups_left_in_file current_partition_size += num_row_groups_to_add if num_row_groups_to_add == row_groups_left_in_file: file_index += 1 row_groups_used_in_current_file = 0 else: row_groups_used_in_current_file += num_row_groups_to_add partition_file.append( ParquetFileToRead( file_path, row_group_start, row_group_start + num_row_groups_to_add ) ) total_row_groups_added += num_row_groups_to_add assert ( total_row_groups_added == num_row_groups ), ""row groups added does not match total num of row groups across parquet files"" all_partitions = [] for files_to_read in partition_files: all_partitions.append( [ cls.deploy( cls.parse, files_for_parser=files_to_read, columns=cols, num_returns=3, storage_options=storage_options, **kwargs, ) for cols in col_partitions ] ) return all_partitions ``` ###Assistant : Deploy remote tasks to the workers with passed parameters. Parameters ---------- fname : str, path object or file-like object Name of the file to read. col_partitions : list List of arrays with columns names that should be read by each partition. storage_options : dict Parameters for specific storage engine. **kwargs : dict Parameters of deploying read_* function. Returns ------- List Array with references to the task deploy result for each partition. " 1141,"def get_connection(self): if self.lib == _PSYCOPG_LIB_NAME: import psycopg2 return psycopg2.connect(*self.args, **self.kwargs) if self.lib == _SQLALCHEMY_LIB_NAME: from sqlalchemy import create_engine return create_engine(*self.args, **self.kwargs).connect() raise UnsupportedDatabaseException(""Unsupported database library"") "," Make the database connection and get it. For psycopg2, pass all arguments to psycopg2.connect() and return the result of psycopg2.connect(). For sqlalchemy, pass all arguments to sqlalchemy.create_engine() and return the result of calling connect() on the engine. Returns ------- Any The open database connection. ",44,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_connection(self): if self.lib == _PSYCOPG_LIB_NAME: import psycopg2 return psycopg2.connect(*self.args, **self.kwargs) if self.lib == _SQLALCHEMY_LIB_NAME: from sqlalchemy import create_engine return create_engine(*self.args, **self.kwargs).connect() raise UnsupportedDatabaseException(""Unsupported database library"") ``` ###Assistant : Make the database connection and get it. For psycopg2, pass all arguments to psycopg2.connect() and return the result of psycopg2.connect(). For sqlalchemy, pass all arguments to sqlalchemy.create_engine() and return the result of calling connect() on the engine. Returns ------- Any The open database connection. " 1142,"def argmax(x, axis=-1): return tf.argmax(x, axis) @keras_export(""keras.backend.argmin"") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs","Returns the index of the maximum value along an axis. Args: x: Tensor or variable. axis: axis along which to perform the reduction. Returns: A tensor. ",26,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def argmax(x, axis=-1): return tf.argmax(x, axis) @keras_export(""keras.backend.argmin"") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs ``` ###Assistant : Returns the index of the maximum value along an axis. Args: x: Tensor or variable. axis: axis along which to perform the reduction. Returns: A tensor. 
" 1143,"def print_rules(self) -> Iterator[str]: yield from self._defined_facts_lines() yield '' yield '' yield from self._full_implications_lines() yield '' yield '' yield from self._prereq_lines() yield '' yield '' yield from self._beta_rules_lines() yield '' yield '' yield ""generated_assumptions = {'defined_facts': defined_facts, 'full_implications': full_implications,"" yield "" 'prereq': prereq, 'beta_rules': beta_rules, 'beta_triggers': beta_triggers}"" yield '' yield '' ", Returns a generator with lines to represent the facts and rules ,11,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def print_rules(self) -> Iterator[str]: yield from self._defined_facts_lines() yield '' yield '' yield from self._full_implications_lines() yield '' yield '' yield from self._prereq_lines() yield '' yield '' yield from self._beta_rules_lines() yield '' yield '' yield ""generated_assumptions = {'defined_facts': defined_facts, 'full_implications': full_implications,"" yield "" 'prereq': prereq, 'beta_rules': beta_rules, 'beta_triggers': beta_triggers}"" yield '' yield '' ``` ###Assistant : Returns a generator with lines to represent the facts and rules " 1144,"def ensure_string(self, option, default=None): self._ensure_stringlike(option, ""string"", default) ","Ensure that 'option' is a string; if not defined, set it to 'default'. ",13,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ensure_string(self, option, default=None): self._ensure_stringlike(option, ""string"", default) ``` ###Assistant : Ensure that 'option' is a string; if not defined, set it to 'default'. " 1145,"def update_ema(target_params, source_params, rate=0.99): for targ, src in zip(target_params, source_params): targ.detach().mul_(rate).add_(src, alpha=1 - rate) "," Update target parameters to be closer to those of source parameters using an exponential moving average. :param target_params: the target parameter sequence. :param source_params: the source parameter sequence. :param rate: the EMA rate (closer to 1 means slower). ",38,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_ema(target_params, source_params, rate=0.99): for targ, src in zip(target_params, source_params): targ.detach().mul_(rate).add_(src, alpha=1 - rate) ``` ###Assistant : Update target parameters to be closer to those of source parameters using an exponential moving average. :param target_params: the target parameter sequence. :param source_params: the source parameter sequence. :param rate: the EMA rate (closer to 1 means slower). " 1146,"def _android_folder() -> str | None: try: # First try to get path to android app via pyjnius from jnius import autoclass Context = autoclass(""android.content.Context"") # noqa: N806 result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath() except Exception: # if fails find an android folder looking path on the sys.path pattern = re.compile(r""/data/(data|user/\d+)/(.+)/files"") for path in sys.path: if pattern.match(path): result = path.split(""/files"")[0] break else: result = None return result @lru_cache(maxsize=1)",:return: base folder for the Android OS or None if cannot be found,13,68,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _android_folder() -> str | None: try: # First try to get path to android app via pyjnius from jnius import autoclass Context = autoclass(""android.content.Context"") # noqa: N806 result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath() except Exception: # if fails find an android folder looking path on the sys.path pattern = re.compile(r""/data/(data|user/\d+)/(.+)/files"") for path in sys.path: if pattern.match(path): result = path.split(""/files"")[0] break else: result = None return result @lru_cache(maxsize=1) ``` ###Assistant : :return: base folder for the Android OS or None if cannot be found " 1147,"def de_bruijn(charset, n, maxlen): # type: (str, int, int) -> str k = len(charset) a = [0] * k * n sequence = [] # type: List[str] "," Generate the De Bruijn Sequence up to `maxlen` characters for the charset `charset` and subsequences of length `n`. Algorithm modified from wikipedia https://en.wikipedia.org/wiki/De_Bruijn_sequence ",23,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def de_bruijn(charset, n, maxlen): # type: (str, int, int) -> str k = len(charset) a = [0] * k * n sequence = [] # type: List[str] ``` ###Assistant : Generate the De Bruijn Sequence up to `maxlen` characters for the charset `charset` and subsequences of length `n`. Algorithm modified from wikipedia https://en.wikipedia.org/wiki/De_Bruijn_sequence " 1148,"def _add_option_refresh(self) -> None: logger.debug(""Adding refresh option"") btnrefresh = ttk.Button(self.optsframe, image=get_images().icons[""reload""], command=lambda x=""update"": preview_trigger().set(x)) # type:ignore btnrefresh.pack(padx=2, side=tk.RIGHT) Tooltip(btnrefresh, text=_(""Preview updates at every model save. Click to refresh now.""), wrap_length=200) logger.debug(""Added refresh option"") ", Add refresh button to refresh preview immediately ,7,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _add_option_refresh(self) -> None: logger.debug(""Adding refresh option"") btnrefresh = ttk.Button(self.optsframe, image=get_images().icons[""reload""], command=lambda x=""update"": preview_trigger().set(x)) # type:ignore btnrefresh.pack(padx=2, side=tk.RIGHT) Tooltip(btnrefresh, text=_(""Preview updates at every model save. Click to refresh now.""), wrap_length=200) logger.debug(""Added refresh option"") ``` ###Assistant : Add refresh button to refresh preview immediately " 1149,"def close(self): try: if hasattr(self, ""_close__fp""): self._close__fp() if self.fp: self.fp.close() self.fp = None except Exception as msg: logger.debug(""Error closing: %s"", msg) if getattr(self, ""map"", None): self.map = None # Instead of simply setting to None, we're setting up a # deferred error that will better explain that the core image # object is gone. self.im = DeferredError(ValueError(""Operation on closed image"")) "," Closes the file pointer, if possible. This operation will destroy the image core and release its memory. The image data will be unusable afterward. This function is required to close images that have multiple frames or have not had their file read and closed by the :py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for more information. ",53,60,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def close(self): try: if hasattr(self, ""_close__fp""): self._close__fp() if self.fp: self.fp.close() self.fp = None except Exception as msg: logger.debug(""Error closing: %s"", msg) if getattr(self, ""map"", None): self.map = None # Instead of simply setting to None, we're setting up a # deferred error that will better explain that the core image # object is gone. self.im = DeferredError(ValueError(""Operation on closed image"")) ``` ###Assistant : Closes the file pointer, if possible. This operation will destroy the image core and release its memory. The image data will be unusable afterward. This function is required to close images that have multiple frames or have not had their file read and closed by the :py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for more information. " 1150,"def diop_general_sum_of_squares(eq, limit=1): r var, coeff, diop_type = classify_diop(eq, _dict=False) if diop_type == GeneralSumOfSquares.name: return set(GeneralSumOfSquares(eq).solve(limit=limit)) "," Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`. Returns at most ``limit`` number of solutions. Usage ===== ``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which is assumed to be zero. Also, ``eq`` should be in the form, `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`. Details ======= When `n = 3` if `k = 4^a(8m + 7)` for some `a, m \in Z` then there will be no solutions. Refer to [1]_ for more details. Examples ======== >>> from sympy.solvers.diophantine.diophantine import diop_general_sum_of_squares >>> from sympy.abc import a, b, c, d, e >>> diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - 2345) {(15, 22, 22, 24, 24)} Reference ========= .. [1] Representing an integer as a sum of three squares, [online], Available: http://www.proofwiki.org/wiki/Integer_as_Sum_of_Three_Squares ",138,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def diop_general_sum_of_squares(eq, limit=1): r var, coeff, diop_type = classify_diop(eq, _dict=False) if diop_type == GeneralSumOfSquares.name: return set(GeneralSumOfSquares(eq).solve(limit=limit)) ``` ###Assistant : Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`. Returns at most ``limit`` number of solutions. Usage ===== ``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which is assumed to be zero. Also, ``eq`` should be in the form, `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`. Details ======= When `n = 3` if `k = 4^a(8m + 7)` for some `a, m \in Z` then there will be no solutions. Refer to [1]_ for more details. Examples ======== >>> from sympy.solvers.diophantine.diophantine import diop_general_sum_of_squares >>> from sympy.abc import a, b, c, d, e >>> diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - 2345) {(15, 22, 22, 24, 24)} Reference ========= .. [1] Representing an integer as a sum of three squares, [online], Available: http://www.proofwiki.org/wiki/Integer_as_Sum_of_Three_Squares " 1151,"def resolved_combinations(self) -> Tuple[Tuple[ChallengeBody, ...], ...]: warnings.warn( ""acme.messages.Authorization.resolved_combinations is deprecated and will be "" ""removed in a future release."", DeprecationWarning) return tuple(tuple(self.challenges[idx] for idx in combo) for combo in self.combinations) # pylint: disable=not-an-iterable @Directory.register","Combinations with challenges instead of indices. .. 
deprecated: 1.30.0 ",9,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resolved_combinations(self) -> Tuple[Tuple[ChallengeBody, ...], ...]: warnings.warn( ""acme.messages.Authorization.resolved_combinations is deprecated and will be "" ""removed in a future release."", DeprecationWarning) return tuple(tuple(self.challenges[idx] for idx in combo) for combo in self.combinations) # pylint: disable=not-an-iterable @Directory.register ``` ###Assistant : Combinations with challenges instead of indices. .. deprecated: 1.30.0 " 1152,"def test_tweedie_log_identity_consistency(p): half_tweedie_log = HalfTweedieLoss(power=p) half_tweedie_identity = HalfTweedieLossIdentity(power=p) n_samples = 10 y_true, raw_prediction = random_y_true_raw_prediction( loss=half_tweedie_log, n_samples=n_samples, seed=42 ) y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction) # Let's compare the loss values, up to some constant term that is dropped # in HalfTweedieLoss but not in HalfTweedieLossIdentity. loss_log = half_tweedie_log.loss( y_true=y_true, raw_prediction=raw_prediction ) + half_tweedie_log.constant_to_optimal_zero(y_true) loss_identity = half_tweedie_identity.loss( y_true=y_true, raw_prediction=y_pred ) + half_tweedie_identity.constant_to_optimal_zero(y_true) # Note that HalfTweedieLoss ignores different constant terms than # HalfTweedieLossIdentity. Constant terms means terms not depending on # raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses # give the same values. assert_allclose(loss_log, loss_identity) # For gradients and hessians, the constant terms do not matter. We have, however, # to account for the chain rule, i.e. with x=raw_prediction # gradient_log(x) = d/dx loss_log(x) # = d/dx loss_identity(exp(x)) # = exp(x) * gradient_identity(exp(x)) # Similarly, # hessian_log(x) = exp(x) * gradient_identity(exp(x)) # + exp(x)**2 * hessian_identity(x) gradient_log, hessian_log = half_tweedie_log.gradient_hessian( y_true=y_true, raw_prediction=raw_prediction ) gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian( y_true=y_true, raw_prediction=y_pred ) assert_allclose(gradient_log, y_pred * gradient_identity) assert_allclose( hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity ) ",Test for identical losses when only the link function is different.,11,174,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_tweedie_log_identity_consistency(p): half_tweedie_log = HalfTweedieLoss(power=p) half_tweedie_identity = HalfTweedieLossIdentity(power=p) n_samples = 10 y_true, raw_prediction = random_y_true_raw_prediction( loss=half_tweedie_log, n_samples=n_samples, seed=42 ) y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction) # Let's compare the loss values, up to some constant term that is dropped # in HalfTweedieLoss but not in HalfTweedieLossIdentity. loss_log = half_tweedie_log.loss( y_true=y_true, raw_prediction=raw_prediction ) + half_tweedie_log.constant_to_optimal_zero(y_true) loss_identity = half_tweedie_identity.loss( y_true=y_true, raw_prediction=y_pred ) + half_tweedie_identity.constant_to_optimal_zero(y_true) # Note that HalfTweedieLoss ignores different constant terms than # HalfTweedieLossIdentity. Constant terms means terms not depending on # raw_prediction. 
By adding these terms, `constant_to_optimal_zero`, both losses # give the same values. assert_allclose(loss_log, loss_identity) # For gradients and hessians, the constant terms do not matter. We have, however, # to account for the chain rule, i.e. with x=raw_prediction # gradient_log(x) = d/dx loss_log(x) # = d/dx loss_identity(exp(x)) # = exp(x) * gradient_identity(exp(x)) # Similarly, # hessian_log(x) = exp(x) * gradient_identity(exp(x)) # + exp(x)**2 * hessian_identity(x) gradient_log, hessian_log = half_tweedie_log.gradient_hessian( y_true=y_true, raw_prediction=raw_prediction ) gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian( y_true=y_true, raw_prediction=y_pred ) assert_allclose(gradient_log, y_pred * gradient_identity) assert_allclose( hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity ) ``` ###Assistant : Test for identical losses when only the link function is different. " 1153,"def laplace_transform(f, t, s, legacy_matrix=True, **hints): r debug('\n***** laplace_transform(%s, %s, %s)'%(f, t, s)) if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'): conds = not hints.get('noconds', False) if conds and legacy_matrix: SymPyDeprecationWarning( feature=""laplace_transform of a Matrix with noconds=False (default)"", useinstead=""the option legacy_matrix=False to get the new behaviour"", issue=21504, deprecated_since_version=""1.9"" ).warn() return f.applyfunc(lambda fij: laplace_transform(fij, t, s, **hints)) else: elements_trans = [laplace_transform(fij, t, s, **hints) for fij in f] if conds: elements, avals, conditions = zip(*elements_trans) f_laplace = type(f)(*f.shape, elements) return f_laplace, Max(*avals), And(*conditions) else: return type(f)(*f.shape, elements_trans) return LaplaceTransform(f, t, s).doit(**hints) @_noconds_(True)"," Compute the Laplace Transform `F(s)` of `f(t)`, .. math :: F(s) = \int_{0^{-}}^\infty e^{-st} f(t) \mathrm{d}t. Explanation =========== For all sensible functions, this converges absolutely in a half-plane .. math :: a < \operatorname{Re}(s) This function returns ``(F, a, cond)`` where ``F`` is the Laplace transform of ``f``, `a` is the half-plane of convergence, and `cond` are auxiliary convergence conditions. The implementation is rule-based, and if you are interested in which rules are applied, and whether integration is attemped, you can switch debug information on by setting ``sympy.SYMPY_DEBUG=True``. The lower bound is `0-`, meaning that this bound should be approached from the lower side. This is only necessary if distributions are involved. At present, it is only done if `f(t)` contains ``DiracDelta``, in which case the Laplace transform is computed implicitly as .. math :: F(s) = \lim_{\tau\to 0^{-}} \int_{\tau}^\infty e^{-st} f(t) \mathrm{d}t by applying rules. If the integral cannot be fully computed in closed form, this function returns an unevaluated :class:`LaplaceTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``, only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``). .. deprecated:: 1.9 Legacy behavior for matrices where ``laplace_transform`` with ``noconds=False`` (the default) returns a Matrix whose elements are tuples. The behavior of ``laplace_transform`` for matrices will change in a future release of SymPy to return a tuple of the transformed Matrix and the convergence conditions for the matrix as a whole. 
Use ``legacy_matrix=False`` to enable the new behavior. Examples ======== >>> from sympy import DiracDelta, exp, laplace_transform >>> from sympy.abc import t, s, a >>> laplace_transform(t**4, t, s) (24/s**5, 0, True) >>> laplace_transform(t**a, t, s) (gamma(a + 1)/(s*s**a), 0, re(a) > -1) >>> laplace_transform(DiracDelta(t)-a*exp(-a*t),t,s) (s/(a + s), Max(0, -a), True) See Also ======== inverse_laplace_transform, mellin_transform, fourier_transform hankel_transform, inverse_hankel_transform ",300,89,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def laplace_transform(f, t, s, legacy_matrix=True, **hints): r debug('\n***** laplace_transform(%s, %s, %s)'%(f, t, s)) if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'): conds = not hints.get('noconds', False) if conds and legacy_matrix: SymPyDeprecationWarning( feature=""laplace_transform of a Matrix with noconds=False (default)"", useinstead=""the option legacy_matrix=False to get the new behaviour"", issue=21504, deprecated_since_version=""1.9"" ).warn() return f.applyfunc(lambda fij: laplace_transform(fij, t, s, **hints)) else: elements_trans = [laplace_transform(fij, t, s, **hints) for fij in f] if conds: elements, avals, conditions = zip(*elements_trans) f_laplace = type(f)(*f.shape, elements) return f_laplace, Max(*avals), And(*conditions) else: return type(f)(*f.shape, elements_trans) return LaplaceTransform(f, t, s).doit(**hints) @_noconds_(True) ``` ###Assistant : Compute the Laplace Transform `F(s)` of `f(t)`, .. math :: F(s) = \int_{0^{-}}^\infty e^{-st} f(t) \mathrm{d}t. Explanation =========== For all sensible functions, this converges absolutely in a half-plane .. math :: a < \operatorname{Re}(s) This function returns ``(F, a, cond)`` where ``F`` is the Laplace transform of ``f``, `a` is the half-plane of convergence, and `cond` are auxiliary convergence conditions. The implementation is rule-based, and if you are interested in which rules are applied, and whether integration is attemped, you can switch debug information on by setting ``sympy.SYMPY_DEBUG=True``. The lower bound is `0-`, meaning that this bound should be approached from the lower side. This is only necessary if distributions are involved. At present, it is only done if `f(t)` contains ``DiracDelta``, in which case the Laplace transform is computed implicitly as .. math :: F(s) = \lim_{\tau\to 0^{-}} \int_{\tau}^\infty e^{-st} f(t) \mathrm{d}t by applying rules. If the integral cannot be fully computed in closed form, this function returns an unevaluated :class:`LaplaceTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``, only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``). .. deprecated:: 1.9 Legacy behavior for matrices where ``laplace_transform`` with ``noconds=False`` (the default) returns a Matrix whose elements are tuples. The behavior of ``laplace_transform`` for matrices will change in a future release of SymPy to return a tuple of the transformed Matrix and the convergence conditions for the matrix as a whole. Use ``legacy_matrix=False`` to enable the new behavior. 
Examples ======== >>> from sympy import DiracDelta, exp, laplace_transform >>> from sympy.abc import t, s, a >>> laplace_transform(t**4, t, s) (24/s**5, 0, True) >>> laplace_transform(t**a, t, s) (gamma(a + 1)/(s*s**a), 0, re(a) > -1) >>> laplace_transform(DiracDelta(t)-a*exp(-a*t),t,s) (s/(a + s), Max(0, -a), True) See Also ======== inverse_laplace_transform, mellin_transform, fourier_transform hankel_transform, inverse_hankel_transform " 1154,"def reduce_annotations(anno_types, answers): for at in set(anno_types): assert at in (""no_answer"", ""short_answer"") if anno_types.count(""short_answer"") >= anno_types.count(""no_answer""): majority = ""short_answer"" is_impossible = False else: majority = ""no_answer"" is_impossible = True answers = [a for at, a in zip(anno_types, answers) if at == majority] reduction = len(anno_types) - len(answers) assert reduction < 3 if not is_impossible: global n_no_ans n_no_ans += reduction else: global n_short n_short += reduction answers = [] return answers, is_impossible "," In cases where there is annotator disagreement, this fn picks either only the short_answers or only the no_answers, depending on which is more numerous, with a bias towards picking short_answers. Note: By this stage, all long_answer annotations and all samples with yes/no answer have been removed. This leaves just no_answer and short_answers",52,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reduce_annotations(anno_types, answers): for at in set(anno_types): assert at in (""no_answer"", ""short_answer"") if anno_types.count(""short_answer"") >= anno_types.count(""no_answer""): majority = ""short_answer"" is_impossible = False else: majority = ""no_answer"" is_impossible = True answers = [a for at, a in zip(anno_types, answers) if at == majority] reduction = len(anno_types) - len(answers) assert reduction < 3 if not is_impossible: global n_no_ans n_no_ans += reduction else: global n_short n_short += reduction answers = [] return answers, is_impossible ``` ###Assistant : In cases where there is annotator disagreement, this fn picks either only the short_answers or only the no_answers, depending on which is more numerous, with a bias towards picking short_answers. Note: By this stage, all long_answer annotations and all samples with yes/no answer have been removed. This leaves just no_answer and short_answers " 1155,"async def async_open_cover(self, **kwargs): await mqtt.async_publish( self.hass, self._config.get(CONF_COMMAND_TOPIC), self._config[CONF_PAYLOAD_OPEN], self._config[CONF_QOS], self._config[CONF_RETAIN], self._config[CONF_ENCODING], ) if self._optimistic: # Optimistically assume that cover has changed state. self._state = STATE_OPEN if self._config.get(CONF_GET_POSITION_TOPIC): self._position = self.find_percentage_in_range( self._config[CONF_POSITION_OPEN], COVER_PAYLOAD ) self.async_write_ha_state() ","Move the cover up. This method is a coroutine. ",9,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_open_cover(self, **kwargs): await mqtt.async_publish( self.hass, self._config.get(CONF_COMMAND_TOPIC), self._config[CONF_PAYLOAD_OPEN], self._config[CONF_QOS], self._config[CONF_RETAIN], self._config[CONF_ENCODING], ) if self._optimistic: # Optimistically assume that cover has changed state. 
self._state = STATE_OPEN if self._config.get(CONF_GET_POSITION_TOPIC): self._position = self.find_percentage_in_range( self._config[CONF_POSITION_OPEN], COVER_PAYLOAD ) self.async_write_ha_state() ``` ###Assistant : Move the cover up. This method is a coroutine. " 1156,"def _parse_distro_release_content(line): # type: (str) -> Dict[str, str] matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1]) distro_info = {} if matches: # regexp ensures non-None distro_info[""name""] = matches.group(3)[::-1] if matches.group(2): distro_info[""version_id""] = matches.group(2)[::-1] if matches.group(1): distro_info[""codename""] = matches.group(1)[::-1] elif line: distro_info[""name""] = line.strip() return distro_info _distro = LinuxDistribution() "," Parse a line from a distro release file. Parameters: * line: Line from the distro release file. Must be a unicode string or a UTF-8 encoded byte string. Returns: A dictionary containing all information items. ",35,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _parse_distro_release_content(line): # type: (str) -> Dict[str, str] matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1]) distro_info = {} if matches: # regexp ensures non-None distro_info[""name""] = matches.group(3)[::-1] if matches.group(2): distro_info[""version_id""] = matches.group(2)[::-1] if matches.group(1): distro_info[""codename""] = matches.group(1)[::-1] elif line: distro_info[""name""] = line.strip() return distro_info _distro = LinuxDistribution() ``` ###Assistant : Parse a line from a distro release file. Parameters: * line: Line from the distro release file. Must be a unicode string or a UTF-8 encoded byte string. Returns: A dictionary containing all information items. " 1157,"def load_sharded_checkpoint(model, folder, strict=True): # Load the index index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) if not os.path.isfile(index_file): raise ValueError(f""Can't find a checkpoint index ({WEIGHTS_INDEX_NAME}) in {folder}."") with open(index_file, ""r"", encoding=""utf-8"") as f: index = json.load(f) shard_files = list(set(index[""weight_map""].values())) # If strict=True, error before loading any of the state dicts. loaded_keys = index[""weight_map""].keys() model_keys = model.state_dict().keys() missing_keys = [key for key in model_keys if key not in loaded_keys] unexpected_keys = [key for key in loaded_keys if key not in model_keys] if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): error_message = f""Error(s) in loading state_dict for {model.__class__.__name__}"" if len(missing_keys) > 0: str_missing_keys = "","".join([f'""{k}""' for k in missing_keys]) error_message += f""\nMissing key(s): {str_missing_keys}."" if len(unexpected_keys) > 0: str_unexpected_keys = "","".join([f'""{k}""' for k in unexpected_keys]) error_message += f""\nMissing key(s): {str_unexpected_keys}."" raise RuntimeError(error_message) for shard_file in shard_files: state_dict = torch.load(os.path.join(folder, shard_file)) model.load_state_dict(state_dict, strict=False) # Make sure memory is fred before we load the next state dict. del state_dict gc.collect() # Return the same thing as PyTorch load_state_dict function. 
return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys) "," This is the same as [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict) but for a sharded checkpoint. This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model. Args: model (`torch.nn.Module`): The model in which to load the checkpoint. folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint. strict (`bool`, *optional*, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. Returns: `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields - `missing_keys` is a list of str containing the missing keys - `unexpected_keys` is a list of str containing the unexpected keys ",115,167,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_sharded_checkpoint(model, folder, strict=True): # Load the index index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) if not os.path.isfile(index_file): raise ValueError(f""Can't find a checkpoint index ({WEIGHTS_INDEX_NAME}) in {folder}."") with open(index_file, ""r"", encoding=""utf-8"") as f: index = json.load(f) shard_files = list(set(index[""weight_map""].values())) # If strict=True, error before loading any of the state dicts. loaded_keys = index[""weight_map""].keys() model_keys = model.state_dict().keys() missing_keys = [key for key in model_keys if key not in loaded_keys] unexpected_keys = [key for key in loaded_keys if key not in model_keys] if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): error_message = f""Error(s) in loading state_dict for {model.__class__.__name__}"" if len(missing_keys) > 0: str_missing_keys = "","".join([f'""{k}""' for k in missing_keys]) error_message += f""\nMissing key(s): {str_missing_keys}."" if len(unexpected_keys) > 0: str_unexpected_keys = "","".join([f'""{k}""' for k in unexpected_keys]) error_message += f""\nUnexpected key(s): {str_unexpected_keys}."" raise RuntimeError(error_message) for shard_file in shard_files: state_dict = torch.load(os.path.join(folder, shard_file)) model.load_state_dict(state_dict, strict=False) # Make sure memory is freed before we load the next state dict. del state_dict gc.collect() # Return the same thing as PyTorch load_state_dict function. return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys) ``` ###Assistant : This is the same as [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict) but for a sharded checkpoint. This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model. Args: model (`torch.nn.Module`): The model in which to load the checkpoint. folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint. strict (`bool`, *optional*, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. 
Returns: `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields - `missing_keys` is a list of str containing the missing keys - `unexpected_keys` is a list of str containing the unexpected keys " 1158,"def customer_query(doctype, txt, searchfield, start, page_len, filters): conditions = [] cust_master_name = frappe.defaults.get_user_default(""cust_master_name"") if cust_master_name == ""Customer Name"": fields = [""name"", ""customer_group"", ""territory""] else: fields = [""name"", ""customer_name"", ""customer_group"", ""territory""] fields = get_fields(""Customer"", fields) searchfields = frappe.get_meta(""Customer"").get_search_fields() searchfields = "" or "".join(field + "" like %(txt)s"" for field in searchfields) return frappe.db.sql( .format( **{ ""fields"": "", "".join(fields), ""scond"": searchfields, ""mcond"": get_match_cond(doctype), ""fcond"": get_filters_cond(doctype, filters, conditions).replace(""%"", ""%%""), } ), {""txt"": ""%%%s%%"" % txt, ""_txt"": txt.replace(""%"", """"), ""start"": start, ""page_len"": page_len}, ) # searches for supplier @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs","select {fields} from `tabCustomer` where docstatus < 2 and ({scond}) and disabled=0 {fcond} {mcond} order by if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999), if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999), idx desc, name, customer_name limit %(start)s, %(page_len)s",33,86,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def customer_query(doctype, txt, searchfield, start, page_len, filters): conditions = [] cust_master_name = frappe.defaults.get_user_default(""cust_master_name"") if cust_master_name == ""Customer Name"": fields = [""name"", ""customer_group"", ""territory""] else: fields = [""name"", ""customer_name"", ""customer_group"", ""territory""] fields = get_fields(""Customer"", fields) searchfields = frappe.get_meta(""Customer"").get_search_fields() searchfields = "" or "".join(field + "" like %(txt)s"" for field in searchfields) return frappe.db.sql( .format( **{ ""fields"": "", "".join(fields), ""scond"": searchfields, ""mcond"": get_match_cond(doctype), ""fcond"": get_filters_cond(doctype, filters, conditions).replace(""%"", ""%%""), } ), {""txt"": ""%%%s%%"" % txt, ""_txt"": txt.replace(""%"", """"), ""start"": start, ""page_len"": page_len}, ) # searches for supplier @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs ``` ###Assistant : select {fields} from `tabCustomer` where docstatus < 2 and ({scond}) and disabled=0 {fcond} {mcond} order by if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999), if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999), idx desc, name, customer_name limit %(start)s, %(page_len)s " 1159,"def get_group_tag_value_count(self, group, environment_id, key): raise NotImplementedError "," >>> get_group_tag_value_count(group, 3, 'key1') ",4,7,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_group_tag_value_count(self, group, environment_id, key): raise NotImplementedError ``` ###Assistant : >>> get_group_tag_value_count(group, 3, 'key1') " 1160,"def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker): dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state' task_id_1 = 'dummy' session = settings.Session() with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, session=session): task1 = EmptyOperator(task_id=task_id_1) self.scheduler_job = SchedulerJob(subdir=os.devnull) dr1 = dag_maker.create_dagrun(state=state) ti = dr1.get_task_instance(task1.task_id, session) ti.state = State.SCHEDULED session.merge(ti) session.commit() with patch.object(BaseExecutor, 'queue_command') as mock_queue_command: self.scheduler_job._enqueue_task_instances_with_queued_state([ti]) ti.refresh_from_db() assert ti.state == State.NONE mock_queue_command.assert_not_called() ",This tests that task instances whose dagrun is in finished state are not queued,14,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker): dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state' task_id_1 = 'dummy' session = settings.Session() with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, session=session): task1 = EmptyOperator(task_id=task_id_1) self.scheduler_job = SchedulerJob(subdir=os.devnull) dr1 = dag_maker.create_dagrun(state=state) ti = dr1.get_task_instance(task1.task_id, session) ti.state = State.SCHEDULED session.merge(ti) session.commit() with patch.object(BaseExecutor, 'queue_command') as mock_queue_command: self.scheduler_job._enqueue_task_instances_with_queued_state([ti]) ti.refresh_from_db() assert ti.state == State.NONE mock_queue_command.assert_not_called() ``` ###Assistant : This tests that task instances whose dagrun is in finished state are not queued " 1161,"def match_hostname(cert, hostname): if not cert: raise ValueError(""empty or no certificate, match_hostname needs a "" ""SSL socket or SSL context with either "" ""CERT_OPTIONAL or CERT_REQUIRED"") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError(""hostname %r "" ""doesn't match either of %s"" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError(""hostname %r "" ""doesn't match %r"" % (hostname, dnsnames[0])) else: raise CertificateError(""no appropriate commonName or "" ""subjectAltName fields were found"") try: from types import SimpleNamespace as Container except ImportError: # pragma: no cover","Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. 
",40,155,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def match_hostname(cert, hostname): if not cert: raise ValueError(""empty or no certificate, match_hostname needs a "" ""SSL socket or SSL context with either "" ""CERT_OPTIONAL or CERT_REQUIRED"") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError(""hostname %r "" ""doesn't match either of %s"" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError(""hostname %r "" ""doesn't match %r"" % (hostname, dnsnames[0])) else: raise CertificateError(""no appropriate commonName or "" ""subjectAltName fields were found"") try: from types import SimpleNamespace as Container except ImportError: # pragma: no cover ``` ###Assistant : Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. " 1162,"def take(self, n) -> ""IterableDataset"": ex_iterable = TakeExamplesIterable(self._ex_iterable, n) return iterable_dataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, format_type=self._format_type, shuffling=copy.deepcopy(self._shuffling), token_per_repo_id=self._token_per_repo_id, ) "," Create a new IterableDataset with only the first ``n`` elements. Args: n (:obj:`int`): number of elements to take. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset(""rotten_tomatoes"", split=""train"", streaming=True) >>> small_ds = ds.take(2) >>> list(small_ds) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new "" conan "" and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of "" the lord of the rings "" trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}] ``` ",117,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def take(self, n) -> ""IterableDataset"": ex_iterable = TakeExamplesIterable(self._ex_iterable, n) return iterable_dataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, format_type=self._format_type, shuffling=copy.deepcopy(self._shuffling), token_per_repo_id=self._token_per_repo_id, ) ``` ###Assistant : Create a new IterableDataset with only the first ``n`` elements. Args: n (:obj:`int`): number of elements to take. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset(""rotten_tomatoes"", split=""train"", streaming=True) >>> small_ds = ds.take(2) >>> list(small_ds) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new "" conan "" and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of "" the lord of the rings "" trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}] ``` " 1163,"def test_category_match_group(self): from sentry.grouping.enhancer import Enhancements enhancement = Enhancements.from_config_string( , ) event = make_event( platform=""native"", exception={ ""values"": [ { ""type"": ""Hello"", ""stacktrace"": { ""frames"": [ { ""function"": ""foo"", }, { ""function"": ""bar"", }, ] }, } ] }, ) manager = EventManager(event) manager.normalize() grouping_config = { ""enhancements"": enhancement.dumps(), ""id"": ""mobile:2021-02-12"", } manager.get_data()[""grouping_config""] = grouping_config event1 = manager.save(self.project.id) event2 = Event(event1.project_id, event1.event_id, data=event1.data) assert event1.get_hashes().hashes == event2.get_hashes(grouping_config).hashes "," Regression test to ensure categories are applied consistently and don't produce hash mismatches. function:foo category=foo_like category:foo_like -group ",17,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_category_match_group(self): from sentry.grouping.enhancer import Enhancements enhancement = Enhancements.from_config_string( , ) event = make_event( platform=""native"", exception={ ""values"": [ { ""type"": ""Hello"", ""stacktrace"": { ""frames"": [ { ""function"": ""foo"", }, { ""function"": ""bar"", }, ] }, } ] }, ) manager = EventManager(event) manager.normalize() grouping_config = { ""enhancements"": enhancement.dumps(), ""id"": ""mobile:2021-02-12"", } manager.get_data()[""grouping_config""] = grouping_config event1 = manager.save(self.project.id) event2 = Event(event1.project_id, event1.event_id, data=event1.data) assert event1.get_hashes().hashes == event2.get_hashes(grouping_config).hashes ``` ###Assistant : Regression test to ensure categories are applied consistently and don't produce hash mismatches. function:foo category=foo_like category:foo_like -group " 1164,"def notify(self, notification, raise_exception=False): event = notification.event try: return self.notify_users( event.group, event, triggering_rules=[r.label for r in notification.rules] ) except ( ApiError, HTTPError, InvalidIdentity, PluginError, SSLError, UrllibHTTPError, ) as err: self.logger.info( ""notification-plugin.notify-failed"", extra={ ""error"": str(err), ""plugin"": self.slug, ""project_id"": event.group.project_id, ""organization_id"": event.group.project.organization_id, }, ) if raise_exception: raise err return False "," This calls the notify_users method of the plugin. Normally this method eats the error and logs it but if we set raise_exception=True like we do for the test plugin button, the exception is raised ",34,48,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def notify(self, notification, raise_exception=False): event = notification.event try: return self.notify_users( event.group, event, triggering_rules=[r.label for r in notification.rules] ) except ( ApiError, HTTPError, InvalidIdentity, PluginError, SSLError, UrllibHTTPError, ) as err: self.logger.info( ""notification-plugin.notify-failed"", extra={ ""error"": str(err), ""plugin"": self.slug, ""project_id"": event.group.project_id, ""organization_id"": event.group.project.organization_id, }, ) if raise_exception: raise err return False ``` ###Assistant : This calls the notify_users method of the plugin. Normally this method eats the error and logs it but if we set raise_exception=True like we do for the test plugin button, the exception is raised " 1165,"def test_processors(self): from djangocms_text_ckeditor.cms_plugins import TextPlugin from cms.plugin_pool import plugin_pool instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0] load_from_string = self.load_template_from_string "," Tests that plugin processors and plugin context processors can be defined in settings and are working and that extra plugin context processors can be passed to PluginContext. ",27,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_processors(self): from djangocms_text_ckeditor.cms_plugins import TextPlugin from cms.plugin_pool import plugin_pool instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0] load_from_string = self.load_template_from_string ``` ###Assistant : Tests that plugin processors and plugin context processors can be defined in settings and are working and that extra plugin context processors can be passed to PluginContext. " 1166,"async def test_unique_id_ignore(hass, manager): async_setup_entry = AsyncMock(return_value=False) mock_integration(hass, MockModule(""comp"", async_setup_entry=async_setup_entry)) mock_entity_platform(hass, ""config_flow.comp"", None) ",Test that we can ignore flows that are in progress and have a unique ID.,15,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_unique_id_ignore(hass, manager): async_setup_entry = AsyncMock(return_value=False) mock_integration(hass, MockModule(""comp"", async_setup_entry=async_setup_entry)) mock_entity_platform(hass, ""config_flow.comp"", None) ``` ###Assistant : Test that we can ignore flows that are in progress and have a unique ID. " 1167,"def bind(self, bind_string, key, propagate=True): if not self._is_window_created('tried Window.bind'): return self.TKroot.bind(bind_string, lambda evt: self._user_bind_callback(bind_string, evt, propagate)) self.user_bind_dict[bind_string] = key "," Used to add tkinter events to a Window. The tkinter specific data is in the Window's member variable user_bind_event :param bind_string: The string tkinter expected in its bind function :type bind_string: (str) :param key: The event that will be generated when the tkinter event occurs :type key: str | int | tuple | object :param propagate: If True then tkinter will be told to propagate the event :type propagate: (bool) ",70,19,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def bind(self, bind_string, key, propagate=True): if not self._is_window_created('tried Window.bind'): return self.TKroot.bind(bind_string, lambda evt: self._user_bind_callback(bind_string, evt, propagate)) self.user_bind_dict[bind_string] = key ``` ###Assistant : Used to add tkinter events to a Window. The tkinter specific data is in the Window's member variable user_bind_event :param bind_string: The string tkinter expected in its bind function :type bind_string: (str) :param key: The event that will be generated when the tkinter event occurs :type key: str | int | tuple | object :param propagate: If True then tkinter will be told to propagate the event :type propagate: (bool) " 1168,"def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None): targets = [] if self.training: rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs) targets_list = [targets] self.assigned_rois = (rois, rois_num) self.assigned_targets = targets pred_bbox = None head_out_list = [] for i in range(self.num_cascade_stages): if i > 0: rois, rois_num = self._get_rois_from_boxes(pred_bbox, inputs['im_shape']) if self.training: rois, rois_num, targets = self.bbox_assigner( rois, rois_num, inputs, i, is_cascade=True) targets_list.append(targets) rois_feat = self.roi_extractor(body_feats, rois, rois_num) bbox_feat = self.head(rois_feat, i) scores = self.bbox_score_list[i](bbox_feat) deltas = self.bbox_delta_list[i](bbox_feat) # TODO (lyuwenyu) Is it correct for only one class ? if not self.reg_class_agnostic and i < self.num_cascade_stages - 1: deltas = deltas.reshape([-1, self.num_classes, 4]) labels = scores[:, :-1].argmax(axis=-1) deltas = deltas[paddle.arange(deltas.shape[0]), labels] head_out_list.append([scores, deltas, rois]) pred_bbox = self._get_pred_bbox(deltas, rois, self.bbox_weight[i]) if self.training: loss = {} for stage, value in enumerate(zip(head_out_list, targets_list)): (scores, deltas, rois), targets = value loss_stage = self.get_loss(scores, deltas, targets, rois, self.bbox_weight[stage]) for k, v in loss_stage.items(): loss[k + ""_stage{}"".format( stage)] = v / self.num_cascade_stages return loss, bbox_feat else: scores, deltas, self.refined_rois = self.get_prediction( head_out_list) return (deltas, scores), self.head "," body_feats (list[Tensor]): Feature maps from backbone rois (Tensor): RoIs generated from RPN module rois_num (Tensor): The number of RoIs in each image inputs (dict{Tensor}): The ground-truth of image ",28,167,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None): targets = [] if self.training: rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs) targets_list = [targets] self.assigned_rois = (rois, rois_num) self.assigned_targets = targets pred_bbox = None head_out_list = [] for i in range(self.num_cascade_stages): if i > 0: rois, rois_num = self._get_rois_from_boxes(pred_bbox, inputs['im_shape']) if self.training: rois, rois_num, targets = self.bbox_assigner( rois, rois_num, inputs, i, is_cascade=True) targets_list.append(targets) rois_feat = self.roi_extractor(body_feats, rois, rois_num) bbox_feat = self.head(rois_feat, i) scores = self.bbox_score_list[i](bbox_feat) deltas = self.bbox_delta_list[i](bbox_feat) # TODO (lyuwenyu) Is it correct for only one class ? 
if not self.reg_class_agnostic and i < self.num_cascade_stages - 1: deltas = deltas.reshape([-1, self.num_classes, 4]) labels = scores[:, :-1].argmax(axis=-1) deltas = deltas[paddle.arange(deltas.shape[0]), labels] head_out_list.append([scores, deltas, rois]) pred_bbox = self._get_pred_bbox(deltas, rois, self.bbox_weight[i]) if self.training: loss = {} for stage, value in enumerate(zip(head_out_list, targets_list)): (scores, deltas, rois), targets = value loss_stage = self.get_loss(scores, deltas, targets, rois, self.bbox_weight[stage]) for k, v in loss_stage.items(): loss[k + ""_stage{}"".format( stage)] = v / self.num_cascade_stages return loss, bbox_feat else: scores, deltas, self.refined_rois = self.get_prediction( head_out_list) return (deltas, scores), self.head ``` ###Assistant : body_feats (list[Tensor]): Feature maps from backbone rois (Tensor): RoIs generated from RPN module rois_num (Tensor): The number of RoIs in each image inputs (dict{Tensor}): The ground-truth of image " 1169,"def get_fws(value): newvalue = value.lstrip() fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws') return fws, newvalue ","FWS = 1*WSP This isn't the RFC definition. We're using fws to represent tokens where folding can be done, but when we are parsing the *un*folding has already been done so we don't need to watch out for CRLF. ",39,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_fws(value): newvalue = value.lstrip() fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws') return fws, newvalue ``` ###Assistant : FWS = 1*WSP This isn't the RFC definition. We're using fws to represent tokens where folding can be done, but when we are parsing the *un*folding has already been done so we don't need to watch out for CRLF. " 1170,"def load_data_snli(batch_size, num_steps=50): num_workers = d2l.get_dataloader_workers() data_dir = d2l.download_extract('SNLI') train_data = read_snli(data_dir, True) test_data = read_snli(data_dir, False) train_set = SNLIDataset(train_data, num_steps) test_set = SNLIDataset(test_data, num_steps, train_set.vocab) train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True, num_workers=num_workers) test_iter = gluon.data.DataLoader(test_set, batch_size, shuffle=False, num_workers=num_workers) return train_iter, test_iter, train_set.vocab ","Download the SNLI dataset and return data iterators and vocabulary. Defined in :numref:`sec_natural-language-inference-and-dataset`",13,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_data_snli(batch_size, num_steps=50): num_workers = d2l.get_dataloader_workers() data_dir = d2l.download_extract('SNLI') train_data = read_snli(data_dir, True) test_data = read_snli(data_dir, False) train_set = SNLIDataset(train_data, num_steps) test_set = SNLIDataset(test_data, num_steps, train_set.vocab) train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True, num_workers=num_workers) test_iter = gluon.data.DataLoader(test_set, batch_size, shuffle=False, num_workers=num_workers) return train_iter, test_iter, train_set.vocab ``` ###Assistant : Download the SNLI dataset and return data iterators and vocabulary. 
Defined in :numref:`sec_natural-language-inference-and-dataset` " 1171,"def plot_feature_importance(model, feature_names, pair, train_dir, count_max=50) -> None: try: import plotly.graph_objects as go from plotly.subplots import make_subplots except ImportError: logger.exception(""Module plotly not found \n Please install using `pip3 install plotly`"") exit(1) from freqtrade.plot.plotting import store_plot_file # Gather feature importance from model if ""catboost.core"" in str(model.__class__): feature_importance = model.get_feature_importance() elif ""lightgbm.sklearn"" in str(model.__class__): feature_importance = model.feature_importances_ else: raise NotImplementedError(f""Cannot extract feature importance for {model.__class__}"") # Data preparation fi_df = pd.DataFrame({ ""feature_names"": np.array(feature_names), ""feature_importance"": np.array(feature_importance) }) fi_df_top = fi_df.nlargest(count_max, ""feature_importance"")[::-1] fi_df_worst = fi_df.nsmallest(count_max, ""feature_importance"")[::-1] # Plotting"," Plot Best and Worst Features by importance for CatBoost model. Called once per sub-train. Usage: plot_feature_importance( model=model, feature_names=dk.training_features_list, pair=pair, train_dir=dk.data_path) ",20,84,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def plot_feature_importance(model, feature_names, pair, train_dir, count_max=50) -> None: try: import plotly.graph_objects as go from plotly.subplots import make_subplots except ImportError: logger.exception(""Module plotly not found \n Please install using `pip3 install plotly`"") exit(1) from freqtrade.plot.plotting import store_plot_file # Gather feature importance from model if ""catboost.core"" in str(model.__class__): feature_importance = model.get_feature_importance() elif ""lightgbm.sklearn"" in str(model.__class__): feature_importance = model.feature_importances_ else: raise NotImplementedError(f""Cannot extract feature importance for {model.__class__}"") # Data preparation fi_df = pd.DataFrame({ ""feature_names"": np.array(feature_names), ""feature_importance"": np.array(feature_importance) }) fi_df_top = fi_df.nlargest(count_max, ""feature_importance"")[::-1] fi_df_worst = fi_df.nsmallest(count_max, ""feature_importance"")[::-1] # Plotting ``` ###Assistant : Plot Best and Worst Features by importance for CatBoost model. Called once per sub-train. Usage: plot_feature_importance( model=model, feature_names=dk.training_features_list, pair=pair, train_dir=dk.data_path) " 1172,"def get_primary_key_column(self, cursor, table_name): cursor.execute( ""PRAGMA table_info(%s)"" % self.connection.ops.quote_name(table_name) ) for _, name, *_, pk in cursor.fetchall(): if pk: return name return None ",Return the column name of the primary key for the given table.,12,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_primary_key_column(self, cursor, table_name): cursor.execute( ""PRAGMA table_info(%s)"" % self.connection.ops.quote_name(table_name) ) for _, name, *_, pk in cursor.fetchall(): if pk: return name return None ``` ###Assistant : Return the column name of the primary key for the given table. 
" 1173,"def validate_settings(): try: django_backend = [x for x in settings.TEMPLATES if x['BACKEND'] == 'django.template.backends.django.DjangoTemplates'][0] except IndexError: raise ImproperlyConfigured( ""django CMS requires django.template.context_processors.request in "" ""'django.template.backends.django.DjangoTemplates' context processors."" ) context_processors = django_backend.get('OPTIONS', {}).get('context_processors', []) if ('django.core.context_processors.request' not in context_processors and # noqa: W504 'django.template.context_processors.request' not in context_processors): raise ImproperlyConfigured(""django CMS requires django.template.context_processors.request in "" ""'django.template.backends.django.DjangoTemplates' context processors."") if ( hasattr(settings, ""SEND_BROKEN_LINK_EMAILS"") and # noqa: W504 ""django.middleware.common.BrokenLinkEmailsMiddleware"" not in getattr(settings, ""MIDDLEWARE"", []) ): warnings.warn('The setting ""SEND_BROKEN_LINK_EMAILS"" will not be honored by django CMS as of version 4.1. ' 'Add ""django.middleware.common.BrokenLinkEmailsMiddleware"" to your MIDDLEWARE settings ' 'instead.', DeprecationWarning) "," Check project settings file for required options ",7,95,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_settings(): try: django_backend = [x for x in settings.TEMPLATES if x['BACKEND'] == 'django.template.backends.django.DjangoTemplates'][0] except IndexError: raise ImproperlyConfigured( ""django CMS requires django.template.context_processors.request in "" ""'django.template.backends.django.DjangoTemplates' context processors."" ) context_processors = django_backend.get('OPTIONS', {}).get('context_processors', []) if ('django.core.context_processors.request' not in context_processors and # noqa: W504 'django.template.context_processors.request' not in context_processors): raise ImproperlyConfigured(""django CMS requires django.template.context_processors.request in "" ""'django.template.backends.django.DjangoTemplates' context processors."") if ( hasattr(settings, ""SEND_BROKEN_LINK_EMAILS"") and # noqa: W504 ""django.middleware.common.BrokenLinkEmailsMiddleware"" not in getattr(settings, ""MIDDLEWARE"", []) ): warnings.warn('The setting ""SEND_BROKEN_LINK_EMAILS"" will not be honored by django CMS as of version 4.1. ' 'Add ""django.middleware.common.BrokenLinkEmailsMiddleware"" to your MIDDLEWARE settings ' 'instead.', DeprecationWarning) ``` ###Assistant : Check project settings file for required options " 1174,"def get(cls): min_partition_size = super().get() assert min_partition_size > 0, ""`min_partition_size` should be > 0"" return min_partition_size "," Get ``MinPartitionSize`` with extra checks. Returns ------- int ",8,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get(cls): min_partition_size = super().get() assert min_partition_size > 0, ""`min_partition_size` should be > 0"" return min_partition_size ``` ###Assistant : Get ``MinPartitionSize`` with extra checks. 
Returns ------- int " 1175,"def pbt_function(config): lr = config[""lr""] accuracy = 0.0 # end = 1000 start = 0 if session.get_checkpoint(): state = session.get_checkpoint().to_dict() accuracy = state[""acc""] start = state[""step""] midpoint = 100 # lr starts decreasing after acc > midpoint q_tolerance = 3 # penalize exceeding lr by more than this multiple noise_level = 2 # add gaussian noise to the acc increase # triangle wave: # - start at 0.001 @ t=0, # - peak at 0.01 @ t=midpoint, # - end at 0.001 @ t=midpoint * 2, for step in range(start, 100): if accuracy < midpoint: optimal_lr = 0.01 * accuracy / midpoint else: optimal_lr = 0.01 - 0.01 * (accuracy - midpoint) / midpoint optimal_lr = min(0.01, max(0.001, optimal_lr)) # compute accuracy increase q_err = max(lr, optimal_lr) / min(lr, optimal_lr) if q_err < q_tolerance: accuracy += (1.0 / q_err) * random.random() elif lr > optimal_lr: accuracy -= (q_err - q_tolerance) * random.random() accuracy += noise_level * np.random.normal() accuracy = max(0, accuracy) checkpoint = None if step % 3 == 0: checkpoint = Checkpoint.from_dict({""acc"": accuracy, ""step"": start}) session.report( { ""mean_accuracy"": accuracy, ""cur_lr"": lr, ""optimal_lr"": optimal_lr, # for debugging ""q_err"": q_err, # for debugging ""done"": accuracy > midpoint * 2, # this stops the training process }, checkpoint=checkpoint, ) ","Toy PBT problem for benchmarking adaptive learning rate. The goal is to optimize this trainable's accuracy. The accuracy increases fastest at the optimal lr, which is a function of the current accuracy. The optimal lr schedule for this problem is the triangle wave as follows. Note that many lr schedules for real models also follow this shape: best lr ^ | /\ | / \ | / \ | / \ ------------> accuracy In this problem, using PBT with a population of 2-4 is sufficient to roughly approximate this lr schedule. Higher population sizes will yield faster convergence. Training will not converge without PBT. ",104,207,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pbt_function(config): lr = config[""lr""] accuracy = 0.0 # end = 1000 start = 0 if session.get_checkpoint(): state = session.get_checkpoint().to_dict() accuracy = state[""acc""] start = state[""step""] midpoint = 100 # lr starts decreasing after acc > midpoint q_tolerance = 3 # penalize exceeding lr by more than this multiple noise_level = 2 # add gaussian noise to the acc increase # triangle wave: # - start at 0.001 @ t=0, # - peak at 0.01 @ t=midpoint, # - end at 0.001 @ t=midpoint * 2, for step in range(start, 100): if accuracy < midpoint: optimal_lr = 0.01 * accuracy / midpoint else: optimal_lr = 0.01 - 0.01 * (accuracy - midpoint) / midpoint optimal_lr = min(0.01, max(0.001, optimal_lr)) # compute accuracy increase q_err = max(lr, optimal_lr) / min(lr, optimal_lr) if q_err < q_tolerance: accuracy += (1.0 / q_err) * random.random() elif lr > optimal_lr: accuracy -= (q_err - q_tolerance) * random.random() accuracy += noise_level * np.random.normal() accuracy = max(0, accuracy) checkpoint = None if step % 3 == 0: checkpoint = Checkpoint.from_dict({""acc"": accuracy, ""step"": start}) session.report( { ""mean_accuracy"": accuracy, ""cur_lr"": lr, ""optimal_lr"": optimal_lr, # for debugging ""q_err"": q_err, # for debugging ""done"": accuracy > midpoint * 2, # this stops the training process }, checkpoint=checkpoint, ) ``` ###Assistant : Toy PBT problem for benchmarking adaptive learning rate. 
The goal is to optimize this trainable's accuracy. The accuracy increases fastest at the optimal lr, which is a function of the current accuracy. The optimal lr schedule for this problem is the triangle wave as follows. Note that many lr schedules for real models also follow this shape: best lr ^ | /\ | / \ | / \ | / \ ------------> accuracy In this problem, using PBT with a population of 2-4 is sufficient to roughly approximate this lr schedule. Higher population sizes will yield faster convergence. Training will not converge without PBT. " 1176,"def add_hedge_option(price, implied_volatility, strike, days, side): # Determine delta position given the option delta = calc_delta(price, implied_volatility, strike, days, 0, side) # Determine gamma position given the option gamma = calc_gamma(price, implied_volatility, strike, days, 0) # Determine vega position given the option vega = calc_vega(price, implied_volatility, strike, days, 0) return delta, gamma, vega ","Determine the delta, gamma and vega value of the portfolio and/or options. Parameters ---------- price: int The price. implied_volatility: float The implied volatility. strike: float The strike price. days: float The amount of days until expiration. Use annual notation thus a month would be 30 / 360. side: int Whether you have a long (1) or short (-1) position Returns ------- delta: float gamma: float vega: float ",67,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_hedge_option(price, implied_volatility, strike, days, side): # Determine delta position given the option delta = calc_delta(price, implied_volatility, strike, days, 0, side) # Determine gamma position given the option gamma = calc_gamma(price, implied_volatility, strike, days, 0) # Determine vega position given the option vega = calc_vega(price, implied_volatility, strike, days, 0) return delta, gamma, vega ``` ###Assistant : Determine the delta, gamma and vega value of the portfolio and/or options. Parameters ---------- price: int The price. implied_volatility: float The implied volatility. strike: float The strike price. days: float The amount of days until expiration. Use annual notation thus a month would be 30 / 360. side: int Whether you have a long (1) or short (-1) position Returns ------- delta: float gamma: float vega: float " 1177,"def header_encode(self, string): codec = self.output_codec or 'us-ascii' header_bytes = _encode(string, codec) # 7bit/8bit encodings return the string unchanged (modulo conversions) encoder_module = self._get_encoder(header_bytes) if encoder_module is None: return string return encoder_module.header_encode(header_bytes, codec) ","Header-encode a string by converting it first to bytes. The type of encoding (base64 or quoted-printable) will be based on this charset's `header_encoding`. :param string: A unicode string for the header. It must be possible to encode this string to bytes using the character set's output codec. :return: The encoded string, with RFC 2047 chrome. ",55,33,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def header_encode(self, string): codec = self.output_codec or 'us-ascii' header_bytes = _encode(string, codec) # 7bit/8bit encodings return the string unchanged (modulo conversions) encoder_module = self._get_encoder(header_bytes) if encoder_module is None: return string return encoder_module.header_encode(header_bytes, codec) ``` ###Assistant : Header-encode a string by converting it first to bytes. The type of encoding (base64 or quoted-printable) will be based on this charset's `header_encoding`. :param string: A unicode string for the header. It must be possible to encode this string to bytes using the character set's output codec. :return: The encoded string, with RFC 2047 chrome. " 1178,"def data_dict(self, records): self.version = records[0].replace(""File-Date:"", """").strip() dic = {} dic[""deprecated""] = {} for label in [ ""language"", ""extlang"", ""script"", ""region"", ""variant"", ""redundant"", ""grandfathered"", ]: dic[""deprecated""][label] = {} for record in records[1:]: fields = [field.split("": "") for field in record.strip().split(""\n"")] typ = fields[0][1] tag = fields[1][1] if typ not in dic: dic[typ] = {} subfields = {} for field in fields[2:]: if len(field) == 2: [key, val] = field if key not in subfields: subfields[key] = [val] else: # multiple value subfields[key].append(val) else: # multiline field subfields[key][-1] += "" "" + field[0].strip() if ( ""Deprecated"" not in record and typ == ""language"" and key == ""Description"" ): self.langcode[subfields[key][-1]] = tag for key in subfields: if len(subfields[key]) == 1: # single value subfields[key] = subfields[key][0] if ""Deprecated"" in record: dic[""deprecated""][typ][tag] = subfields else: dic[typ][tag] = subfields return dic ",Convert the BCP-47 language subtag registry to a dictionary,9,137,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def data_dict(self, records): self.version = records[0].replace(""File-Date:"", """").strip() dic = {} dic[""deprecated""] = {} for label in [ ""language"", ""extlang"", ""script"", ""region"", ""variant"", ""redundant"", ""grandfathered"", ]: dic[""deprecated""][label] = {} for record in records[1:]: fields = [field.split("": "") for field in record.strip().split(""\n"")] typ = fields[0][1] tag = fields[1][1] if typ not in dic: dic[typ] = {} subfields = {} for field in fields[2:]: if len(field) == 2: [key, val] = field if key not in subfields: subfields[key] = [val] else: # multiple value subfields[key].append(val) else: # multiline field subfields[key][-1] += "" "" + field[0].strip() if ( ""Deprecated"" not in record and typ == ""language"" and key == ""Description"" ): self.langcode[subfields[key][-1]] = tag for key in subfields: if len(subfields[key]) == 1: # single value subfields[key] = subfields[key][0] if ""Deprecated"" in record: dic[""deprecated""][typ][tag] = subfields else: dic[typ][tag] = subfields return dic ``` ###Assistant : Convert the BCP-47 language subtag registry to a dictionary " 1179,"def test_async_add_hass_job_schedule_partial_coroutinefunction(event_loop): hass = MagicMock(loop=MagicMock(wraps=event_loop)) ",Test that we schedule partial coros and add jobs to the job pool.,13,5,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_async_add_hass_job_schedule_partial_coroutinefunction(event_loop): hass = MagicMock(loop=MagicMock(wraps=event_loop)) ``` ###Assistant : Test that we schedule partial coros and add jobs to the job pool. " 1180,"def to_label_objs(self, answer_type=""generative""): df_labels = self.df[[""id"", ""question"", ""answer_text"", ""answer_start"", ""context"", ""document_id""]] record_dicts = df_labels.to_dict(""records"") labels = [ Label( query=record[""question""], answer=Answer(answer=record[""answer_text""], answer_type=answer_type), is_correct_answer=True, is_correct_document=True, id=record[""id""], origin=record.get(""origin"", ""gold-label""), document=Document(content=record.get(""context""), id=str(record[""document_id""])), ) for record in record_dicts ] return labels ",Export all labels stored in this object to haystack.Label objects,10,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_label_objs(self, answer_type=""generative""): df_labels = self.df[[""id"", ""question"", ""answer_text"", ""answer_start"", ""context"", ""document_id""]] record_dicts = df_labels.to_dict(""records"") labels = [ Label( query=record[""question""], answer=Answer(answer=record[""answer_text""], answer_type=answer_type), is_correct_answer=True, is_correct_document=True, id=record[""id""], origin=record.get(""origin"", ""gold-label""), document=Document(content=record.get(""context""), id=str(record[""document_id""])), ) for record in record_dicts ] return labels ``` ###Assistant : Export all labels stored in this object to haystack.Label objects " 1181,"def test_api_get_storage_path(self): response = self.client.get(""/api/storage_paths/"", format=""json"") self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200) self.assertEqual(response.data[""count""], 1) resp_storage_path = response.data[""results""][0] self.assertEqual(resp_storage_path[""id""], self.sp1.id) self.assertEqual(resp_storage_path[""path""], self.sp1.path) "," GIVEN: - API request to get all storage paths WHEN: - API is called THEN: - Existing storage paths are returned ",21,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_api_get_storage_path(self): response = self.client.get(""/api/storage_paths/"", format=""json"") self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200) self.assertEqual(response.data[""count""], 1) resp_storage_path = response.data[""results""][0] self.assertEqual(resp_storage_path[""id""], self.sp1.id) self.assertEqual(resp_storage_path[""path""], self.sp1.path) ``` ###Assistant : GIVEN: - API request to get all storage paths WHEN: - API is called THEN: - Existing storage paths are returned " 1182,"def test_normalize_metric_warning(): msg = ""Normalized stress is not supported"" sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) with pytest.raises(ValueError, match=msg): mds.smacof(sim, metric=True, normalized_stress=True) "," Test that a UserWarning is emitted when using normalized stress with metric-MDS. ",12,33,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_normalize_metric_warning(): msg = ""Normalized stress is not supported"" sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) with pytest.raises(ValueError, match=msg): mds.smacof(sim, metric=True, normalized_stress=True) ``` ###Assistant : Test that a UserWarning is emitted when using normalized stress with metric-MDS. " 1183,"def delete_and_patch_duplicate_bins(): duplicate_bins = frappe.db.sql(, as_dict=1) for duplicate_bin in duplicate_bins: existing_bins = frappe.get_list(""Bin"", filters={ ""item_code"": duplicate_bin.item_code, ""warehouse"": duplicate_bin.warehouse }, fields=[""name""], order_by=""creation"",) # keep last one existing_bins.pop() for broken_bin in existing_bins: frappe.delete_doc(""Bin"", broken_bin.name) qty_dict = { ""reserved_qty"": get_reserved_qty(duplicate_bin.item_code, duplicate_bin.warehouse), ""indented_qty"": get_indented_qty(duplicate_bin.item_code, duplicate_bin.warehouse), ""ordered_qty"": get_ordered_qty(duplicate_bin.item_code, duplicate_bin.warehouse), ""planned_qty"": get_planned_qty(duplicate_bin.item_code, duplicate_bin.warehouse), ""actual_qty"": get_balance_qty_from_sle(duplicate_bin.item_code, duplicate_bin.warehouse) } update_bin_qty(duplicate_bin.item_code, duplicate_bin.warehouse, qty_dict) "," SELECT item_code, warehouse, count(*) as bin_count FROM tabBin GROUP BY item_code, warehouse HAVING bin_count > 1 ",16,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delete_and_patch_duplicate_bins(): duplicate_bins = frappe.db.sql(, as_dict=1) for duplicate_bin in duplicate_bins: existing_bins = frappe.get_list(""Bin"", filters={ ""item_code"": duplicate_bin.item_code, ""warehouse"": duplicate_bin.warehouse }, fields=[""name""], order_by=""creation"",) # keep last one existing_bins.pop() for broken_bin in existing_bins: frappe.delete_doc(""Bin"", broken_bin.name) qty_dict = { ""reserved_qty"": get_reserved_qty(duplicate_bin.item_code, duplicate_bin.warehouse), ""indented_qty"": get_indented_qty(duplicate_bin.item_code, duplicate_bin.warehouse), ""ordered_qty"": get_ordered_qty(duplicate_bin.item_code, duplicate_bin.warehouse), ""planned_qty"": get_planned_qty(duplicate_bin.item_code, duplicate_bin.warehouse), ""actual_qty"": get_balance_qty_from_sle(duplicate_bin.item_code, duplicate_bin.warehouse) } update_bin_qty(duplicate_bin.item_code, duplicate_bin.warehouse, qty_dict) ``` ###Assistant : SELECT item_code, warehouse, count(*) as bin_count FROM tabBin GROUP BY item_code, warehouse HAVING bin_count > 1 " 1184,"async def test_stop_long_running_job(job_sdk_client): agent_client, head_client = job_sdk_client with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) driver_script = test_script_file = path / ""test_script.py"" with open(test_script_file, ""w+"") as file: file.write(driver_script) runtime_env = {""working_dir"": tmp_dir} runtime_env = upload_working_dir_if_needed(runtime_env, tmp_dir, logger=logger) runtime_env = RuntimeEnv(**runtime_env).to_dict() request = validate_request_type( {""runtime_env"": runtime_env, ""entrypoint"": ""python test_script.py""}, JobSubmitRequest, ) submit_result = await agent_client.submit_job_internal(request) job_id = submit_result.submission_id resp = await agent_client.stop_job_internal(job_id) assert resp.stopped is True wait_for_condition( partial( _check_job, client=head_client, job_id=job_id, status=JobStatus.STOPPED ), timeout=10, ) 
@pytest.mark.asyncio"," Submit a job that runs for a while and stop it in the middle. print('Hello !') import time time.sleep(300) # This should never finish raise RuntimeError('Intentionally failed.') ",27,74,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_stop_long_running_job(job_sdk_client): agent_client, head_client = job_sdk_client with tempfile.TemporaryDirectory() as tmp_dir: path = Path(tmp_dir) driver_script = test_script_file = path / ""test_script.py"" with open(test_script_file, ""w+"") as file: file.write(driver_script) runtime_env = {""working_dir"": tmp_dir} runtime_env = upload_working_dir_if_needed(runtime_env, tmp_dir, logger=logger) runtime_env = RuntimeEnv(**runtime_env).to_dict() request = validate_request_type( {""runtime_env"": runtime_env, ""entrypoint"": ""python test_script.py""}, JobSubmitRequest, ) submit_result = await agent_client.submit_job_internal(request) job_id = submit_result.submission_id resp = await agent_client.stop_job_internal(job_id) assert resp.stopped is True wait_for_condition( partial( _check_job, client=head_client, job_id=job_id, status=JobStatus.STOPPED ), timeout=10, ) @pytest.mark.asyncio ``` ###Assistant : Submit a job that runs for a while and stop it in the middle. print('Hello !') import time time.sleep(300) # This should never finish raise RuntimeError('Intentionally failed.') " 1185,"def print_help(self): source_txt = CRYPTO_SOURCES.get(self.source, ""?"") if self.source != """" else """" help_text = f console.print(text=help_text, menu=""Stocks - Due Diligence"") ","Print help[cmds] load load a specific cryptocurrency for analysis [param]Coin: [/param]{self.current_coin} [param]Source: [/param]{source_txt} [src]Glassnode[/src] active active addresses nonzero addresses with non-zero balances change 30d change of supply held on exchange wallets eb total balance held on exchanges (in percentage and units) [src]Coinglass[/src] oi open interest per exchange [src]CoinPaprika[/src] basic basic information about loaded coin ps price and supply related metrics for loaded coin mkt all markets for loaded coin ex all exchanges where loaded coin is listed twitter tweets for loaded coin events events related to loaded coin [src]CoinGecko[/src] info basic information about loaded coin market market stats about loaded coin ath all time high related stats for loaded coin atl all time low related stats for loaded coin web found websites for loaded coin e.g forum, homepage social social portals urls for loaded coin, e.g reddit, twitter score different kind of scores for loaded coin, e.g developer score, sentiment score dev github, bitbucket coin development statistics bc links to blockchain explorers for loaded coin [src]Binance[/src] binbook show order book balance show coin balance [src]Coinbase[/src] cbbook show order book trades show last trades stats show coin stats[/cmds] ",187,20,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def print_help(self): source_txt = CRYPTO_SOURCES.get(self.source, ""?"") if self.source != """" else """" help_text = f console.print(text=help_text, menu=""Stocks - Due Diligence"") ``` ###Assistant : Print help[cmds] load load a specific cryptocurrency for analysis [param]Coin: [/param]{self.current_coin} [param]Source: [/param]{source_txt} [src]Glassnode[/src] active active addresses nonzero addresses with non-zero balances change 30d change of supply held on exchange wallets eb total balance held on exchanges (in percentage and units) [src]Coinglass[/src] oi open interest per exchange [src]CoinPaprika[/src] basic basic information about loaded coin ps price and supply related metrics for loaded coin mkt all markets for loaded coin ex all exchanges where loaded coin is listed twitter tweets for loaded coin events events related to loaded coin [src]CoinGecko[/src] info basic information about loaded coin market market stats about loaded coin ath all time high related stats for loaded coin atl all time low related stats for loaded coin web found websites for loaded coin e.g forum, homepage social social portals urls for loaded coin, e.g reddit, twitter score different kind of scores for loaded coin, e.g developer score, sentiment score dev github, bitbucket coin development statistics bc links to blockchain explorers for loaded coin [src]Binance[/src] binbook show order book balance show coin balance [src]Coinbase[/src] cbbook show order book trades show last trades stats show coin stats[/cmds] " 1186,"def test_get_cached_repo_files_with_all_files(self): responses.add( method=responses.GET, url=f""https://api.github.com/repos/{self.repo.name}/git/trees/master?recursive=1"", status=200, json={ ""tree"": [ {""type"": ""blob"", ""path"": ""src/foo.py""}, {""type"": ""blob"", ""path"": ""README""}, ] }, ) repo_key = f""github:repo:{self.repo.name}:all"" assert cache.get(repo_key) is None with mock.patch(""sentry.integrations.github.client.get_jwt"", return_value=b""jwt_token_1""): files = self.client.get_cached_repo_files(self.repo.name, ""master"") assert files == [""src/foo.py""] ",Fetch files for repo. All files rather than just source code files,12,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_cached_repo_files_with_all_files(self): responses.add( method=responses.GET, url=f""https://api.github.com/repos/{self.repo.name}/git/trees/master?recursive=1"", status=200, json={ ""tree"": [ {""type"": ""blob"", ""path"": ""src/foo.py""}, {""type"": ""blob"", ""path"": ""README""}, ] }, ) repo_key = f""github:repo:{self.repo.name}:all"" assert cache.get(repo_key) is None with mock.patch(""sentry.integrations.github.client.get_jwt"", return_value=b""jwt_token_1""): files = self.client.get_cached_repo_files(self.repo.name, ""master"") assert files == [""src/foo.py""] ``` ###Assistant : Fetch files for repo. All files rather than just source code files " 1187,"def check_migrations(self): from django.db.migrations.executor import MigrationExecutor try: executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS]) except ImproperlyConfigured: # No databases are configured (or the dummy one) return plan = executor.migration_plan(executor.loader.graph.leaf_nodes()) if plan: apps_waiting_migration = sorted( {migration.app_label for migration, backwards in plan} ) self.stdout.write( self.style.NOTICE( ""\nYou have %(unapplied_migration_count)s unapplied migration(s). 
"" ""Your project may not work properly until you apply the "" ""migrations for app(s): %(apps_waiting_migration)s."" % { ""unapplied_migration_count"": len(plan), ""apps_waiting_migration"": "", "".join(apps_waiting_migration), } ) ) self.stdout.write( self.style.NOTICE(""Run 'python manage.py migrate' to apply them."") ) "," Print a warning if the set of migrations on disk don't match the migrations in the database. ",17,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_migrations(self): from django.db.migrations.executor import MigrationExecutor try: executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS]) except ImproperlyConfigured: # No databases are configured (or the dummy one) return plan = executor.migration_plan(executor.loader.graph.leaf_nodes()) if plan: apps_waiting_migration = sorted( {migration.app_label for migration, backwards in plan} ) self.stdout.write( self.style.NOTICE( ""\nYou have %(unapplied_migration_count)s unapplied migration(s). "" ""Your project may not work properly until you apply the "" ""migrations for app(s): %(apps_waiting_migration)s."" % { ""unapplied_migration_count"": len(plan), ""apps_waiting_migration"": "", "".join(apps_waiting_migration), } ) ) self.stdout.write( self.style.NOTICE(""Run 'python manage.py migrate' to apply them."") ) ``` ###Assistant : Print a warning if the set of migrations on disk don't match the migrations in the database. " 1188,"def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize): if not is_chordal(G): raise nx.NetworkXError(""Input graph is not chordal."") H = nx.Graph(G) H.add_edge(s, t) induced_nodes = set() triplet = _find_chordality_breaker(H, s, treewidth_bound) while triplet: (u, v, w) = triplet induced_nodes.update(triplet) for n in triplet: if n != s: H.add_edge(s, n) triplet = _find_chordality_breaker(H, s, treewidth_bound) if induced_nodes: # Add t and the second node in the induced path from s to t. induced_nodes.add(t) for u in G[s]: if len(induced_nodes & set(G[u])) == 2: induced_nodes.add(u) break return induced_nodes ","Returns the set of induced nodes in the path from s to t. Parameters ---------- G : graph A chordal NetworkX graph s : node Source node to look for induced nodes t : node Destination node to look for induced nodes treewidth_bound: float Maximum treewidth acceptable for the graph H. The search for induced nodes will end as soon as the treewidth_bound is exceeded. Returns ------- induced_nodes : Set of nodes The set of induced nodes in the path from s to t in G Raises ------ NetworkXError The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. If the input graph is an instance of one of these classes, a :exc:`NetworkXError` is raised. The algorithm can only be applied to chordal graphs. If the input graph is found to be non-chordal, a :exc:`NetworkXError` is raised. Examples -------- >>> G = nx.Graph() >>> G = nx.generators.classic.path_graph(10) >>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2) >>> sorted(induced_nodes) [1, 2, 3, 4, 5, 6, 7, 8, 9] Notes ----- G must be a chordal graph and (s,t) an edge that is not in G. If a treewidth_bound is provided, the search for induced nodes will end as soon as the treewidth_bound is exceeded. The algorithm is inspired by Algorithm 4 in [1]_. A formal definition of induced node can also be found on that reference. References ---------- .. [1] Learning Bounded Treewidth Bayesian Networks. Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008. 
http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf ",239,82,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize): if not is_chordal(G): raise nx.NetworkXError(""Input graph is not chordal."") H = nx.Graph(G) H.add_edge(s, t) induced_nodes = set() triplet = _find_chordality_breaker(H, s, treewidth_bound) while triplet: (u, v, w) = triplet induced_nodes.update(triplet) for n in triplet: if n != s: H.add_edge(s, n) triplet = _find_chordality_breaker(H, s, treewidth_bound) if induced_nodes: # Add t and the second node in the induced path from s to t. induced_nodes.add(t) for u in G[s]: if len(induced_nodes & set(G[u])) == 2: induced_nodes.add(u) break return induced_nodes ``` ###Assistant : Returns the set of induced nodes in the path from s to t. Parameters ---------- G : graph A chordal NetworkX graph s : node Source node to look for induced nodes t : node Destination node to look for induced nodes treewidth_bound: float Maximum treewidth acceptable for the graph H. The search for induced nodes will end as soon as the treewidth_bound is exceeded. Returns ------- induced_nodes : Set of nodes The set of induced nodes in the path from s to t in G Raises ------ NetworkXError The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. If the input graph is an instance of one of these classes, a :exc:`NetworkXError` is raised. The algorithm can only be applied to chordal graphs. If the input graph is found to be non-chordal, a :exc:`NetworkXError` is raised. Examples -------- >>> G = nx.Graph() >>> G = nx.generators.classic.path_graph(10) >>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2) >>> sorted(induced_nodes) [1, 2, 3, 4, 5, 6, 7, 8, 9] Notes ----- G must be a chordal graph and (s,t) an edge that is not in G. If a treewidth_bound is provided, the search for induced nodes will end as soon as the treewidth_bound is exceeded. The algorithm is inspired by Algorithm 4 in [1]_. A formal definition of induced node can also be found on that reference. References ---------- .. [1] Learning Bounded Treewidth Bayesian Networks. Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008. http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf " 1189,"def any_skipna_inferred_dtype(request): inferred_dtype, values = request.param values = np.array(values, dtype=object) # object dtype to avoid casting # correctness of inference tested in tests/dtypes/test_inference.py return inferred_dtype, values # ---------------------------------------------------------------- # Misc # ---------------------------------------------------------------- @pytest.fixture"," Fixture for all inferred dtypes from _libs.lib.infer_dtype The covered (inferred) types are: * 'string' * 'empty' * 'bytes' * 'mixed' * 'mixed-integer' * 'mixed-integer-float' * 'floating' * 'integer' * 'decimal' * 'boolean' * 'datetime64' * 'datetime' * 'date' * 'timedelta' * 'time' * 'period' * 'interval' Returns ------- inferred_dtype : str The string for the inferred dtype from _libs.lib.infer_dtype values : np.ndarray An array of object dtype that will be inferred to have `inferred_dtype` Examples -------- >>> from pandas._libs import lib >>> >>> def test_something(any_skipna_inferred_dtype): ... inferred_dtype, values = any_skipna_inferred_dtype ... # will pass ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype ",100,33,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def any_skipna_inferred_dtype(request): inferred_dtype, values = request.param values = np.array(values, dtype=object) # object dtype to avoid casting # correctness of inference tested in tests/dtypes/test_inference.py return inferred_dtype, values # ---------------------------------------------------------------- # Misc # ---------------------------------------------------------------- @pytest.fixture ``` ###Assistant : Fixture for all inferred dtypes from _libs.lib.infer_dtype The covered (inferred) types are: * 'string' * 'empty' * 'bytes' * 'mixed' * 'mixed-integer' * 'mixed-integer-float' * 'floating' * 'integer' * 'decimal' * 'boolean' * 'datetime64' * 'datetime' * 'date' * 'timedelta' * 'time' * 'period' * 'interval' Returns ------- inferred_dtype : str The string for the inferred dtype from _libs.lib.infer_dtype values : np.ndarray An array of object dtype that will be inferred to have `inferred_dtype` Examples -------- >>> from pandas._libs import lib >>> >>> def test_something(any_skipna_inferred_dtype): ... inferred_dtype, values = any_skipna_inferred_dtype ... # will pass ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype " 1190,"def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'): r assert isinstance(factor, int) and factor >= 1 if k is None: k = [1] * factor k = _setup_kernel(k) * gain p = k.shape[0] - factor return _simple_upfirdn_2d(x, k, down=factor, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl) #---------------------------------------------------------------------------- ","Downsample a batch of 2D images with the given filter. Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is a multiple of the downsampling factor. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to average pooling. factor: Integer downsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). impl: Name of the implementation to use. Can be `""ref""` or `""cuda""` (default). Returns: Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same datatype as `x`. ",181,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'): r assert isinstance(factor, int) and factor >= 1 if k is None: k = [1] * factor k = _setup_kernel(k) * gain p = k.shape[0] - factor return _simple_upfirdn_2d(x, k, down=factor, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl) #---------------------------------------------------------------------------- ``` ###Assistant : Downsample a batch of 2D images with the given filter. Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified `gain`. 
Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is a multiple of the downsampling factor. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to average pooling. factor: Integer downsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). impl: Name of the implementation to use. Can be `""ref""` or `""cuda""` (default). Returns: Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same datatype as `x`. " 1191,"def _check_deprecated_resample_kwargs(kwargs, origin): # Deprecation warning of `base` and `loffset` since v1.1.0: # we are raising the warning here to be able to set the `stacklevel` # properly since we need to raise the `base` and `loffset` deprecation # warning from three different cases: # core/generic.py::NDFrame.resample # core/groupby/groupby.py::GroupBy.resample # core/groupby/grouper.py::Grouper # raising these warnings from TimeGrouper directly would fail the test: # tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base if kwargs.get(""base"", None) is not None: warnings.warn( ""'base' in .resample() and in Grouper() is deprecated.\n"" ""The new arguments that you should use are 'offset' or 'origin'.\n"" '\n>>> df.resample(freq=""3s"", base=2)\n' ""\nbecomes:\n"" '\n>>> df.resample(freq=""3s"", offset=""2s"")\n', FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) if kwargs.get(""loffset"", None) is not None: warnings.warn( ""'loffset' in .resample() and in Grouper() is deprecated.\n"" '\n>>> df.resample(freq=""3s"", loffset=""8H"")\n' ""\nbecomes:\n"" ""\n>>> from pandas.tseries.frequencies import to_offset"" '\n>>> df = df.resample(freq=""3s"").mean()' '\n>>> df.index = df.index.to_timestamp() + to_offset(""8H"")\n', FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) "," Check for use of deprecated parameters in ``resample`` and related functions. Raises the appropriate warnings if these parameters are detected. Only sets an approximate ``stacklevel`` for the warnings (see #37603, #36629). Parameters ---------- kwargs : dict Dictionary of keyword arguments to check for deprecated parameters. origin : object From where this function is being called; either Grouper or TimeGrouper. Used to determine an approximate stacklevel. ",65,136,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _check_deprecated_resample_kwargs(kwargs, origin): # Deprecation warning of `base` and `loffset` since v1.1.0: # we are raising the warning here to be able to set the `stacklevel` # properly since we need to raise the `base` and `loffset` deprecation # warning from three different cases: # core/generic.py::NDFrame.resample # core/groupby/groupby.py::GroupBy.resample # core/groupby/grouper.py::Grouper # raising these warnings from TimeGrouper directly would fail the test: # tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base if kwargs.get(""base"", None) is not None: warnings.warn( ""'base' in .resample() and in Grouper() is deprecated.\n"" ""The new arguments that you should use are 'offset' or 'origin'.\n"" '\n>>> df.resample(freq=""3s"", base=2)\n' ""\nbecomes:\n"" '\n>>> df.resample(freq=""3s"", offset=""2s"")\n', FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) if kwargs.get(""loffset"", None) is not None: warnings.warn( ""'loffset' in .resample() and in Grouper() is deprecated.\n"" '\n>>> df.resample(freq=""3s"", loffset=""8H"")\n' ""\nbecomes:\n"" ""\n>>> from pandas.tseries.frequencies import to_offset"" '\n>>> df = df.resample(freq=""3s"").mean()' '\n>>> df.index = df.index.to_timestamp() + to_offset(""8H"")\n', FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) ``` ###Assistant : Check for use of deprecated parameters in ``resample`` and related functions. Raises the appropriate warnings if these parameters are detected. Only sets an approximate ``stacklevel`` for the warnings (see #37603, #36629). Parameters ---------- kwargs : dict Dictionary of keyword arguments to check for deprecated parameters. origin : object From where this function is being called; either Grouper or TimeGrouper. Used to determine an approximate stacklevel. " 1192,"def get_conn(self) -> DataCatalogClient: if not self._client: self._client = DataCatalogClient(credentials=self._get_credentials(), client_info=CLIENT_INFO) return self._client ",Retrieves client library object that allow access to Cloud Data Catalog service.,12,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_conn(self) -> DataCatalogClient: if not self._client: self._client = DataCatalogClient(credentials=self._get_credentials(), client_info=CLIENT_INFO) return self._client ``` ###Assistant : Retrieves client library object that allow access to Cloud Data Catalog service. " 1193,"def get_api_client(self) -> ApiClient: try: return new_client_from_config_dict( config_dict=self.config, context=self.context ) except ConfigException: raise "," Returns an instance of the kubernetes api client with a specific context ",12,13,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_api_client(self) -> ApiClient: try: return new_client_from_config_dict( config_dict=self.config, context=self.context ) except ConfigException: raise ``` ###Assistant : Returns an instance of the kubernetes api client with a specific context " 1194,"def cuts(self) -> list[list[int]]: if self._cuts is not None: return self._cuts width = self.width height = self.height screen_region = Region(0, 0, width, height) cuts_sets = [{0, width} for _ in range(height)] if self.map is not None: for region, order, clip in self.map.values(): region = region.intersection(clip) if region and (region in screen_region): region_cuts = region.x_extents for y in region.y_range: cuts_sets[y].update(region_cuts) # Sort the cuts for each line self._cuts = [sorted(cut_set) for cut_set in cuts_sets] return self._cuts ","Get vertical cuts. A cut is every point on a line where a widget starts or ends. Returns: list[list[int]]: A list of cuts for every line. ",26,75,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cuts(self) -> list[list[int]]: if self._cuts is not None: return self._cuts width = self.width height = self.height screen_region = Region(0, 0, width, height) cuts_sets = [{0, width} for _ in range(height)] if self.map is not None: for region, order, clip in self.map.values(): region = region.intersection(clip) if region and (region in screen_region): region_cuts = region.x_extents for y in region.y_range: cuts_sets[y].update(region_cuts) # Sort the cuts for each line self._cuts = [sorted(cut_set) for cut_set in cuts_sets] return self._cuts ``` ###Assistant : Get vertical cuts. A cut is every point on a line where a widget starts or ends. Returns: list[list[int]]: A list of cuts for every line. " 1195,"def is_subclassed(layer): return ( layer.__module__.find(""keras.engine"") == -1 and layer.__module__.find(""keras.layers"") == -1 ) ",Returns True if the object is a subclassed layer or subclassed model.,12,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_subclassed(layer): return ( layer.__module__.find(""keras.engine"") == -1 and layer.__module__.find(""keras.layers"") == -1 ) ``` ###Assistant : Returns True if the object is a subclassed layer or subclassed model. " 1196,"def _mat(self): sympy_deprecation_warning( , deprecated_since_version=""1.9"", active_deprecations_target=""deprecated-private-matrix-attributes"" ) return self.flat() "," The private _mat attribute of Matrix is deprecated. Use the .flat() method instead. ",13,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _mat(self): sympy_deprecation_warning( , deprecated_since_version=""1.9"", active_deprecations_target=""deprecated-private-matrix-attributes"" ) return self.flat() ``` ###Assistant : The private _mat attribute of Matrix is deprecated. Use the .flat() method instead. 
" 1197,"def test_missing_required_field(self): cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True) cf3.save() cf3.content_types.set([ContentType.objects.get_for_model(Site)]) site = Site(name='Test Site', slug='test-site') # Set custom field data with a required field omitted site.custom_field_data['foo'] = 'abc' with self.assertRaises(ValidationError): site.clean() site.custom_field_data['baz'] = 'def' site.clean() "," Check that a ValidationError is raised if any required custom fields are not present. ",14,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_missing_required_field(self): cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True) cf3.save() cf3.content_types.set([ContentType.objects.get_for_model(Site)]) site = Site(name='Test Site', slug='test-site') # Set custom field data with a required field omitted site.custom_field_data['foo'] = 'abc' with self.assertRaises(ValidationError): site.clean() site.custom_field_data['baz'] = 'def' site.clean() ``` ###Assistant : Check that a ValidationError is raised if any required custom fields are not present. " 1198,"def test_basic(self): context = Context({}) template = expected = self.assertHTMLEqual(expected, Template(template).render(context)) "," {% load wagtailadmin_tags %} {% fragment as my_fragment %}

    Hello, World

    {% endfragment %} Text coming after: {{ my_fragment }} Text coming after:

    Hello, World

    ",25,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_basic(self): context = Context({}) template = expected = self.assertHTMLEqual(expected, Template(template).render(context)) ``` ###Assistant : {% load wagtailadmin_tags %} {% fragment as my_fragment %}

    Hello, World

    {% endfragment %} Text coming after: {{ my_fragment }} Text coming after:

    Hello, World

    " 1199,"def to_dense(self) -> Series: from pandas import Series return Series( self._parent.array.to_dense(), index=self._parent.index, name=self._parent.name, ) "," Convert a Series from sparse values to dense. .. versionadded:: 0.25.0 Returns ------- Series: A Series with the same values, stored as a dense array. Examples -------- >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0])) >>> series 0 0 1 1 2 0 dtype: Sparse[int64, 0] >>> series.sparse.to_dense() 0 0 1 1 2 0 dtype: int64 ",54,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_dense(self) -> Series: from pandas import Series return Series( self._parent.array.to_dense(), index=self._parent.index, name=self._parent.name, ) ``` ###Assistant : Convert a Series from sparse values to dense. .. versionadded:: 0.25.0 Returns ------- Series: A Series with the same values, stored as a dense array. Examples -------- >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0])) >>> series 0 0 1 1 2 0 dtype: Sparse[int64, 0] >>> series.sparse.to_dense() 0 0 1 1 2 0 dtype: int64 " 1200,"def replaceHTMLEntity(t): return _htmlEntityMap.get(t.entity) # it's easy to get these comment structures wrong - they're very common, so may as well make them available cStyleComment = Combine(Regex(r""/\*(?:[^*]|\*(?!/))*"") + '*/').setName(""C style comment"") ""Comment of the form ``/* ... */``"" htmlComment = Regex(r"""").setName(""HTML comment"") ""Comment of the form ````"" restOfLine = Regex(r"".*"").leaveWhitespace().setName(""rest of line"") dblSlashComment = Regex(r""//(?:\\\n|[^\n])*"").setName(""// comment"") ""Comment of the form ``// ... (to end of line)``"" cppStyleComment = Combine(Regex(r""/\*(?:[^*]|\*(?!/))*"") + '*/' | dblSlashComment).setName(""C++ style comment"") ""Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"" javaStyleComment = cppStyleComment ""Same as :class:`cppStyleComment`"" pythonStyleComment = Regex(r""#.*"").setName(""Python style comment"") ""Comment of the form ``# ... (to end of line)``"" _commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + Optional(Word("" \t"") + ~Literal("","") + ~LineEnd()))).streamline().setName(""commaItem"") commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="""")).setName(""commaSeparatedList"") # some other useful expressions - using lower-case class name since we are really using this as a namespace","Helper parser action to replace common HTML entities with their special characters(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`. ",34,141,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def replaceHTMLEntity(t): return _htmlEntityMap.get(t.entity) # it's easy to get these comment structures wrong - they're very common, so may as well make them available cStyleComment = Combine(Regex(r""/\*(?:[^*]|\*(?!/))*"") + '*/').setName(""C style comment"") ""Comment of the form ``/* ... */``"" htmlComment = Regex(r"""").setName(""HTML comment"") ""Comment of the form ````"" restOfLine = Regex(r"".*"").leaveWhitespace().setName(""rest of line"") dblSlashComment = Regex(r""//(?:\\\n|[^\n])*"").setName(""// comment"") ""Comment of the form ``// ... 
(to end of line)``"" cppStyleComment = Combine(Regex(r""/\*(?:[^*]|\*(?!/))*"") + '*/' | dblSlashComment).setName(""C++ style comment"") ""Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"" javaStyleComment = cppStyleComment ""Same as :class:`cppStyleComment`"" pythonStyleComment = Regex(r""#.*"").setName(""Python style comment"") ""Comment of the form ``# ... (to end of line)``"" _commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + Optional(Word("" \t"") + ~Literal("","") + ~LineEnd()))).streamline().setName(""commaItem"") commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="""")).setName(""commaSeparatedList"") # some other useful expressions - using lower-case class name since we are really using this as a namespace ``` ###Assistant : Helper parser action to replace common HTML entities with their special characters(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`. " 1201,"def read_csv_with_nan(path, nan_percent=0.0): df = pd.read_csv(path) if nan_percent > 0: num_rows = len(df) for col in df.columns: for row in random.sample(range(num_rows), int(round(nan_percent * num_rows))): df[col].iloc[row] = np.nan return df ",Converts `nan_percent` of samples in each row of the CSV at `path` to NaNs.,14,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read_csv_with_nan(path, nan_percent=0.0): df = pd.read_csv(path) if nan_percent > 0: num_rows = len(df) for col in df.columns: for row in random.sample(range(num_rows), int(round(nan_percent * num_rows))): df[col].iloc[row] = np.nan return df ``` ###Assistant : Converts `nan_percent` of samples in each row of the CSV at `path` to NaNs. " 1202,"def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Setup filter kernel. if k is None: k = [1] * factor # setup kernel k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) k = k * (gain * (factor**2)) if self.use_conv: convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. stride = [1, 1, factor, factor] output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW) output_padding = ( output_shape[0] - (x.shape[2] - 1) * stride[0] - convH, output_shape[1] - (x.shape[3] - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 inC = w.shape[1] num_groups = x.shape[1] // inC # Transpose weights. w = paddle.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].transpose([0, 2, 1, 3, 4]) w = paddle.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv2d_transpose(x, w, stride=stride, output_padding=output_padding, padding=0) x = upfirdn2d_native(x, paddle.to_tensor(k), pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) else: p = k.shape[0] - factor x = upfirdn2d_native(x, paddle.to_tensor(k), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)) return x ","Fused `upsample_2d()` followed by `Conv2d()`. Args: Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary: order. 
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`. ",139,219,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Setup filter kernel. if k is None: k = [1] * factor # setup kernel k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) k = k * (gain * (factor**2)) if self.use_conv: convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. stride = [1, 1, factor, factor] output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW) output_padding = ( output_shape[0] - (x.shape[2] - 1) * stride[0] - convH, output_shape[1] - (x.shape[3] - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 inC = w.shape[1] num_groups = x.shape[1] // inC # Transpose weights. w = paddle.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].transpose([0, 2, 1, 3, 4]) w = paddle.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv2d_transpose(x, w, stride=stride, output_padding=output_padding, padding=0) x = upfirdn2d_native(x, paddle.to_tensor(k), pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) else: p = k.shape[0] - factor x = upfirdn2d_native(x, paddle.to_tensor(k), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)) return x ``` ###Assistant : Fused `upsample_2d()` followed by `Conv2d()`. Args: Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary: order. x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`. " 1203,"def get_network_names(self) -> t.Optional[t.List[str]]: if self.networks is None: return None return sorted(self.networks) ",Return a list of the network names the container is attached to.,12,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_network_names(self) -> t.Optional[t.List[str]]: if self.networks is None: return None return sorted(self.networks) ``` ###Assistant : Return a list of the network names the container is attached to. 
" 1204,"def get_delivered_items_cost(): dn_items = frappe.db.sql( , as_dict=1, ) si_items = frappe.db.sql( , as_dict=1, ) dn_item_map = {} for item in dn_items: dn_item_map.setdefault(item.project, item.amount) for item in si_items: dn_item_map.setdefault(item.project, item.amount) return dn_item_map ","select dn.project, sum(dn_item.base_net_amount) as amount from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item where dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project, '') != '' group by dn.projectselect si.project, sum(si_item.base_net_amount) as amount from `tabSales Invoice` si, `tabSales Invoice Item` si_item where si.name = si_item.parent and si.docstatus = 1 and si.update_stock = 1 and si.is_pos = 1 and ifnull(si.project, '') != '' group by si.project",65,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_delivered_items_cost(): dn_items = frappe.db.sql( , as_dict=1, ) si_items = frappe.db.sql( , as_dict=1, ) dn_item_map = {} for item in dn_items: dn_item_map.setdefault(item.project, item.amount) for item in si_items: dn_item_map.setdefault(item.project, item.amount) return dn_item_map ``` ###Assistant : select dn.project, sum(dn_item.base_net_amount) as amount from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item where dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project, '') != '' group by dn.projectselect si.project, sum(si_item.base_net_amount) as amount from `tabSales Invoice` si, `tabSales Invoice Item` si_item where si.name = si_item.parent and si.docstatus = 1 and si.update_stock = 1 and si.is_pos = 1 and ifnull(si.project, '') != '' group by si.project " 1205,"def __new__(cls, *args, **kw_args): is_canon_bp = kw_args.get('is_canon_bp', False) args = list(map(_sympify, args)) free = [get_free_indices(arg) for arg in args] free = set(itertools.chain(*free)) #flatten free newargs = [] for arg in args: dum_this = set(get_dummy_indices(arg)) dum_other = [get_dummy_indices(a) for a in newargs] dum_other = set(itertools.chain(*dum_other)) #flatten dum_other free_this = set(get_free_indices(arg)) if len(dum_this.intersection(free)) > 0: exclude = free_this.union(free, dum_other) newarg = TensMul._dedupe_indices(arg, exclude, arg._index_structure) else: newarg = arg newargs.append(newarg) args = newargs # Flatten: args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])] args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False) # Data for indices: index_types = [i.tensor_index_type for i in indices] index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) obj = TensExpr.__new__(cls, *args) obj._indices = indices obj._index_types = index_types[:] obj._index_structure = index_structure obj._free = index_structure.free[:] obj._dum = index_structure.dum[:] obj._free_indices = {x[0] for x in obj.free} obj._rank = len(obj.free) obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) obj._coeff = S.One obj._is_canon_bp = is_canon_bp return obj index_types = property(lambda self: self._index_types) free = property(lambda self: self._free) dum = property(lambda self: self._dum) free_indices = property(lambda self: self._free_indices) rank = property(lambda self: self._rank) ext_rank = property(lambda self: self._ext_rank) "," If the internal dummy indices in one arg conflict with the free indices of the remaining args, we need to rename those internal 
dummy indices. ",25,186,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __new__(cls, *args, **kw_args): is_canon_bp = kw_args.get('is_canon_bp', False) args = list(map(_sympify, args)) free = [get_free_indices(arg) for arg in args] free = set(itertools.chain(*free)) #flatten free newargs = [] for arg in args: dum_this = set(get_dummy_indices(arg)) dum_other = [get_dummy_indices(a) for a in newargs] dum_other = set(itertools.chain(*dum_other)) #flatten dum_other free_this = set(get_free_indices(arg)) if len(dum_this.intersection(free)) > 0: exclude = free_this.union(free, dum_other) newarg = TensMul._dedupe_indices(arg, exclude, arg._index_structure) else: newarg = arg newargs.append(newarg) args = newargs # Flatten: args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])] args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False) # Data for indices: index_types = [i.tensor_index_type for i in indices] index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) obj = TensExpr.__new__(cls, *args) obj._indices = indices obj._index_types = index_types[:] obj._index_structure = index_structure obj._free = index_structure.free[:] obj._dum = index_structure.dum[:] obj._free_indices = {x[0] for x in obj.free} obj._rank = len(obj.free) obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) obj._coeff = S.One obj._is_canon_bp = is_canon_bp return obj index_types = property(lambda self: self._index_types) free = property(lambda self: self._free) dum = property(lambda self: self._dum) free_indices = property(lambda self: self._free_indices) rank = property(lambda self: self._rank) ext_rank = property(lambda self: self._ext_rank) ``` ###Assistant : If the internal dummy indices in one arg conflict with the free indices of the remaining args, we need to rename those internal dummy indices. " 1206,"def test_context_for_crash_rate_alert(self): status = TriggerStatus.ACTIVE incident = self.create_incident() alert_rule = self.create_alert_rule( aggregate=""percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"" ) alert_rule_trigger = self.create_alert_rule_trigger(alert_rule) action = self.create_alert_rule_trigger_action( alert_rule_trigger=alert_rule_trigger, triggered_for_incident=incident ) assert ( generate_incident_trigger_email_context( self.project, incident, action.alert_rule_trigger, status, IncidentStatus.CRITICAL )[""aggregate""] == ""percentage(sessions_crashed, sessions)"" ) "," Test that ensures the metric name for Crash rate alerts excludes the alias ",13,38,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_context_for_crash_rate_alert(self): status = TriggerStatus.ACTIVE incident = self.create_incident() alert_rule = self.create_alert_rule( aggregate=""percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"" ) alert_rule_trigger = self.create_alert_rule_trigger(alert_rule) action = self.create_alert_rule_trigger_action( alert_rule_trigger=alert_rule_trigger, triggered_for_incident=incident ) assert ( generate_incident_trigger_email_context( self.project, incident, action.alert_rule_trigger, status, IncidentStatus.CRITICAL )[""aggregate""] == ""percentage(sessions_crashed, sessions)"" ) ``` ###Assistant : Test that ensures the metric name for Crash rate alerts excludes the alias " 1207,"def get_template(self, template_name, skip=None): tried = [] for origin in self.get_template_sources(template_name): if skip is not None and origin in skip: tried.append((origin, ""Skipped to avoid recursion"")) continue try: contents = self.get_contents(origin) except TemplateDoesNotExist: tried.append((origin, ""Source does not exist"")) continue else: return Template( contents, origin, origin.template_name, self.engine, ) raise TemplateDoesNotExist(template_name, tried=tried) "," Call self.get_template_sources() and return a Template object for the first template matching template_name. If skip is provided, ignore template origins in skip. This is used to avoid recursion during template extending. ",31,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_template(self, template_name, skip=None): tried = [] for origin in self.get_template_sources(template_name): if skip is not None and origin in skip: tried.append((origin, ""Skipped to avoid recursion"")) continue try: contents = self.get_contents(origin) except TemplateDoesNotExist: tried.append((origin, ""Source does not exist"")) continue else: return Template( contents, origin, origin.template_name, self.engine, ) raise TemplateDoesNotExist(template_name, tried=tried) ``` ###Assistant : Call self.get_template_sources() and return a Template object for the first template matching template_name. If skip is provided, ignore template origins in skip. This is used to avoid recursion during template extending. " 1208,"def from_config(cls, config): if ""learning_rate"" in config: if isinstance(config[""learning_rate""], dict): config[""learning_rate""] = learning_rate_schedule.deserialize( config[""learning_rate""] ) return cls(**config) base_optimizer_keyword_args = # pylint: disable=g-classes-have-attributes @keras_export(""keras.optimizers.experimental.Optimizer"", v1=[])","Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. Returns: An optimizer instance. name: String. The name to use for momentum accumulator weights created by the optimizer. clipnorm: Float. If set, the gradient of each weight is individually clipped so that its norm is no higher than this value. clipvalue: Float. If set, the gradient of each weight is clipped to be no higher than this value. global_clipnorm: Float. If set, the gradient of all weights is clipped so that their global norm is no higher than this value. use_ema: Boolean, defaults to False. If True, exponential moving average (EMA) is applied. 
EMA consists of computing an exponential moving average of the weights of the model (as the weight values change after each training batch), and periodically overwriting the weights with their moving average. ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`. This is the momentum to use when computing the EMA of the model's weights: `new_average = ema_momentum * old_average + (1 - ema_momentum) * current_variable_value`. ema_overwrite_frequency: Int or None, defaults to None. Only used if `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations, we overwrite the model variable by its moving average. If None, the optimizer does not overwrite model variables in the middle of training, and you need to explicitly overwrite the variables at the end of training by calling `optimizer.finalize_variable_values()` (which updates the model variables in-place). When using the built-in `fit()` training loop, this happens automatically after the last epoch, and you don't need to do anything. jit_compile: Boolean, defaults to True. If True, the optimizer will use XLA compilation. If no GPU device is found, this flag will be ignored. **kwargs: keyword arguments only used for backward compatibility.",306,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def from_config(cls, config): if ""learning_rate"" in config: if isinstance(config[""learning_rate""], dict): config[""learning_rate""] = learning_rate_schedule.deserialize( config[""learning_rate""] ) return cls(**config) base_optimizer_keyword_args = # pylint: disable=g-classes-have-attributes @keras_export(""keras.optimizers.experimental.Optimizer"", v1=[]) ``` ###Assistant : Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. Returns: An optimizer instance. name: String. The name to use for momentum accumulator weights created by the optimizer. clipnorm: Float. If set, the gradient of each weight is individually clipped so that its norm is no higher than this value. clipvalue: Float. If set, the gradient of each weight is clipped to be no higher than this value. global_clipnorm: Float. If set, the gradient of all weights is clipped so that their global norm is no higher than this value. use_ema: Boolean, defaults to False. If True, exponential moving average (EMA) is applied. EMA consists of computing an exponential moving average of the weights of the model (as the weight values change after each training batch), and periodically overwriting the weights with their moving average. ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`. This is the momentum to use when computing the EMA of the model's weights: `new_average = ema_momentum * old_average + (1 - ema_momentum) * current_variable_value`. ema_overwrite_frequency: Int or None, defaults to None. Only used if `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations, we overwrite the model variable by its moving average. If None, the optimizer does not overwrite model variables in the middle of training, and you need to explicitly overwrite the variables at the end of training by calling `optimizer.finalize_variable_values()` (which updates the model variables in-place). When using the built-in `fit()` training loop, this happens automatically after the last epoch, and you don't need to do anything. 
jit_compile: Boolean, defaults to True. If True, the optimizer will use XLA compilation. If no GPU device is found, this flag will be ignored. **kwargs: keyword arguments only used for backward compatibility. " 1209,"def close(self): # When application exit, system shuts down all handlers by # calling close method. Here we check if logger is already # closed to prevent uploading the log to remote storage multiple # times when `logging.shutdown` is called. if self.closed: return super().close() if not self.upload_on_close: return local_loc = os.path.join(self.local_base, self.log_relative_path) remote_loc = os.path.join(self.remote_base, self.log_relative_path) if os.path.exists(local_loc): # read log and remove old logs to get just the latest additions log = pathlib.Path(local_loc).read_text() self.s3_write(log, remote_loc) # Mark closed so we don't double write if close is called twice self.closed = True ",Close and upload local log file to remote storage S3.,10,92,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def close(self): # When application exit, system shuts down all handlers by # calling close method. Here we check if logger is already # closed to prevent uploading the log to remote storage multiple # times when `logging.shutdown` is called. if self.closed: return super().close() if not self.upload_on_close: return local_loc = os.path.join(self.local_base, self.log_relative_path) remote_loc = os.path.join(self.remote_base, self.log_relative_path) if os.path.exists(local_loc): # read log and remove old logs to get just the latest additions log = pathlib.Path(local_loc).read_text() self.s3_write(log, remote_loc) # Mark closed so we don't double write if close is called twice self.closed = True ``` ###Assistant : Close and upload local log file to remote storage S3. " 1210,"def safestring_in_template_exception(request): template = Template('{% extends """" %}') try: template.render(Context()) except Exception: return technical_500_response(request, *sys.exc_info()) "," Trigger an exception in the template machinery which causes a SafeString to be inserted as args[0] of the Exception. ",19,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def safestring_in_template_exception(request): template = Template('{% extends """" %}') try: template.render(Context()) except Exception: return technical_500_response(request, *sys.exc_info()) ``` ###Assistant : Trigger an exception in the template machinery which causes a SafeString to be inserted as args[0] of the Exception. " 1211,"async def test_unique_id_in_progress(hass, manager): mock_integration(hass, MockModule(""comp"")) mock_entity_platform(hass, ""config_flow.comp"", None) ",Test that we abort if there is already a flow in progress with same unique id.,16,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_unique_id_in_progress(hass, manager): mock_integration(hass, MockModule(""comp"")) mock_entity_platform(hass, ""config_flow.comp"", None) ``` ###Assistant : Test that we abort if there is already a flow in progress with same unique id. " 1212,"def test_simple(self) -> None: event_factory = self.hs.get_event_builder_factory() bob = ""@creator:test"" alice = ""@alice:test"" room_id = ""!room:test"" # Ensure that we have a rooms entry so that we generate the chain index. 
self.get_success( self.store.store_room( room_id=room_id, room_creator_user_id="""", is_public=True, room_version=RoomVersions.V6, ) ) create = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Create, ""state_key"": """", ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""create""}, }, ).build(prev_event_ids=[], auth_event_ids=[]) ) bob_join = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Member, ""state_key"": bob, ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""bob_join""}, }, ).build(prev_event_ids=[], auth_event_ids=[create.event_id]) ) power = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.PowerLevels, ""state_key"": """", ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""power""}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id], ) ) alice_invite = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Member, ""state_key"": alice, ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""alice_invite""}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) alice_join = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Member, ""state_key"": alice, ""sender"": alice, ""room_id"": room_id, ""content"": {""tag"": ""alice_join""}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, alice_invite.event_id, power.event_id], ) ) power_2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.PowerLevels, ""state_key"": """", ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""power_2""}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) bob_join_2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Member, ""state_key"": bob, ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""bob_join_2""}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) alice_join2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Member, ""state_key"": alice, ""sender"": alice, ""room_id"": room_id, ""content"": {""tag"": ""alice_join2""}, }, ).build( prev_event_ids=[], auth_event_ids=[ create.event_id, alice_join.event_id, power_2.event_id, ], ) ) events = [ create, bob_join, power, alice_invite, alice_join, bob_join_2, power_2, alice_join2, ] expected_links = [ (bob_join, create), (power, create), (power, bob_join), (alice_invite, create), (alice_invite, power), (alice_invite, bob_join), (bob_join_2, power), (alice_join2, power_2), ] self.persist(events) chain_map, link_map = self.fetch_chains(events) # Check that the expected links and only the expected links have been # added. self.assertEqual(len(expected_links), len(list(link_map.get_additions()))) for start, end in expected_links: start_id, start_seq = chain_map[start.event_id] end_id, end_seq = chain_map[end.event_id] self.assertIn( (start_seq, end_seq), list(link_map.get_links_between(start_id, end_id)) ) # Test that everything can reach the create event, but the create event # can't reach anything. 
for event in events[1:]: self.assertTrue( link_map.exists_path_from( chain_map[event.event_id], chain_map[create.event_id] ), ) self.assertFalse( link_map.exists_path_from( chain_map[create.event_id], chain_map[event.event_id], ), ) ","Test that the example in `docs/auth_chain_difference_algorithm.md` works. ",7,338,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_simple(self) -> None: event_factory = self.hs.get_event_builder_factory() bob = ""@creator:test"" alice = ""@alice:test"" room_id = ""!room:test"" # Ensure that we have a rooms entry so that we generate the chain index. self.get_success( self.store.store_room( room_id=room_id, room_creator_user_id="""", is_public=True, room_version=RoomVersions.V6, ) ) create = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Create, ""state_key"": """", ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""create""}, }, ).build(prev_event_ids=[], auth_event_ids=[]) ) bob_join = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Member, ""state_key"": bob, ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""bob_join""}, }, ).build(prev_event_ids=[], auth_event_ids=[create.event_id]) ) power = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.PowerLevels, ""state_key"": """", ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""power""}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id], ) ) alice_invite = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Member, ""state_key"": alice, ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""alice_invite""}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) alice_join = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Member, ""state_key"": alice, ""sender"": alice, ""room_id"": room_id, ""content"": {""tag"": ""alice_join""}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, alice_invite.event_id, power.event_id], ) ) power_2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.PowerLevels, ""state_key"": """", ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""power_2""}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) bob_join_2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Member, ""state_key"": bob, ""sender"": bob, ""room_id"": room_id, ""content"": {""tag"": ""bob_join_2""}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) alice_join2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { ""type"": EventTypes.Member, ""state_key"": alice, ""sender"": alice, ""room_id"": room_id, ""content"": {""tag"": ""alice_join2""}, }, ).build( prev_event_ids=[], auth_event_ids=[ create.event_id, alice_join.event_id, power_2.event_id, ], ) ) events = [ create, bob_join, power, alice_invite, alice_join, bob_join_2, power_2, alice_join2, ] expected_links = [ (bob_join, create), (power, create), (power, bob_join), (alice_invite, create), (alice_invite, power), (alice_invite, bob_join), (bob_join_2, power), (alice_join2, power_2), ] self.persist(events) chain_map, link_map = 
self.fetch_chains(events) # Check that the expected links and only the expected links have been # added. self.assertEqual(len(expected_links), len(list(link_map.get_additions()))) for start, end in expected_links: start_id, start_seq = chain_map[start.event_id] end_id, end_seq = chain_map[end.event_id] self.assertIn( (start_seq, end_seq), list(link_map.get_links_between(start_id, end_id)) ) # Test that everything can reach the create event, but the create event # can't reach anything. for event in events[1:]: self.assertTrue( link_map.exists_path_from( chain_map[event.event_id], chain_map[create.event_id] ), ) self.assertFalse( link_map.exists_path_from( chain_map[create.event_id], chain_map[event.event_id], ), ) ``` ###Assistant : Test that the example in `docs/auth_chain_difference_algorithm.md` works. " 1213,"async def _do_retry(self, func, attempts=3) -> Any: # pylint: disable=no-self-use exception = None for attempt in range(1, attempts + 1): _LOGGER.debug(""Attempt %s of %s"", attempt, attempts) try: return await func() except Exception as exception1: # pylint: disable=broad-except _LOGGER.debug( ""Failed attempt %s of %s (%s)"", attempt, attempts, exception1 ) # Make each backoff pause a little bit longer await asyncio.sleep(0.5 * attempt) exception = exception1 continue if exception: raise exception ","Retry a function call. Withings' API occasionally and incorrectly throws errors. Retrying the call tends to work. ",17,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def _do_retry(self, func, attempts=3) -> Any: # pylint: disable=no-self-use exception = None for attempt in range(1, attempts + 1): _LOGGER.debug(""Attempt %s of %s"", attempt, attempts) try: return await func() except Exception as exception1: # pylint: disable=broad-except _LOGGER.debug( ""Failed attempt %s of %s (%s)"", attempt, attempts, exception1 ) # Make each backoff pause a little bit longer await asyncio.sleep(0.5 * attempt) exception = exception1 continue if exception: raise exception ``` ###Assistant : Retry a function call. Withings' API occasionally and incorrectly throws errors. Retrying the call tends to work. " 1214,"def refactor_doctest(self, block, lineno, indent, filename): try: tree = self.parse_block(block, lineno, indent) except Exception as err: if self.logger.isEnabledFor(logging.DEBUG): for line in block: self.log_debug(""Source: %s"", line.rstrip(""\n"")) self.log_error(""Can't parse docstring in %s line %s: %s: %s"", filename, lineno, err.__class__.__name__, err) return block if self.refactor_tree(tree, filename): new = str(tree).splitlines(keepends=True) # Undo the adjustment of the line numbers in wrap_toks() below. clipped, new = new[:lineno-1], new[lineno-1:] assert clipped == [""\n""] * (lineno-1), clipped if not new[-1].endswith(""\n""): new[-1] += ""\n"" block = [indent + self.PS1 + new.pop(0)] if new: block += [indent + self.PS2 + line for line in new] return block ","Refactors one doctest. A doctest is given as a block of lines, the first of which starts with "">>>"" (possibly indented), while the remaining lines start with ""..."" (identically indented). ",30,97,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def refactor_doctest(self, block, lineno, indent, filename): try: tree = self.parse_block(block, lineno, indent) except Exception as err: if self.logger.isEnabledFor(logging.DEBUG): for line in block: self.log_debug(""Source: %s"", line.rstrip(""\n"")) self.log_error(""Can't parse docstring in %s line %s: %s: %s"", filename, lineno, err.__class__.__name__, err) return block if self.refactor_tree(tree, filename): new = str(tree).splitlines(keepends=True) # Undo the adjustment of the line numbers in wrap_toks() below. clipped, new = new[:lineno-1], new[lineno-1:] assert clipped == [""\n""] * (lineno-1), clipped if not new[-1].endswith(""\n""): new[-1] += ""\n"" block = [indent + self.PS1 + new.pop(0)] if new: block += [indent + self.PS2 + line for line in new] return block ``` ###Assistant : Refactors one doctest. A doctest is given as a block of lines, the first of which starts with "">>>"" (possibly indented), while the remaining lines start with ""..."" (identically indented). " 1215,"def get_tables(self, dataset_id) -> Response: client = self.connect() result = client.list_tables(dataset_id) return result "," Get a list with all of the tabels in BigQuery ",10,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_tables(self, dataset_id) -> Response: client = self.connect() result = client.list_tables(dataset_id) return result ``` ###Assistant : Get a list with all of the tabels in BigQuery " 1216,"def mac_set_relative_dylib_deps(libname, distname): from macholib import util from macholib.MachO import MachO # Ignore bootloader; otherwise PyInstaller fails with exception like # 'ValueError: total_size > low_offset (288 > 0)' if os.path.basename(libname) in _BOOTLOADER_FNAMES: return # Determine how many directories up ('../') is the directory with shared dynamic libraries. # E.g., ./qt4_plugins/images/ -> ./../../ parent_dir = '' # Check if distname is not only base filename. if os.path.dirname(distname): parent_level = len(os.path.dirname(distname).split(os.sep)) parent_dir = parent_level * (os.pardir + os.sep) "," On Mac OS set relative paths to dynamic library dependencies of `libname`. Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH. There are known some issues with DYLD_LIBRARY_PATH. Relative paths is more flexible mechanism. Current location of dependent libraries is derived from the location of the library path (paths start with '@loader_path'). 'distname' path of the library relative to dist directory of frozen executable. We need this to determine the level of directory level for @loader_path of binaries not found in dist directory. For example, Qt5 plugins are not in the same directory as Qt*.dylib files. Without using '@loader_path/../..' for Qt plugins, Mac OS would not be able to resolve shared library dependencies, and Qt plugins will not be loaded. ",120,76,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mac_set_relative_dylib_deps(libname, distname): from macholib import util from macholib.MachO import MachO # Ignore bootloader; otherwise PyInstaller fails with exception like # 'ValueError: total_size > low_offset (288 > 0)' if os.path.basename(libname) in _BOOTLOADER_FNAMES: return # Determine how many directories up ('../') is the directory with shared dynamic libraries. 
# E.g., ./qt4_plugins/images/ -> ./../../ parent_dir = '' # Check if distname is not only base filename. if os.path.dirname(distname): parent_level = len(os.path.dirname(distname).split(os.sep)) parent_dir = parent_level * (os.pardir + os.sep) ``` ###Assistant : On Mac OS set relative paths to dynamic library dependencies of `libname`. Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH. There are known some issues with DYLD_LIBRARY_PATH. Relative paths is more flexible mechanism. Current location of dependent libraries is derived from the location of the library path (paths start with '@loader_path'). 'distname' path of the library relative to dist directory of frozen executable. We need this to determine the level of directory level for @loader_path of binaries not found in dist directory. For example, Qt5 plugins are not in the same directory as Qt*.dylib files. Without using '@loader_path/../..' for Qt plugins, Mac OS would not be able to resolve shared library dependencies, and Qt plugins will not be loaded. " 1217,"def get_v2_optimizer(name, **kwargs): try: return _V2_OPTIMIZER_MAP[name](**kwargs) except KeyError: raise ValueError( ""Could not find requested v2 optimizer: {}\nValid choices: {}"".format( name, list(_V2_OPTIMIZER_MAP.keys()) ) ) ","Get the v2 optimizer requested. This is only necessary until v2 are the default, as we are testing in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the strings alone should be sufficient, and this mapping can theoretically be removed. Args: name: string name of Keras v2 optimizer. **kwargs: any kwargs to pass to the optimizer constructor. Returns: Initialized Keras v2 optimizer. Raises: ValueError: if an unknown name was passed. ",75,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_v2_optimizer(name, **kwargs): try: return _V2_OPTIMIZER_MAP[name](**kwargs) except KeyError: raise ValueError( ""Could not find requested v2 optimizer: {}\nValid choices: {}"".format( name, list(_V2_OPTIMIZER_MAP.keys()) ) ) ``` ###Assistant : Get the v2 optimizer requested. This is only necessary until v2 are the default, as we are testing in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the strings alone should be sufficient, and this mapping can theoretically be removed. Args: name: string name of Keras v2 optimizer. **kwargs: any kwargs to pass to the optimizer constructor. Returns: Initialized Keras v2 optimizer. Raises: ValueError: if an unknown name was passed. " 1218,"def _prefix_from_ip_int(cls, ip_int): trailing_zeroes = _count_righthand_zero_bits(ip_int, cls._max_prefixlen) prefixlen = cls._max_prefixlen - trailing_zeroes leading_ones = ip_int >> trailing_zeroes all_ones = (1 << prefixlen) - 1 if leading_ones != all_ones: byteslen = cls._max_prefixlen // 8 details = ip_int.to_bytes(byteslen, 'big') msg = 'Netmask pattern %r mixes zeroes & ones' raise ValueError(msg % details) return prefixlen ","Return prefix length from the bitwise netmask. Args: ip_int: An integer, the netmask in expanded bitwise format Returns: An integer, the prefix length. Raises: ValueError: If the input intermingles zeroes & ones ",32,52,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _prefix_from_ip_int(cls, ip_int): trailing_zeroes = _count_righthand_zero_bits(ip_int, cls._max_prefixlen) prefixlen = cls._max_prefixlen - trailing_zeroes leading_ones = ip_int >> trailing_zeroes all_ones = (1 << prefixlen) - 1 if leading_ones != all_ones: byteslen = cls._max_prefixlen // 8 details = ip_int.to_bytes(byteslen, 'big') msg = 'Netmask pattern %r mixes zeroes & ones' raise ValueError(msg % details) return prefixlen ``` ###Assistant : Return prefix length from the bitwise netmask. Args: ip_int: An integer, the netmask in expanded bitwise format Returns: An integer, the prefix length. Raises: ValueError: If the input intermingles zeroes & ones " 1219,"def batch_has_learnable_example(self, examples): for eg in examples: for ent in eg.predicted.ents: candidates = list(self.get_candidates(self.kb, ent)) if candidates: return True return False ","Check if a batch contains a learnable example. If one isn't present, then the update step needs to be skipped. ",20,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def batch_has_learnable_example(self, examples): for eg in examples: for ent in eg.predicted.ents: candidates = list(self.get_candidates(self.kb, ent)) if candidates: return True return False ``` ###Assistant : Check if a batch contains a learnable example. If one isn't present, then the update step needs to be skipped. " 1220,"def get_delayed_update_fields(self): self.extra_update_fields['emitted_events'] = self.event_ct if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''): self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE) return self.extra_update_fields ",Return finalized dict of all fields that should be saved along with the job status change,16,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_delayed_update_fields(self): self.extra_update_fields['emitted_events'] = self.event_ct if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''): self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE) return self.extra_update_fields ``` ###Assistant : Return finalized dict of all fields that should be saved along with the job status change " 1221,"def get_checks_result(warning_id=None): checks_result = checks.run_checks() if warning_id: return [ warning for warning in checks_result if warning.id == warning_id] return checks_result ",Run Django checks on any with the 'search' tag used when registering the check,14,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_checks_result(warning_id=None): checks_result = checks.run_checks() if warning_id: return [ warning for warning in checks_result if warning.id == warning_id] return checks_result ``` ###Assistant : Run Django checks on any with the 'search' tag used when registering the check " 1222,"def use_bottleneck_cb(key) -> None: from pandas.core import nanops nanops.set_use_bottleneck(cf.get_option(key)) use_numexpr_doc = "," : bool Use the numexpr library to accelerate computation if it is installed, the default is True Valid values: False,True ",20,11,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def use_bottleneck_cb(key) -> None: from pandas.core import nanops nanops.set_use_bottleneck(cf.get_option(key)) use_numexpr_doc = ``` ###Assistant : : bool Use the numexpr library to accelerate computation if it is installed, the default is True Valid values: False,True " 1223,"def test_repeated_column_labels(self, datapath): # GH 13923, 25772 msg = with pytest.raises(ValueError, match=msg): read_stata( datapath(""io"", ""data"", ""stata"", ""stata15.dta""), convert_categoricals=True, ) "," Value labels for column ethnicsn are not unique. These cannot be converted to pandas categoricals. Either read the file with `convert_categoricals` set to False or use the low level interface in `StataReader` to separately read the values and the value_labels. The repeated labels are:\n-+\nwolof ",44,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_repeated_column_labels(self, datapath): # GH 13923, 25772 msg = with pytest.raises(ValueError, match=msg): read_stata( datapath(""io"", ""data"", ""stata"", ""stata15.dta""), convert_categoricals=True, ) ``` ###Assistant : Value labels for column ethnicsn are not unique. These cannot be converted to pandas categoricals. Either read the file with `convert_categoricals` set to False or use the low level interface in `StataReader` to separately read the values and the value_labels. The repeated labels are:\n-+\nwolof " 1224,"def string_width_in_pixels(cls, font, string): # if no windows have been created (there is no hidden master root to rely on) then temporarily make a window so the measurement can happen if Window.NumOpenWindows == 0: root = tk.Tk() else: root = None size = 0 try: size = tkinter.font.Font(font=font).measure(string) # string's width except Exception as e: _error_popup_with_traceback('Exception retrieving string width in pixels', e) if root is not None: root.destroy() return size "," Get the with of the supplied string in pixels for the font being passed in. If an error occurs, 0 will be returned :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike, to be measured :type font: (str or (str, int[, str]) or None) :param string: the string to measure :type string: str :return: Width in pixels of string :rtype: (int) ",76,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def string_width_in_pixels(cls, font, string): # if no windows have been created (there is no hidden master root to rely on) then temporarily make a window so the measurement can happen if Window.NumOpenWindows == 0: root = tk.Tk() else: root = None size = 0 try: size = tkinter.font.Font(font=font).measure(string) # string's width except Exception as e: _error_popup_with_traceback('Exception retrieving string width in pixels', e) if root is not None: root.destroy() return size ``` ###Assistant : Get the with of the supplied string in pixels for the font being passed in. If an error occurs, 0 will be returned :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. 
Styles: italic * roman bold normal underline overstrike, to be measured :type font: (str or (str, int[, str]) or None) :param string: the string to measure :type string: str :return: Width in pixels of string :rtype: (int) " 1225,"def model_call_inputs(model, keep_original_batch_size=False): input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size) if input_specs is None: return None, None input_specs = _enforce_names_consistency(input_specs) return input_specs ","Inspect model to get its input signature. The model's input signature is a list with a single (possibly-nested) object. This is due to the Keras-enforced restriction that tensor inputs must be passed in as the first argument. For example, a model with input {'feature1': , 'feature2': } will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}] Args: model: Keras Model object. keep_original_batch_size: A boolean indicating whether we want to keep using the original batch size or set it to None. Default is `False`, which means that the batch dim of the returned input signature will always be set to `None`. Returns: A tuple containing `(args, kwargs)` TensorSpecs of the model call function inputs. `kwargs` does not contain the `training` argument. ",119,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def model_call_inputs(model, keep_original_batch_size=False): input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size) if input_specs is None: return None, None input_specs = _enforce_names_consistency(input_specs) return input_specs ``` ###Assistant : Inspect model to get its input signature. The model's input signature is a list with a single (possibly-nested) object. This is due to the Keras-enforced restriction that tensor inputs must be passed in as the first argument. For example, a model with input {'feature1': , 'feature2': } will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}] Args: model: Keras Model object. keep_original_batch_size: A boolean indicating whether we want to keep using the original batch size or set it to None. Default is `False`, which means that the batch dim of the returned input signature will always be set to `None`. Returns: A tuple containing `(args, kwargs)` TensorSpecs of the model call function inputs. `kwargs` does not contain the `training` argument. " 1226,"def _getSubDirectoryFolders(self, module, sub_dirs): module_dir = module.getCompileTimeDirectory() file_list = [] data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs] # Gather the full file list, probably makes no sense to include bytecode files file_list = sum( ( getFileList( data_dir, ignore_dirs=(""__pycache__"",), ignore_suffixes=("".pyc"",) ) for data_dir in data_dirs ), [], ) if not file_list: msg = ""No files or folders found for '%s' in subfolder(s) %r (%r)."" % ( module.getFullName(), sub_dirs, data_dirs, ) self.warning(msg) is_package = ( module.isCompiledPythonPackage() or module.isUncompiledPythonPackage() ) # We need to preserve the package target path in the dist folder. 
if is_package: package_part = module.getFullName().asPath() else: package = module.getFullName().getPackageName() if package is None: package_part = """" else: package_part = package.asPath() item_set = OrderedSet() for f in file_list: target = os.path.join(package_part, os.path.relpath(f, module_dir)) dir_name = os.path.dirname(target) item_set.add(dir_name) return self.makeIncludedEmptyDirectories( source_path=module_dir, dest_paths=item_set, reason=""Subdirectories of module %s"" % module.getFullName(), tags=""config"", ) ","Get dirnames in given subdirs of the module. Notes: All dirnames in folders below one of the sub_dirs are recursively retrieved and returned shortened to begin with the string of subdir. Args: module: module object sub_dirs: sub folder name(s) - tuple Returns: makeIncludedEmptyDirectories of found dirnames. ",46,139,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _getSubDirectoryFolders(self, module, sub_dirs): module_dir = module.getCompileTimeDirectory() file_list = [] data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs] # Gather the full file list, probably makes no sense to include bytecode files file_list = sum( ( getFileList( data_dir, ignore_dirs=(""__pycache__"",), ignore_suffixes=("".pyc"",) ) for data_dir in data_dirs ), [], ) if not file_list: msg = ""No files or folders found for '%s' in subfolder(s) %r (%r)."" % ( module.getFullName(), sub_dirs, data_dirs, ) self.warning(msg) is_package = ( module.isCompiledPythonPackage() or module.isUncompiledPythonPackage() ) # We need to preserve the package target path in the dist folder. if is_package: package_part = module.getFullName().asPath() else: package = module.getFullName().getPackageName() if package is None: package_part = """" else: package_part = package.asPath() item_set = OrderedSet() for f in file_list: target = os.path.join(package_part, os.path.relpath(f, module_dir)) dir_name = os.path.dirname(target) item_set.add(dir_name) return self.makeIncludedEmptyDirectories( source_path=module_dir, dest_paths=item_set, reason=""Subdirectories of module %s"" % module.getFullName(), tags=""config"", ) ``` ###Assistant : Get dirnames in given subdirs of the module. Notes: All dirnames in folders below one of the sub_dirs are recursively retrieved and returned shortened to begin with the string of subdir. Args: module: module object sub_dirs: sub folder name(s) - tuple Returns: makeIncludedEmptyDirectories of found dirnames. " 1227,"def test_orderby_percentile_with_many_fields_one_entity_no_data(self): for metric in [ TransactionMRI.MEASUREMENTS_FCP.value, ""transaction"", ]: perf_indexer_record(self.organization.id, metric) response = self.get_success_response( self.organization.slug, field=[ f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"", f""p50({TransactionMetricKey.MEASUREMENTS_FCP.value})"", ], statsPeriod=""1h"", interval=""1h"", groupBy=[""project_id"", ""transaction""], orderBy=f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"", useCase=""performance"", ) groups = response.data[""groups""] assert len(groups) == 0 "," Test that ensures that when metrics data is available then an empty response is returned gracefully ",16,33,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_orderby_percentile_with_many_fields_one_entity_no_data(self): for metric in [ TransactionMRI.MEASUREMENTS_FCP.value, ""transaction"", ]: perf_indexer_record(self.organization.id, metric) response = self.get_success_response( self.organization.slug, field=[ f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"", f""p50({TransactionMetricKey.MEASUREMENTS_FCP.value})"", ], statsPeriod=""1h"", interval=""1h"", groupBy=[""project_id"", ""transaction""], orderBy=f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"", useCase=""performance"", ) groups = response.data[""groups""] assert len(groups) == 0 ``` ###Assistant : Test that ensures that when metrics data is available then an empty response is returned gracefully " 1228,"def _add_save_button(self) -> None: logger.debug(""Adding save button"") button = tk.Button(self, text=""Save"", cursor=""hand2"", command=lambda: self.save_var.set(True)) button.pack(side=tk.LEFT) logger.debug(""Added save burron: '%s'"", button) ", Add a save button for saving out original preview ,9,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _add_save_button(self) -> None: logger.debug(""Adding save button"") button = tk.Button(self, text=""Save"", cursor=""hand2"", command=lambda: self.save_var.set(True)) button.pack(side=tk.LEFT) logger.debug(""Added save burron: '%s'"", button) ``` ###Assistant : Add a save button for saving out original preview " 1229,"def temporary_environ_defaults(**kwargs): old_env = os.environ.copy() try: for var in kwargs: # TODO: Consider warning on conflicts os.environ.setdefault(var, str(kwargs[var])) yield {var: os.environ[var] for var in kwargs} finally: for var in kwargs: if old_env.get(var): os.environ[var] = old_env[var] else: os.environ.pop(var, None) "," Temporarily override default values in os.environ. Yields a dictionary of the key/value pairs matching the provided keys. ",17,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def temporary_environ_defaults(**kwargs): old_env = os.environ.copy() try: for var in kwargs: # TODO: Consider warning on conflicts os.environ.setdefault(var, str(kwargs[var])) yield {var: os.environ[var] for var in kwargs} finally: for var in kwargs: if old_env.get(var): os.environ[var] = old_env[var] else: os.environ.pop(var, None) ``` ###Assistant : Temporarily override default values in os.environ. Yields a dictionary of the key/value pairs matching the provided keys. " 1230,"def test_same_entity_multiple_metric_ids_missing_data(self, mocked_derived_metrics): mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2 _indexer_record(self.organization.id, ""metric_foo_doe"") self.store_session( self.build_session( project_id=self.project.id, started=(time.time() // 60) * 60, status=""ok"", release=""foobar@2.0"", errors=2, ) ) response = self.get_response( self.organization.slug, ""derived_metric.multiple_metrics"", ) assert response.status_code == 404 assert response.json()[""detail""] == ( ""Not all the requested metrics or the constituent metrics in "" ""['derived_metric.multiple_metrics'] have data in the dataset"" ) "," Test when not requested metrics have data in the dataset ",10,53,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_same_entity_multiple_metric_ids_missing_data(self, mocked_derived_metrics): mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2 _indexer_record(self.organization.id, ""metric_foo_doe"") self.store_session( self.build_session( project_id=self.project.id, started=(time.time() // 60) * 60, status=""ok"", release=""foobar@2.0"", errors=2, ) ) response = self.get_response( self.organization.slug, ""derived_metric.multiple_metrics"", ) assert response.status_code == 404 assert response.json()[""detail""] == ( ""Not all the requested metrics or the constituent metrics in "" ""['derived_metric.multiple_metrics'] have data in the dataset"" ) ``` ###Assistant : Test when not requested metrics have data in the dataset " 1231,"def _align_matrices(x, y): x_matrix = _to_matrix(x) y_matrix = _to_matrix(y) x_shape = x_matrix.shape y_shape = y_matrix.shape if y_shape[1] != x_shape[1]: # dimensions do not match. raise ValueError( ""The outermost dimensions of the input tensors should match. "" f""Received y = {y_shape[1]} vs x = {x_shape[1]}."" ) x_tile = tf.tile(tf.expand_dims(x_matrix, 1), [1, y_shape[0], 1]) y_tile = tf.tile(tf.expand_dims(y_matrix, 0), [x_shape[0], 1, 1]) return x_tile, y_tile ",Aligns x and y tensors to allow computations over pairs of their rows.,13,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _align_matrices(x, y): x_matrix = _to_matrix(x) y_matrix = _to_matrix(y) x_shape = x_matrix.shape y_shape = y_matrix.shape if y_shape[1] != x_shape[1]: # dimensions do not match. raise ValueError( ""The outermost dimensions of the input tensors should match. "" f""Received y = {y_shape[1]} vs x = {x_shape[1]}."" ) x_tile = tf.tile(tf.expand_dims(x_matrix, 1), [1, y_shape[0], 1]) y_tile = tf.tile(tf.expand_dims(y_matrix, 0), [x_shape[0], 1, 1]) return x_tile, y_tile ``` ###Assistant : Aligns x and y tensors to allow computations over pairs of their rows. " 1232,"def test_prune_gap_if_dummy_local(self): body = self.helper.send(self.room_id, body=""Test"", tok=self.token) body = self.helper.send_event( self.room_id, type=EventTypes.Dummy, content={}, tok=self.token ) local_message_event_id = body[""event_id""] self.assert_extremities([local_message_event_id]) # Advance the clock for many days to make the old extremity ""old"". We # also set the depth to ""lots"". self.reactor.advance(7 * 24 * 60 * 60) # Fudge a second event which points to an event we don't have. This is a # state event so that the state changes (otherwise we won't prune the # extremity as they'll have the same state group). remote_event_2 = event_from_pdu_json( { ""type"": EventTypes.Member, ""state_key"": ""@user:other2"", ""content"": {""membership"": Membership.JOIN}, ""room_id"": self.room_id, ""sender"": ""@user:other2"", ""depth"": 10000, ""prev_events"": [""$some_unknown_message""], ""auth_events"": [], ""origin_server_ts"": self.clock.time_msec(), }, RoomVersions.V6, ) state_before_gap = self.get_success( self.state.get_current_state_ids(self.room_id) ) self.persist_event(remote_event_2, state=state_before_gap) # Check the new extremity is just the new remote event. self.assert_extremities([remote_event_2.event_id, local_message_event_id]) ","Test that we don't drop extremities after a gap when the previous extremity is a local dummy event and points to local events. ",23,131,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_prune_gap_if_dummy_local(self): body = self.helper.send(self.room_id, body=""Test"", tok=self.token) body = self.helper.send_event( self.room_id, type=EventTypes.Dummy, content={}, tok=self.token ) local_message_event_id = body[""event_id""] self.assert_extremities([local_message_event_id]) # Advance the clock for many days to make the old extremity ""old"". We # also set the depth to ""lots"". self.reactor.advance(7 * 24 * 60 * 60) # Fudge a second event which points to an event we don't have. This is a # state event so that the state changes (otherwise we won't prune the # extremity as they'll have the same state group). remote_event_2 = event_from_pdu_json( { ""type"": EventTypes.Member, ""state_key"": ""@user:other2"", ""content"": {""membership"": Membership.JOIN}, ""room_id"": self.room_id, ""sender"": ""@user:other2"", ""depth"": 10000, ""prev_events"": [""$some_unknown_message""], ""auth_events"": [], ""origin_server_ts"": self.clock.time_msec(), }, RoomVersions.V6, ) state_before_gap = self.get_success( self.state.get_current_state_ids(self.room_id) ) self.persist_event(remote_event_2, state=state_before_gap) # Check the new extremity is just the new remote event. self.assert_extremities([remote_event_2.event_id, local_message_event_id]) ``` ###Assistant : Test that we don't drop extremities after a gap when the previous extremity is a local dummy event and points to local events. " 1233,"def _reset_layer_losses(parent_layer): losses_dict = {} for layer in utils.list_all_layers_and_sublayers(parent_layer): losses_dict[layer] = { 'losses': layer._losses[:], 'eager_losses': layer._eager_losses[:] } with utils.no_automatic_dependency_tracking_scope(layer): layer._losses = [] layer._eager_losses = [] return losses_dict ","Resets losses of layer and its sublayers, and returns original losses.",11,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _reset_layer_losses(parent_layer): losses_dict = {} for layer in utils.list_all_layers_and_sublayers(parent_layer): losses_dict[layer] = { 'losses': layer._losses[:], 'eager_losses': layer._eager_losses[:] } with utils.no_automatic_dependency_tracking_scope(layer): layer._losses = [] layer._eager_losses = [] return losses_dict ``` ###Assistant : Resets losses of layer and its sublayers, and returns original losses. " 1234,"def test_result_list_editable_html(self): new_parent = Parent.objects.create(name=""parent"") new_child = Child.objects.create(name=""name"", parent=new_parent) request = self.factory.get(""/child/"") request.user = self.superuser m = ChildAdmin(Child, custom_site) # Test with list_editable fields m.list_display = [""id"", ""name"", ""parent""] m.list_display_links = [""id""] m.list_editable = [""name""] cl = m.get_changelist_instance(request) FormSet = m.get_changelist_formset(request) cl.formset = FormSet(queryset=cl.result_list) template = Template( ""{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"" ) context = Context({""cl"": cl, ""opts"": Child._meta}) table_output = template.render(context) # make sure that hidden fields are in the correct place hiddenfields_div = ( '
    ' '' ""
    "" ) % new_child.id self.assertInHTML( hiddenfields_div, table_output, msg_prefix=""Failed to find hidden fields"" ) # make sure that list editable fields are rendered in divs correctly editable_name_field = ( '' ) self.assertInHTML( '%s' % editable_name_field, table_output, msg_prefix='Failed to find ""name"" list_editable field', ) "," Regression tests for #11791: Inclusion tag result_list generates a table and this checks that the items are nested within the table element tags. Also a regression test for #13599, verifies that hidden fields when list_editable is enabled are rendered in a div outside the table. ",45,139,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_result_list_editable_html(self): new_parent = Parent.objects.create(name=""parent"") new_child = Child.objects.create(name=""name"", parent=new_parent) request = self.factory.get(""/child/"") request.user = self.superuser m = ChildAdmin(Child, custom_site) # Test with list_editable fields m.list_display = [""id"", ""name"", ""parent""] m.list_display_links = [""id""] m.list_editable = [""name""] cl = m.get_changelist_instance(request) FormSet = m.get_changelist_formset(request) cl.formset = FormSet(queryset=cl.result_list) template = Template( ""{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"" ) context = Context({""cl"": cl, ""opts"": Child._meta}) table_output = template.render(context) # make sure that hidden fields are in the correct place hiddenfields_div = ( '
    ' '' ""
    "" ) % new_child.id self.assertInHTML( hiddenfields_div, table_output, msg_prefix=""Failed to find hidden fields"" ) # make sure that list editable fields are rendered in divs correctly editable_name_field = ( '' ) self.assertInHTML( '%s' % editable_name_field, table_output, msg_prefix='Failed to find ""name"" list_editable field', ) ``` ###Assistant : Regression tests for #11791: Inclusion tag result_list generates a table and this checks that the items are nested within the table element tags. Also a regression test for #13599, verifies that hidden fields when list_editable is enabled are rendered in a div outside the table. " 1235,"def test_title_column(self): root_page = Page.objects.filter(depth=2).first() blog = Site.objects.create( hostname=""blog.example.com"", site_name=""My blog"", root_page=root_page ) gallery = Site.objects.create( hostname=""gallery.example.com"", site_name=""My gallery"", root_page=root_page ) data = [blog, gallery] table = Table( [ TitleColumn( ""hostname"", url_name=""wagtailsites:edit"", link_classname=""choose-site"", link_attrs={""data-chooser"": ""yes""}, ), Column(""site_name"", label=""Site name""), ], data, ) html = self.render_component(table) self.assertHTMLEqual( html, % (blog.pk, gallery.pk), ) ","
    Hostname Site name
    My blog
    My gallery
    ",37,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_title_column(self): root_page = Page.objects.filter(depth=2).first() blog = Site.objects.create( hostname=""blog.example.com"", site_name=""My blog"", root_page=root_page ) gallery = Site.objects.create( hostname=""gallery.example.com"", site_name=""My gallery"", root_page=root_page ) data = [blog, gallery] table = Table( [ TitleColumn( ""hostname"", url_name=""wagtailsites:edit"", link_classname=""choose-site"", link_attrs={""data-chooser"": ""yes""}, ), Column(""site_name"", label=""Site name""), ], data, ) html = self.render_component(table) self.assertHTMLEqual( html, % (blog.pk, gallery.pk), ) ``` ###Assistant :
    Hostname Site name
    My blog
    My gallery
    " 1236,"def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append("" "") else: output.append(char) return """".join(output) ",Performs invalid character removal and whitespace cleanup on text.,9,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append("" "") else: output.append(char) return """".join(output) ``` ###Assistant : Performs invalid character removal and whitespace cleanup on text. " 1237,"def call_candle(self, other_args): if self.symbol: parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog=""candle"", description=, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: plot_chart( symbol=self.symbol, currency=self.current_currency, prices_df=self.current_df, ) ","Process candle commandDisplay chart for loaded coin. You can specify currency vs which you want to show chart and also number of days to get data for.",27,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_candle(self, other_args): if self.symbol: parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog=""candle"", description=, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: plot_chart( symbol=self.symbol, currency=self.current_currency, prices_df=self.current_df, ) ``` ###Assistant : Process candle commandDisplay chart for loaded coin. You can specify currency vs which you want to show chart and also number of days to get data for. " 1238,"async def test_load_values_when_added_to_hass(hass): config = { ""binary_sensor"": { ""name"": ""Test_Binary"", ""platform"": ""bayesian"", ""unique_id"": ""3b4c9563-5e84-4167-8fe7-8f507e796d72"", ""device_class"": ""connectivity"", ""observations"": [ { ""platform"": ""state"", ""entity_id"": ""sensor.test_monitored"", ""to_state"": ""off"", ""prob_given_true"": 0.8, ""prob_given_false"": 0.4, } ], ""prior"": 0.2, ""probability_threshold"": 0.32, } } hass.states.async_set(""sensor.test_monitored"", ""off"") await hass.async_block_till_done() assert await async_setup_component(hass, ""binary_sensor"", config) await hass.async_block_till_done() entity_registry = async_get_entities(hass) assert ( entity_registry.entities[""binary_sensor.test_binary""].unique_id == ""bayesian-3b4c9563-5e84-4167-8fe7-8f507e796d72"" ) state = hass.states.get(""binary_sensor.test_binary"") assert state.attributes.get(""device_class"") == ""connectivity"" assert state.attributes.get(""observations"")[0][""prob_given_true""] == 0.8 assert state.attributes.get(""observations"")[0][""prob_given_false""] == 0.4 ",Test that sensor initializes with observations of relevant entities.,9,72,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_load_values_when_added_to_hass(hass): config = { ""binary_sensor"": { ""name"": ""Test_Binary"", ""platform"": ""bayesian"", ""unique_id"": ""3b4c9563-5e84-4167-8fe7-8f507e796d72"", ""device_class"": ""connectivity"", ""observations"": [ { ""platform"": ""state"", ""entity_id"": ""sensor.test_monitored"", ""to_state"": ""off"", ""prob_given_true"": 0.8, ""prob_given_false"": 0.4, } ], ""prior"": 0.2, ""probability_threshold"": 0.32, } } hass.states.async_set(""sensor.test_monitored"", ""off"") await hass.async_block_till_done() assert await async_setup_component(hass, ""binary_sensor"", config) await hass.async_block_till_done() entity_registry = async_get_entities(hass) assert ( entity_registry.entities[""binary_sensor.test_binary""].unique_id == ""bayesian-3b4c9563-5e84-4167-8fe7-8f507e796d72"" ) state = hass.states.get(""binary_sensor.test_binary"") assert state.attributes.get(""device_class"") == ""connectivity"" assert state.attributes.get(""observations"")[0][""prob_given_true""] == 0.8 assert state.attributes.get(""observations"")[0][""prob_given_false""] == 0.4 ``` ###Assistant : Test that sensor initializes with observations of relevant entities. " 1239,"def test_copy_with_target_credential(self): expression = ""col1, col2"" op = DatabricksCopyIntoOperator( file_location=COPY_FILE_LOCATION, file_format='CSV', table_name='test', task_id=TASK_ID, expression_list=expression, storage_credential='abc', credential={'AZURE_SAS_TOKEN': 'abc'}, ) assert ( op._create_sql_query() == f.strip() ) ","COPY INTO test WITH (CREDENTIAL abc) FROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') )) FILEFORMAT = CSV ",20,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_copy_with_target_credential(self): expression = ""col1, col2"" op = DatabricksCopyIntoOperator( file_location=COPY_FILE_LOCATION, file_format='CSV', table_name='test', task_id=TASK_ID, expression_list=expression, storage_credential='abc', credential={'AZURE_SAS_TOKEN': 'abc'}, ) assert ( op._create_sql_query() == f.strip() ) ``` ###Assistant : COPY INTO test WITH (CREDENTIAL abc) FROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') )) FILEFORMAT = CSV " 1240,"def show_actual_vendor_versions(vendor_txt_versions): # type: (Dict[str, str]) -> None for module_name, expected_version in vendor_txt_versions.items(): extra_message = '' actual_version = get_vendor_version_from_module(module_name) if not actual_version: extra_message = ' (Unable to locate actual module version, using'\ ' vendor.txt specified version)' actual_version = expected_version elif parse_version(actual_version) != parse_version(expected_version): extra_message = ' (CONFLICT: vendor.txt suggests version should'\ ' be {})'.format(expected_version) logger.info('%s==%s%s', module_name, actual_version, extra_message) ","Log the actual version and print extra info if there is a conflict or if the actual version could not be imported. ",22,58,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def show_actual_vendor_versions(vendor_txt_versions): # type: (Dict[str, str]) -> None for module_name, expected_version in vendor_txt_versions.items(): extra_message = '' actual_version = get_vendor_version_from_module(module_name) if not actual_version: extra_message = ' (Unable to locate actual module version, using'\ ' vendor.txt specified version)' actual_version = expected_version elif parse_version(actual_version) != parse_version(expected_version): extra_message = ' (CONFLICT: vendor.txt suggests version should'\ ' be {})'.format(expected_version) logger.info('%s==%s%s', module_name, actual_version, extra_message) ``` ###Assistant : Log the actual version and print extra info if there is a conflict or if the actual version could not be imported. " 1241,"def escape_rfc3986(s): if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') # ensure unicode: after quoting, it can always be converted return compat_str(compat_urllib_parse.quote(s, b""%/;:@&=+$,!~*'()?#[]"")) ",Escape non-ASCII characters as suggested by RFC 3986,8,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def escape_rfc3986(s): if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') # ensure unicode: after quoting, it can always be converted return compat_str(compat_urllib_parse.quote(s, b""%/;:@&=+$,!~*'()?#[]"")) ``` ###Assistant : Escape non-ASCII characters as suggested by RFC 3986 " 1242,"def call_social(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog=""social"", description=, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_social( self.coin_map_df[""CoinGecko""], export=ns_parser.export ) ","Process social commandShows social media corresponding to loaded coin. You can find there name of telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord.",25,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_social(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog=""social"", description=, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_social( self.coin_map_df[""CoinGecko""], export=ns_parser.export ) ``` ###Assistant : Process social commandShows social media corresponding to loaded coin. You can find there name of telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord. 
" 1243,"def test_ignores_different_group(self): url = reverse( ""sentry-api-0-project-event-details"", kwargs={ ""event_id"": self.next_transaction_event.event_id, ""project_slug"": self.next_transaction_event.project.slug, ""organization_slug"": self.next_transaction_event.project.organization.slug, }, ) with self.feature(""organizations:performance-issues""): response = self.client.get(url, format=""json"", data={""group_id"": self.group.id}) assert response.status_code == 200, response.content assert response.data[""id""] == str(self.next_transaction_event.event_id) assert response.data[""nextEventID""] is None ",Test that a different group's events aren't attributed to the one that was passed,14,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_ignores_different_group(self): url = reverse( ""sentry-api-0-project-event-details"", kwargs={ ""event_id"": self.next_transaction_event.event_id, ""project_slug"": self.next_transaction_event.project.slug, ""organization_slug"": self.next_transaction_event.project.organization.slug, }, ) with self.feature(""organizations:performance-issues""): response = self.client.get(url, format=""json"", data={""group_id"": self.group.id}) assert response.status_code == 200, response.content assert response.data[""id""] == str(self.next_transaction_event.event_id) assert response.data[""nextEventID""] is None ``` ###Assistant : Test that a different group's events aren't attributed to the one that was passed " 1244,"def _dedupe_indices(new, exclude, index_structure): inds_self = set(exclude) dums_new = set(get_dummy_indices(new)) conflicts = dums_new.intersection(inds_self) if len(conflicts) == 0: return None inds_self.update(dums_new) self_args_free = [(i, None) for i in inds_self] gen = index_structure._get_generator_for_dummy_indices(self_args_free) repl = {} for d in conflicts: if -d in repl.keys(): continue newname = gen(d.tensor_index_type) new_d = d.func(newname, *d.args[1:]) repl[d] = new_d repl[-d] = -new_d if len(repl) == 0: return None new_renamed = new._replace_indices(repl) return new_renamed "," exclude: set new: TensExpr index_structure: _IndexStructure (required to generate new dummy indices) If ``new`` has any dummy indices that are in ``exclude``, return a version of new with those indices replaced. If no replacements are needed, return None ``self_args_free`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``. Since the latter does not use the index position for anything, we just set it as ``None`` here. ",63,67,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _dedupe_indices(new, exclude, index_structure): inds_self = set(exclude) dums_new = set(get_dummy_indices(new)) conflicts = dums_new.intersection(inds_self) if len(conflicts) == 0: return None inds_self.update(dums_new) self_args_free = [(i, None) for i in inds_self] gen = index_structure._get_generator_for_dummy_indices(self_args_free) repl = {} for d in conflicts: if -d in repl.keys(): continue newname = gen(d.tensor_index_type) new_d = d.func(newname, *d.args[1:]) repl[d] = new_d repl[-d] = -new_d if len(repl) == 0: return None new_renamed = new._replace_indices(repl) return new_renamed ``` ###Assistant : exclude: set new: TensExpr index_structure: _IndexStructure (required to generate new dummy indices) If ``new`` has any dummy indices that are in ``exclude``, return a version of new with those indices replaced. If no replacements are needed, return None ``self_args_free`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``. Since the latter does not use the index position for anything, we just set it as ``None`` here. " 1245,"def get_window_extent(self, renderer=None): # make sure the location is updated so that transforms etc are correct: self._adjust_location() bb = super().get_window_extent(renderer=renderer) if self.axis is None or not self.axis.get_visible(): return bb bboxes = [bb] drawn_ticks = self.axis._update_ticks() major_tick = next(iter({*drawn_ticks} & {*self.axis.majorTicks}), None) minor_tick = next(iter({*drawn_ticks} & {*self.axis.minorTicks}), None) for tick in [major_tick, minor_tick]: if tick is None: continue bb0 = bb.frozen() tickl = tick._size tickdir = tick._tickdir if tickdir == 'out': padout = 1 padin = 0 elif tickdir == 'in': padout = 0 padin = 1 else: padout = 0.5 padin = 0.5 padout = padout * tickl / 72 * self.figure.dpi padin = padin * tickl / 72 * self.figure.dpi if tick.tick1line.get_visible(): if self.spine_type == 'left': bb0.x0 = bb0.x0 - padout bb0.x1 = bb0.x1 + padin elif self.spine_type == 'bottom': bb0.y0 = bb0.y0 - padout bb0.y1 = bb0.y1 + padin if tick.tick2line.get_visible(): if self.spine_type == 'right': bb0.x1 = bb0.x1 + padout bb0.x0 = bb0.x0 - padin elif self.spine_type == 'top': bb0.y1 = bb0.y1 + padout bb0.y0 = bb0.y0 - padout bboxes.append(bb0) return mtransforms.Bbox.union(bboxes) "," Return the window extent of the spines in display space, including padding for ticks (but not their labels) See Also -------- matplotlib.axes.Axes.get_tightbbox matplotlib.axes.Axes.get_window_extent ",23,174,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_window_extent(self, renderer=None): # make sure the location is updated so that transforms etc are correct: self._adjust_location() bb = super().get_window_extent(renderer=renderer) if self.axis is None or not self.axis.get_visible(): return bb bboxes = [bb] drawn_ticks = self.axis._update_ticks() major_tick = next(iter({*drawn_ticks} & {*self.axis.majorTicks}), None) minor_tick = next(iter({*drawn_ticks} & {*self.axis.minorTicks}), None) for tick in [major_tick, minor_tick]: if tick is None: continue bb0 = bb.frozen() tickl = tick._size tickdir = tick._tickdir if tickdir == 'out': padout = 1 padin = 0 elif tickdir == 'in': padout = 0 padin = 1 else: padout = 0.5 padin = 0.5 padout = padout * tickl / 72 * self.figure.dpi padin = padin * tickl / 72 * self.figure.dpi if tick.tick1line.get_visible(): if self.spine_type == 'left': bb0.x0 = bb0.x0 - padout bb0.x1 = bb0.x1 + padin elif self.spine_type == 'bottom': bb0.y0 = bb0.y0 - padout bb0.y1 = bb0.y1 + padin if tick.tick2line.get_visible(): if self.spine_type == 'right': bb0.x1 = bb0.x1 + padout bb0.x0 = bb0.x0 - padin elif self.spine_type == 'top': bb0.y1 = bb0.y1 + padout bb0.y0 = bb0.y0 - padout bboxes.append(bb0) return mtransforms.Bbox.union(bboxes) ``` ###Assistant : Return the window extent of the spines in display space, including padding for ticks (but not their labels) See Also -------- matplotlib.axes.Axes.get_tightbbox matplotlib.axes.Axes.get_window_extent " 1246,"def apply_support(self, location, type): if location not in self._node_labels: raise ValueError(""Support must be added on a known node"") else: self._supports[location] = type if type == ""pinned"": self._loads['R_'+str(location)+'_x']= [] self._loads['R_'+str(location)+'_y']= [] elif type == ""roller"": self._loads['R_'+str(location)+'_y']= [] if 'R_'+str(location)+'_x' in list(self._loads): self._loads.pop('R_'+str(location)+'_x') "," This method adds a pinned or roller support at a particular node Parameters ========== location: String or Symbol Label of the Node at which support is added. type: String Type of the support being provided at the node. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.apply_support('A', 'pinned') >>> t.supports {'A': 'pinned', 'B': 'none'} ",66,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply_support(self, location, type): if location not in self._node_labels: raise ValueError(""Support must be added on a known node"") else: self._supports[location] = type if type == ""pinned"": self._loads['R_'+str(location)+'_x']= [] self._loads['R_'+str(location)+'_y']= [] elif type == ""roller"": self._loads['R_'+str(location)+'_y']= [] if 'R_'+str(location)+'_x' in list(self._loads): self._loads.pop('R_'+str(location)+'_x') ``` ###Assistant : This method adds a pinned or roller support at a particular node Parameters ========== location: String or Symbol Label of the Node at which support is added. type: String Type of the support being provided at the node. 
Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.apply_support('A', 'pinned') >>> t.supports {'A': 'pinned', 'B': 'none'} " 1247,"def timers(self) -> list[dict[str, Any]]: return [ { ""enabled"": timer.enabled, ""cron"": timer.cron, ""next_schedule"": as_utc(timer.next_schedule), } for timer in self.coordinator.data.timers ] ",Get the list of added timers of the vacuum cleaner.,10,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def timers(self) -> list[dict[str, Any]]: return [ { ""enabled"": timer.enabled, ""cron"": timer.cron, ""next_schedule"": as_utc(timer.next_schedule), } for timer in self.coordinator.data.timers ] ``` ###Assistant : Get the list of added timers of the vacuum cleaner. " 1248,"def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False): old_fig_size = plt.rcParams[""figure.figsize""] if fig_size is not None: plt.rcParams[""figure.figsize""] = fig_size fig, ax = plt.subplots() x = np.array(range(len(chars))) my_xticks = [c for c in chars] plt.xticks(x, my_xticks) ax.set_xlabel(""characters"") ax.set_ylabel(""freq"") ax2 = ax.twinx() ax2.plot(pitch, linewidth=5.0, color=""red"") ax2.set_ylabel(""F0"") plt.rcParams[""figure.figsize""] = old_fig_size if not output_fig: plt.close() return fig ","Plot pitch curves on top of the input characters. Args: pitch (np.array): Pitch values. chars (str): Characters to place to the x-axis. Shapes: pitch: :math:`(T,)` ",25,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False): old_fig_size = plt.rcParams[""figure.figsize""] if fig_size is not None: plt.rcParams[""figure.figsize""] = fig_size fig, ax = plt.subplots() x = np.array(range(len(chars))) my_xticks = [c for c in chars] plt.xticks(x, my_xticks) ax.set_xlabel(""characters"") ax.set_ylabel(""freq"") ax2 = ax.twinx() ax2.plot(pitch, linewidth=5.0, color=""red"") ax2.set_ylabel(""F0"") plt.rcParams[""figure.figsize""] = old_fig_size if not output_fig: plt.close() return fig ``` ###Assistant : Plot pitch curves on top of the input characters. Args: pitch (np.array): Pitch values. chars (str): Characters to place to the x-axis. Shapes: pitch: :math:`(T,)` " 1249,"def composite(nth): n = as_int(nth) if n < 1: raise ValueError(""nth must be a positive integer; composite(1) == 4"") composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18] if n <= 10: return composite_arr[n - 1] a, b = 4, sieve._list[-1] if n <= b - primepi(b) - 1: while a < b - 1: mid = (a + b) >> 1 if mid - primepi(mid) - 1 > n: b = mid else: a = mid if isprime(a): a -= 1 return a from sympy.functions.elementary.exponential import log from sympy.functions.special.error_functions import li a = 4 # Lower bound for binary search b = int(n*(log(n) + log(log(n)))) # Upper bound for the search. while a < b: mid = (a + b) >> 1 if mid - li(mid) - 1 > n: b = mid else: a = mid + 1 n_composites = a - primepi(a) - 1 while n_composites > n: if not isprime(a): n_composites -= 1 a -= 1 if isprime(a): a -= 1 return a "," Return the nth composite number, with the composite numbers indexed as composite(1) = 4, composite(2) = 6, etc.... 
Examples ======== >>> from sympy import composite >>> composite(36) 52 >>> composite(1) 4 >>> composite(17737) 20000 See Also ======== sympy.ntheory.primetest.isprime : Test if n is prime primerange : Generate all primes in a given range primepi : Return the number of primes less than or equal to n prime : Return the nth prime compositepi : Return the number of positive composite numbers less than or equal to n ",87,170,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def composite(nth): n = as_int(nth) if n < 1: raise ValueError(""nth must be a positive integer; composite(1) == 4"") composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18] if n <= 10: return composite_arr[n - 1] a, b = 4, sieve._list[-1] if n <= b - primepi(b) - 1: while a < b - 1: mid = (a + b) >> 1 if mid - primepi(mid) - 1 > n: b = mid else: a = mid if isprime(a): a -= 1 return a from sympy.functions.elementary.exponential import log from sympy.functions.special.error_functions import li a = 4 # Lower bound for binary search b = int(n*(log(n) + log(log(n)))) # Upper bound for the search. while a < b: mid = (a + b) >> 1 if mid - li(mid) - 1 > n: b = mid else: a = mid + 1 n_composites = a - primepi(a) - 1 while n_composites > n: if not isprime(a): n_composites -= 1 a -= 1 if isprime(a): a -= 1 return a ``` ###Assistant : Return the nth composite number, with the composite numbers indexed as composite(1) = 4, composite(2) = 6, etc.... Examples ======== >>> from sympy import composite >>> composite(36) 52 >>> composite(1) 4 >>> composite(17737) 20000 See Also ======== sympy.ntheory.primetest.isprime : Test if n is prime primerange : Generate all primes in a given range primepi : Return the number of primes less than or equal to n prime : Return the nth prime compositepi : Return the number of positive composite numbers less than or equal to n " 1250,"def histogram2d(x, y, bins=10, range=None, density=None, weights=None): from numpy import histogramdd if len(x) != len(y): raise ValueError('x and y must have the same length.') try: N = len(bins) except TypeError: N = 1 if N != 1 and N != 2: xedges = yedges = asarray(bins) bins = [xedges, yedges] hist, edges = histogramdd([x, y], bins, range, density, weights) return hist, edges[0], edges[1] @set_module('numpy')"," Compute the bi-dimensional histogram of two data samples. Parameters ---------- x : array_like, shape (N,) An array containing the x coordinates of the points to be histogrammed. y : array_like, shape (N,) An array containing the y coordinates of the points to be histogrammed. bins : int or array_like or [int, int] or [array, array], optional The bin specification: * If int, the number of bins for the two dimensions (nx=ny=bins). * If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins). * If [int, int], the number of bins in each dimension (nx, ny = bins). * If [array, array], the bin edges in each dimension (x_edges, y_edges = bins). * A combination [int, array] or [array, int], where int is the number of bins and array is the bin edges. range : array_like, shape(2,2), optional The leftmost and rightmost edges of the bins along each dimension (if not specified explicitly in the `bins` parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range will be considered outliers and not tallied in the histogram. density : bool, optional If False, the default, returns the number of samples in each bin. 
If True, returns the probability *density* function at the bin, ``bin_count / sample_count / bin_area``. weights : array_like, shape(N,), optional An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights are normalized to 1 if `density` is True. If `density` is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray, shape(nx, ny) The bi-dimensional histogram of samples `x` and `y`. Values in `x` are histogrammed along the first dimension and values in `y` are histogrammed along the second dimension. xedges : ndarray, shape(nx+1,) The bin edges along the first dimension. yedges : ndarray, shape(ny+1,) The bin edges along the second dimension. See Also -------- histogram : 1D histogram histogramdd : Multidimensional histogram Notes ----- When `density` is True, then the returned histogram is the sample density, defined such that the sum over bins of the product ``bin_value * bin_area`` is 1. Please note that the histogram does not follow the Cartesian convention where `x` values are on the abscissa and `y` values on the ordinate axis. Rather, `x` is histogrammed along the first dimension of the array (vertical), and `y` along the second dimension of the array (horizontal). This ensures compatibility with `histogramdd`. Examples -------- >>> from matplotlib.image import NonUniformImage >>> import matplotlib.pyplot as plt Construct a 2-D histogram with variable bin width. First define the bin edges: >>> xedges = [0, 1, 3, 5] >>> yedges = [0, 2, 3, 4, 6] Next we create a histogram H with random bin content: >>> x = np.random.normal(2, 1, 100) >>> y = np.random.normal(1, 1, 100) >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) >>> # Histogram does not follow Cartesian convention (see Notes), >>> # therefore transpose H for visualization purposes. >>> H = H.T :func:`imshow ` can only display square bins: >>> fig = plt.figure(figsize=(7, 3)) >>> ax = fig.add_subplot(131, title='imshow: square bins') >>> plt.imshow(H, interpolation='nearest', origin='lower', ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) :func:`pcolormesh ` can display actual edges: >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges', ... aspect='equal') >>> X, Y = np.meshgrid(xedges, yedges) >>> ax.pcolormesh(X, Y, H) :class:`NonUniformImage ` can be used to display actual bin edges with interpolation: >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) >>> im = NonUniformImage(ax, interpolation='bilinear') >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 >>> im.set_data(xcenters, ycenters, H) >>> ax.images.append(im) >>> plt.show() It is also possible to construct a 2-D histogram without specifying bin edges: >>> # Generate non-symmetric test data >>> n = 10000 >>> x = np.linspace(1, 100, n) >>> y = 2*np.log(x) + np.random.rand(n) - 0.5 >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges >>> H, yedges, xedges = np.histogram2d(y, x, bins=20) Now we can plot the histogram using :func:`pcolormesh `, and a :func:`hexbin ` for comparison. 
>>> # Plot histogram using pcolormesh >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True) >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow') >>> ax1.plot(x, 2*np.log(x), 'k-') >>> ax1.set_xlim(x.min(), x.max()) >>> ax1.set_ylim(y.min(), y.max()) >>> ax1.set_xlabel('x') >>> ax1.set_ylabel('y') >>> ax1.set_title('histogram2d') >>> ax1.grid() >>> # Create hexbin plot for comparison >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow') >>> ax2.plot(x, 2*np.log(x), 'k-') >>> ax2.set_title('hexbin') >>> ax2.set_xlim(x.min(), x.max()) >>> ax2.set_xlabel('x') >>> ax2.grid() >>> plt.show() ",747,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def histogram2d(x, y, bins=10, range=None, density=None, weights=None): from numpy import histogramdd if len(x) != len(y): raise ValueError('x and y must have the same length.') try: N = len(bins) except TypeError: N = 1 if N != 1 and N != 2: xedges = yedges = asarray(bins) bins = [xedges, yedges] hist, edges = histogramdd([x, y], bins, range, density, weights) return hist, edges[0], edges[1] @set_module('numpy') ``` ###Assistant : Compute the bi-dimensional histogram of two data samples. Parameters ---------- x : array_like, shape (N,) An array containing the x coordinates of the points to be histogrammed. y : array_like, shape (N,) An array containing the y coordinates of the points to be histogrammed. bins : int or array_like or [int, int] or [array, array], optional The bin specification: * If int, the number of bins for the two dimensions (nx=ny=bins). * If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins). * If [int, int], the number of bins in each dimension (nx, ny = bins). * If [array, array], the bin edges in each dimension (x_edges, y_edges = bins). * A combination [int, array] or [array, int], where int is the number of bins and array is the bin edges. range : array_like, shape(2,2), optional The leftmost and rightmost edges of the bins along each dimension (if not specified explicitly in the `bins` parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range will be considered outliers and not tallied in the histogram. density : bool, optional If False, the default, returns the number of samples in each bin. If True, returns the probability *density* function at the bin, ``bin_count / sample_count / bin_area``. weights : array_like, shape(N,), optional An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights are normalized to 1 if `density` is True. If `density` is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray, shape(nx, ny) The bi-dimensional histogram of samples `x` and `y`. Values in `x` are histogrammed along the first dimension and values in `y` are histogrammed along the second dimension. xedges : ndarray, shape(nx+1,) The bin edges along the first dimension. yedges : ndarray, shape(ny+1,) The bin edges along the second dimension. See Also -------- histogram : 1D histogram histogramdd : Multidimensional histogram Notes ----- When `density` is True, then the returned histogram is the sample density, defined such that the sum over bins of the product ``bin_value * bin_area`` is 1. Please note that the histogram does not follow the Cartesian convention where `x` values are on the abscissa and `y` values on the ordinate axis. 
Rather, `x` is histogrammed along the first dimension of the array (vertical), and `y` along the second dimension of the array (horizontal). This ensures compatibility with `histogramdd`. Examples -------- >>> from matplotlib.image import NonUniformImage >>> import matplotlib.pyplot as plt Construct a 2-D histogram with variable bin width. First define the bin edges: >>> xedges = [0, 1, 3, 5] >>> yedges = [0, 2, 3, 4, 6] Next we create a histogram H with random bin content: >>> x = np.random.normal(2, 1, 100) >>> y = np.random.normal(1, 1, 100) >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) >>> # Histogram does not follow Cartesian convention (see Notes), >>> # therefore transpose H for visualization purposes. >>> H = H.T :func:`imshow ` can only display square bins: >>> fig = plt.figure(figsize=(7, 3)) >>> ax = fig.add_subplot(131, title='imshow: square bins') >>> plt.imshow(H, interpolation='nearest', origin='lower', ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) :func:`pcolormesh ` can display actual edges: >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges', ... aspect='equal') >>> X, Y = np.meshgrid(xedges, yedges) >>> ax.pcolormesh(X, Y, H) :class:`NonUniformImage ` can be used to display actual bin edges with interpolation: >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) >>> im = NonUniformImage(ax, interpolation='bilinear') >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 >>> im.set_data(xcenters, ycenters, H) >>> ax.images.append(im) >>> plt.show() It is also possible to construct a 2-D histogram without specifying bin edges: >>> # Generate non-symmetric test data >>> n = 10000 >>> x = np.linspace(1, 100, n) >>> y = 2*np.log(x) + np.random.rand(n) - 0.5 >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges >>> H, yedges, xedges = np.histogram2d(y, x, bins=20) Now we can plot the histogram using :func:`pcolormesh `, and a :func:`hexbin ` for comparison. >>> # Plot histogram using pcolormesh >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True) >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow') >>> ax1.plot(x, 2*np.log(x), 'k-') >>> ax1.set_xlim(x.min(), x.max()) >>> ax1.set_ylim(y.min(), y.max()) >>> ax1.set_xlabel('x') >>> ax1.set_ylabel('y') >>> ax1.set_title('histogram2d') >>> ax1.grid() >>> # Create hexbin plot for comparison >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow') >>> ax2.plot(x, 2*np.log(x), 'k-') >>> ax2.set_title('hexbin') >>> ax2.set_xlim(x.min(), x.max()) >>> ax2.set_xlabel('x') >>> ax2.grid() >>> plt.show() " 1251,"def available(self) -> bool: return self._device is not None and self._device.profile_device.available ",Device is available when we have a connection to it.,10,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def available(self) -> bool: return self._device is not None and self._device.profile_device.available ``` ###Assistant : Device is available when we have a connection to it. " 1252,"def _get_input_from_iterator(iterator, model): next_element = iterator.get_next() # `len(nest.flatten(x))` is going to not count empty elements such as {}. # len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is # going to get flattened in `_prepare_feed_values` to work around that. Empty # elements are going to get filtered out as part of the flattening. 
if len(tf.nest.flatten(next_element)) == len(model.inputs): x = next_element y = None sample_weights = None elif len(tf.nest.flatten(next_element)) == ( len(model.inputs) + len(model.outputs) ): x, y = next_element sample_weights = None else: x, y, sample_weights = next_element # Validate that all the elements in x and y are of the same type and shape. validate_distributed_dataset_inputs( model._distribution_strategy, x, y, sample_weights ) return x, y, sample_weights ",Get elements from the iterator and verify the input shape and type.,12,115,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_input_from_iterator(iterator, model): next_element = iterator.get_next() # `len(nest.flatten(x))` is going to not count empty elements such as {}. # len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is # going to get flattened in `_prepare_feed_values` to work around that. Empty # elements are going to get filtered out as part of the flattening. if len(tf.nest.flatten(next_element)) == len(model.inputs): x = next_element y = None sample_weights = None elif len(tf.nest.flatten(next_element)) == ( len(model.inputs) + len(model.outputs) ): x, y = next_element sample_weights = None else: x, y, sample_weights = next_element # Validate that all the elements in x and y are of the same type and shape. validate_distributed_dataset_inputs( model._distribution_strategy, x, y, sample_weights ) return x, y, sample_weights ``` ###Assistant : Get elements from the iterator and verify the input shape and type. " 1253,"def inner(a, b): return (a, b) @array_function_from_c_func_and_dispatcher(_multiarray_umath.where)"," inner(a, b, /) Inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation), in higher dimensions a sum product over the last axes. Parameters ---------- a, b : array_like If `a` and `b` are nonscalar, their last dimensions must match. Returns ------- out : ndarray If `a` and `b` are both scalars or both 1-D arrays then a scalar is returned; otherwise an array is returned. ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` Raises ------ ValueError If both `a` and `b` are nonscalar and their last dimensions have different sizes. See Also -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. einsum : Einstein summation convention. Notes ----- For vectors (1-D arrays) it computes the ordinary inner-product:: np.inner(a, b) = sum(a[:]*b[:]) More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) or explicitly:: np.inner(a, b)[i0,...,ir-2,j0,...,js-2] = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) In addition `a` or `b` may be scalars, in which case:: np.inner(a,b) = a*b Examples -------- Ordinary inner product for vectors: >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) 2 Some multidimensional examples: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) >>> c = np.inner(a, b) >>> c.shape (2, 3) >>> c array([[ 14, 38, 62], [ 86, 110, 134]]) >>> a = np.arange(2).reshape((1,1,2)) >>> b = np.arange(6).reshape((3,2)) >>> c = np.inner(a, b) >>> c.shape (1, 1, 3) >>> c array([[[1, 3, 5]]]) An example where `b` is a scalar: >>> np.inner(np.eye(2), 7) array([[7., 0.], [0., 7.]]) ",260,7,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def inner(a, b): return (a, b) @array_function_from_c_func_and_dispatcher(_multiarray_umath.where) ``` ###Assistant : inner(a, b, /) Inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation), in higher dimensions a sum product over the last axes. Parameters ---------- a, b : array_like If `a` and `b` are nonscalar, their last dimensions must match. Returns ------- out : ndarray If `a` and `b` are both scalars or both 1-D arrays then a scalar is returned; otherwise an array is returned. ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` Raises ------ ValueError If both `a` and `b` are nonscalar and their last dimensions have different sizes. See Also -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. einsum : Einstein summation convention. Notes ----- For vectors (1-D arrays) it computes the ordinary inner-product:: np.inner(a, b) = sum(a[:]*b[:]) More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) or explicitly:: np.inner(a, b)[i0,...,ir-2,j0,...,js-2] = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) In addition `a` or `b` may be scalars, in which case:: np.inner(a,b) = a*b Examples -------- Ordinary inner product for vectors: >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) 2 Some multidimensional examples: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) >>> c = np.inner(a, b) >>> c.shape (2, 3) >>> c array([[ 14, 38, 62], [ 86, 110, 134]]) >>> a = np.arange(2).reshape((1,1,2)) >>> b = np.arange(6).reshape((3,2)) >>> c = np.inner(a, b) >>> c.shape (1, 1, 3) >>> c array([[[1, 3, 5]]]) An example where `b` is a scalar: >>> np.inner(np.eye(2), 7) array([[7., 0.], [0., 7.]]) " 1254,"def contains(self, mouseevent): inside, info = self._default_contains(mouseevent) if inside is not None: return inside, info if not self.get_visible(): return False, {} pickradius = ( float(self._picker) if isinstance(self._picker, Number) and self._picker is not True # the bool, not just nonzero or 1 else self._pickradius) if self.axes: self.axes._unstale_viewLim() transform, offset_trf, offsets, paths = self._prepare_points() # Tests if the point is contained on one of the polygons formed # by the control points of each of the paths. A point is considered # ""on"" a path if it would lie within a stroke of width 2*pickradius # following the path. If pickradius <= 0, then we instead simply check # if the point is *inside* of the path instead. ind = _path.point_in_path_collection( mouseevent.x, mouseevent.y, pickradius, transform.frozen(), paths, self.get_transforms(), offsets, offset_trf, pickradius <= 0) return len(ind) > 0, dict(ind=ind) "," Test whether the mouse event occurred in the collection. Returns ``bool, dict(ind=itemlist)``, where every item in itemlist contains the event. ",20,135,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def contains(self, mouseevent): inside, info = self._default_contains(mouseevent) if inside is not None: return inside, info if not self.get_visible(): return False, {} pickradius = ( float(self._picker) if isinstance(self._picker, Number) and self._picker is not True # the bool, not just nonzero or 1 else self._pickradius) if self.axes: self.axes._unstale_viewLim() transform, offset_trf, offsets, paths = self._prepare_points() # Tests if the point is contained on one of the polygons formed # by the control points of each of the paths. A point is considered # ""on"" a path if it would lie within a stroke of width 2*pickradius # following the path. If pickradius <= 0, then we instead simply check # if the point is *inside* of the path instead. ind = _path.point_in_path_collection( mouseevent.x, mouseevent.y, pickradius, transform.frozen(), paths, self.get_transforms(), offsets, offset_trf, pickradius <= 0) return len(ind) > 0, dict(ind=ind) ``` ###Assistant : Test whether the mouse event occurred in the collection. Returns ``bool, dict(ind=itemlist)``, where every item in itemlist contains the event. " 1255,"def deployments(self) -> List[Dict]: return [ { 'name': self.name, 'head_host': self.head_host, 'head_port_in': self.head_port_in, } ] ","Get deployments of the deployment. The BaseDeployment just gives one deployment. :return: list of deployments ",15,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deployments(self) -> List[Dict]: return [ { 'name': self.name, 'head_host': self.head_host, 'head_port_in': self.head_port_in, } ] ``` ###Assistant : Get deployments of the deployment. The BaseDeployment just gives one deployment. :return: list of deployments " 1256,"def parse_content_disposition(reply): is_inline = True filename = None content_disposition_header = b'Content-Disposition' # First check if the Content-Disposition header has a filename # attribute. if reply.hasRawHeader(content_disposition_header): # We use the unsafe variant of the filename as we sanitize it via # os.path.basename later. try: value = bytes(reply.rawHeader(content_disposition_header)) log.network.debug(""Parsing Content-Disposition: {value!r}"") content_disposition = ContentDisposition.parse(value) filename = content_disposition.filename() except ContentDispositionError as e: log.network.error(f""Error while parsing filename: {e}"") else: is_inline = content_disposition.is_inline() # Then try to get filename from url if not filename: filename = reply.url().path().rstrip('/') # If that fails as well, use a fallback if not filename: filename = 'qutebrowser-download' return is_inline, os.path.basename(filename) ","Parse a content_disposition header. Args: reply: The QNetworkReply to get a filename for. Return: A (is_inline, filename) tuple. ",18,100,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse_content_disposition(reply): is_inline = True filename = None content_disposition_header = b'Content-Disposition' # First check if the Content-Disposition header has a filename # attribute. if reply.hasRawHeader(content_disposition_header): # We use the unsafe variant of the filename as we sanitize it via # os.path.basename later. 
try: value = bytes(reply.rawHeader(content_disposition_header)) log.network.debug(""Parsing Content-Disposition: {value!r}"") content_disposition = ContentDisposition.parse(value) filename = content_disposition.filename() except ContentDispositionError as e: log.network.error(f""Error while parsing filename: {e}"") else: is_inline = content_disposition.is_inline() # Then try to get filename from url if not filename: filename = reply.url().path().rstrip('/') # If that fails as well, use a fallback if not filename: filename = 'qutebrowser-download' return is_inline, os.path.basename(filename) ``` ###Assistant : Parse a content_disposition header. Args: reply: The QNetworkReply to get a filename for. Return: A (is_inline, filename) tuple. " 1257,"def _get_categorical_mapping(self, scale, data): levels = categorical_order(data, scale.order) n = len(levels) values = scale.values if isinstance(values, dict): self._check_dict_entries(levels, values) # TODO where to ensure that dict values have consistent representation? colors = [values[x] for x in levels] elif isinstance(values, list): colors = self._check_list_length(levels, scale.values) elif isinstance(values, tuple): colors = blend_palette(values, n) elif isinstance(values, str): colors = color_palette(values, n) elif values is None: if n <= len(get_color_cycle()): # Use current (global) default palette colors = color_palette(n_colors=n) else: colors = color_palette(""husl"", n) else: scale_class = scale.__class__.__name__ msg = "" "".join([ f""Scale values for {self.variable} with a {scale_class} mapping"", f""must be string, list, tuple, or dict; not {type(scale.values)}."" ]) raise TypeError(msg) # If color specified here has alpha channel, it will override alpha property colors = self._standardize_colors(colors) ",Define mapping as lookup in list of discrete color values.,10,124,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_categorical_mapping(self, scale, data): levels = categorical_order(data, scale.order) n = len(levels) values = scale.values if isinstance(values, dict): self._check_dict_entries(levels, values) # TODO where to ensure that dict values have consistent representation? colors = [values[x] for x in levels] elif isinstance(values, list): colors = self._check_list_length(levels, scale.values) elif isinstance(values, tuple): colors = blend_palette(values, n) elif isinstance(values, str): colors = color_palette(values, n) elif values is None: if n <= len(get_color_cycle()): # Use current (global) default palette colors = color_palette(n_colors=n) else: colors = color_palette(""husl"", n) else: scale_class = scale.__class__.__name__ msg = "" "".join([ f""Scale values for {self.variable} with a {scale_class} mapping"", f""must be string, list, tuple, or dict; not {type(scale.values)}."" ]) raise TypeError(msg) # If color specified here has alpha channel, it will override alpha property colors = self._standardize_colors(colors) ``` ###Assistant : Define mapping as lookup in list of discrete color values. " 1258,"def minorlocator(self, loc): self._long_axis().set_minor_locator(loc) self._minorlocator = loc "," Set minor locator being used for colorbar ",7,7,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def minorlocator(self, loc): self._long_axis().set_minor_locator(loc) self._minorlocator = loc ``` ###Assistant : Set minor locator being used for colorbar " 1259,"def test_process_datetime_to_timestamp_freeze_time(time_zone, hass): hass.config.set_time_zone(time_zone) utc_now = dt_util.utcnow() with freeze_time(utc_now): epoch = utc_now.timestamp() assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch now = dt_util.now() assert process_datetime_to_timestamp(now) == epoch @pytest.mark.parametrize( ""time_zone"", [""Europe/Berlin"", ""America/Chicago"", ""US/Hawaii"", ""UTC""] )","Test we can handle processing database datetimes to timestamps. This test freezes time to make sure everything matches. ",18,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_process_datetime_to_timestamp_freeze_time(time_zone, hass): hass.config.set_time_zone(time_zone) utc_now = dt_util.utcnow() with freeze_time(utc_now): epoch = utc_now.timestamp() assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch now = dt_util.now() assert process_datetime_to_timestamp(now) == epoch @pytest.mark.parametrize( ""time_zone"", [""Europe/Berlin"", ""America/Chicago"", ""US/Hawaii"", ""UTC""] ) ``` ###Assistant : Test we can handle processing database datetimes to timestamps. This test freezes time to make sure everything matches. " 1260,"def _spatially_filter(self) -> np.ndarray: logger.debug(""Spatially Filter"") assert self._shapes_model is not None landmarks_norm = self._normalized[""landmarks""] # Convert to matrix form landmarks_norm_table = np.reshape(landmarks_norm, [68 * 2, landmarks_norm.shape[2]]).T # Project onto shapes model and reconstruct landmarks_norm_table_rec = self._shapes_model.inverse_transform( self._shapes_model.transform(landmarks_norm_table)) # Convert back to shapes (numKeypoint, num_dims, numFrames) landmarks_norm_rec = np.reshape(landmarks_norm_table_rec.T, [68, 2, landmarks_norm.shape[2]]) # Transform back to image co-ordinates retval = self._normalized_to_original(landmarks_norm_rec, self._normalized[""scale_factors""], self._normalized[""mean_coords""]) logger.debug(""Spatially Filtered: %s"", retval) return retval "," interpret the shapes using our shape model (project and reconstruct) Returns ------- :class:`numpy.ndarray` The filtered landmarks in original coordinate space ",20,68,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _spatially_filter(self) -> np.ndarray: logger.debug(""Spatially Filter"") assert self._shapes_model is not None landmarks_norm = self._normalized[""landmarks""] # Convert to matrix form landmarks_norm_table = np.reshape(landmarks_norm, [68 * 2, landmarks_norm.shape[2]]).T # Project onto shapes model and reconstruct landmarks_norm_table_rec = self._shapes_model.inverse_transform( self._shapes_model.transform(landmarks_norm_table)) # Convert back to shapes (numKeypoint, num_dims, numFrames) landmarks_norm_rec = np.reshape(landmarks_norm_table_rec.T, [68, 2, landmarks_norm.shape[2]]) # Transform back to image co-ordinates retval = self._normalized_to_original(landmarks_norm_rec, self._normalized[""scale_factors""], self._normalized[""mean_coords""]) logger.debug(""Spatially Filtered: %s"", retval) return retval ``` ###Assistant : interpret the shapes using our shape model (project and reconstruct) Returns ------- :class:`numpy.ndarray` The filtered landmarks in original coordinate space " 1261,"def letter_form(self): return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j) for i, j in self.array_form])) "," The letter representation of a ``FreeGroupElement`` is a tuple of generator symbols, with each entry corresponding to a group generator. Inverses of the generators are represented by negative generator symbols. Examples ======== >>> from sympy.combinatorics import free_group >>> f, a, b, c, d = free_group(""a b c d"") >>> (a**3).letter_form (a, a, a) >>> (a**2*d**-2*a*b**-4).letter_form (a, a, -d, -d, a, -b, -b, -b, -b) >>> (a**-2*b**3*d).letter_form (-a, -a, b, b, b, d) See Also ======== array_form ",76,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def letter_form(self): return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j) for i, j in self.array_form])) ``` ###Assistant : The letter representation of a ``FreeGroupElement`` is a tuple of generator symbols, with each entry corresponding to a group generator. Inverses of the generators are represented by negative generator symbols. Examples ======== >>> from sympy.combinatorics import free_group >>> f, a, b, c, d = free_group(""a b c d"") >>> (a**3).letter_form (a, a, a) >>> (a**2*d**-2*a*b**-4).letter_form (a, a, -d, -d, a, -b, -b, -b, -b) >>> (a**-2*b**3*d).letter_form (-a, -a, b, b, b, d) See Also ======== array_form " 1262,"def _handle_default_message(self, type, data): logger.debug(f""Received message from Leader of type {type}: {data}"") "," Default leader message handler, just logs it. We should never have to run this unless the leader sends us some weird message. ",22,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _handle_default_message(self, type, data): logger.debug(f""Received message from Leader of type {type}: {data}"") ``` ###Assistant : Default leader message handler, just logs it. We should never have to run this unless the leader sends us some weird message. " 1263,"def export_probs(self) -> dict[str, Any]: result = {} for module in self.nas_modules: try: result.update(module.export_probs(memo=result)) except NotImplementedError: warnings.warn( 'Some super-modules you have used did not implement export_probs. You might find some logs are missing.', UserWarning ) return result "," Export the probability of every choice in the search space got chosen. .. 
note:: If such method of some modules is not implemented, they will be simply ignored. Returns ------- dict In most cases, keys are names of ``nas_modules`` suffixed with ``/`` and choice name. Values are the probability / logits depending on the implementation. ",55,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def export_probs(self) -> dict[str, Any]: result = {} for module in self.nas_modules: try: result.update(module.export_probs(memo=result)) except NotImplementedError: warnings.warn( 'Some super-modules you have used did not implement export_probs. You might find some logs are missing.', UserWarning ) return result ``` ###Assistant : Export the probability of every choice in the search space got chosen. .. note:: If such method of some modules is not implemented, they will be simply ignored. Returns ------- dict In most cases, keys are names of ``nas_modules`` suffixed with ``/`` and choice name. Values are the probability / logits depending on the implementation. " 1264,"def debounce_update_release_health_data(organization, project_ids): # Figure out which projects need to get updates from the snuba. should_update = {} cache_keys = [""debounce-health:%d"" % id for id in project_ids] cache_data = cache.get_many(cache_keys) for project_id, cache_key in zip(project_ids, cache_keys): if cache_data.get(cache_key) is None: should_update[project_id] = cache_key if not should_update: return projects = {p.id: p for p in Project.objects.get_many_from_cache(should_update.keys())} # This gives us updates for all release-projects which have seen new # health data over the last days. It will miss releases where the last # date is longer than what `get_changed_project_release_model_adoptions` # considers recent. project_releases = release_health.get_changed_project_release_model_adoptions( should_update.keys() ) # Check which we already have rows for. existing = set( ReleaseProject.objects.filter( project_id__in=[x[0] for x in project_releases], release__version__in=[x[1] for x in project_releases], ).values_list(""project_id"", ""release__version"") ) to_upsert = [] for key in project_releases: if key not in existing: to_upsert.append(key) if to_upsert: dates = release_health.get_oldest_health_data_for_releases(to_upsert) for project_id, version in to_upsert: project = projects.get(project_id) if project is None: # should not happen continue # Ignore versions that were saved with an empty string before validation was added if not Release.is_valid_version(version): continue # We might have never observed the release. This for instance can # happen if the release only had health data so far. For these cases # we want to create the release the first time we observed it on the # health side. release = Release.get_or_create( project=project, version=version, date_added=dates.get((project_id, version)) ) # Make sure that the release knows about this project. Like we had before # the project might not have been associated with this release yet. release.add_project(project) # Debounce updates for a minute cache.set_many(dict(zip(should_update.values(), [True] * len(should_update))), 60) @region_silo_endpoint","This causes a flush of snuba health data to the postgres tables once per minute for the given projects. ",19,265,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def debounce_update_release_health_data(organization, project_ids): # Figure out which projects need to get updates from the snuba. should_update = {} cache_keys = [""debounce-health:%d"" % id for id in project_ids] cache_data = cache.get_many(cache_keys) for project_id, cache_key in zip(project_ids, cache_keys): if cache_data.get(cache_key) is None: should_update[project_id] = cache_key if not should_update: return projects = {p.id: p for p in Project.objects.get_many_from_cache(should_update.keys())} # This gives us updates for all release-projects which have seen new # health data over the last days. It will miss releases where the last # date is longer than what `get_changed_project_release_model_adoptions` # considers recent. project_releases = release_health.get_changed_project_release_model_adoptions( should_update.keys() ) # Check which we already have rows for. existing = set( ReleaseProject.objects.filter( project_id__in=[x[0] for x in project_releases], release__version__in=[x[1] for x in project_releases], ).values_list(""project_id"", ""release__version"") ) to_upsert = [] for key in project_releases: if key not in existing: to_upsert.append(key) if to_upsert: dates = release_health.get_oldest_health_data_for_releases(to_upsert) for project_id, version in to_upsert: project = projects.get(project_id) if project is None: # should not happen continue # Ignore versions that were saved with an empty string before validation was added if not Release.is_valid_version(version): continue # We might have never observed the release. This for instance can # happen if the release only had health data so far. For these cases # we want to create the release the first time we observed it on the # health side. release = Release.get_or_create( project=project, version=version, date_added=dates.get((project_id, version)) ) # Make sure that the release knows about this project. Like we had before # the project might not have been associated with this release yet. release.add_project(project) # Debounce updates for a minute cache.set_many(dict(zip(should_update.values(), [True] * len(should_update))), 60) @region_silo_endpoint ``` ###Assistant : This causes a flush of snuba health data to the postgres tables once per minute for the given projects. " 1265,"def show_code(co, *, file=None): print(code_info(co), file=file) _Instruction = collections.namedtuple(""_Instruction"", ""opname opcode arg argval argrepr offset starts_line is_jump_target"") _Instruction.opname.__doc__ = ""Human readable name for operation"" _Instruction.opcode.__doc__ = ""Numeric code for operation"" _Instruction.arg.__doc__ = ""Numeric argument to operation (if any), otherwise None"" _Instruction.argval.__doc__ = ""Resolved arg value (if known), otherwise same as arg"" _Instruction.argrepr.__doc__ = ""Human readable description of operation argument"" _Instruction.offset.__doc__ = ""Start index of operation within bytecode sequence"" _Instruction.starts_line.__doc__ = ""Line started by this opcode (if any), otherwise None"" _Instruction.is_jump_target.__doc__ = ""True if other code jumps to here, otherwise False"" _OPNAME_WIDTH = 20 _OPARG_WIDTH = 5 ","Print details of methods, functions, or code to *file*. If *file* is not provided, the output is printed on stdout. ",20,96,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def show_code(co, *, file=None): print(code_info(co), file=file) _Instruction = collections.namedtuple(""_Instruction"", ""opname opcode arg argval argrepr offset starts_line is_jump_target"") _Instruction.opname.__doc__ = ""Human readable name for operation"" _Instruction.opcode.__doc__ = ""Numeric code for operation"" _Instruction.arg.__doc__ = ""Numeric argument to operation (if any), otherwise None"" _Instruction.argval.__doc__ = ""Resolved arg value (if known), otherwise same as arg"" _Instruction.argrepr.__doc__ = ""Human readable description of operation argument"" _Instruction.offset.__doc__ = ""Start index of operation within bytecode sequence"" _Instruction.starts_line.__doc__ = ""Line started by this opcode (if any), otherwise None"" _Instruction.is_jump_target.__doc__ = ""True if other code jumps to here, otherwise False"" _OPNAME_WIDTH = 20 _OPARG_WIDTH = 5 ``` ###Assistant : Print details of methods, functions, or code to *file*. If *file* is not provided, the output is printed on stdout. " 1266,"def set_cmap(cmap): cmap = colormaps[cmap] rc('image', cmap=cmap.name) im = gci() if im is not None: im.set_cmap(cmap) @_copy_docstring_and_deprecators(matplotlib.image.imread)"," Set the default colormap, and applies it to the current image if any. Parameters ---------- cmap : `~matplotlib.colors.Colormap` or str A colormap instance or the name of a registered colormap. See Also -------- colormaps matplotlib.cm.register_cmap matplotlib.cm.get_cmap ",36,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_cmap(cmap): cmap = colormaps[cmap] rc('image', cmap=cmap.name) im = gci() if im is not None: im.set_cmap(cmap) @_copy_docstring_and_deprecators(matplotlib.image.imread) ``` ###Assistant : Set the default colormap, and applies it to the current image if any. Parameters ---------- cmap : `~matplotlib.colors.Colormap` or str A colormap instance or the name of a registered colormap. See Also -------- colormaps matplotlib.cm.register_cmap matplotlib.cm.get_cmap " 1267,"def completion_item_focus(self, which, history=False): if history: if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or not self._active): if which == 'next': self._cmd.command_history_next() return elif which == 'prev': self._cmd.command_history_prev() return else: raise cmdutils.CommandError(""Can't combine --history with "" ""{}!"".format(which)) if not self._active: return selmodel = self.selectionModel() indices = { 'next': lambda: self._next_idx(upwards=False), 'prev': lambda: self._next_idx(upwards=True), 'next-category': lambda: self._next_category_idx(upwards=False), 'prev-category': lambda: self._next_category_idx(upwards=True), 'next-page': lambda: self._next_page(upwards=False), 'prev-page': lambda: self._next_page(upwards=True), } idx = indices[which]() if not idx.isValid(): return selmodel.setCurrentIndex( idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows) # if the last item is focused, try to fetch more next_idx = self.indexBelow(idx) if not self.visualRect(next_idx).isValid(): self.expandAll() count = self._model().count() if count == 0: self.hide() elif count == 1 and config.val.completion.quick: self.hide() elif config.val.completion.show == 'auto': self.show() ","Shift the focus of the completion menu to another item. Args: which: 'next', 'prev', 'next-category', 'prev-category', 'next-page', or 'prev-page'. 
history: Navigate through command history if no text was typed. ",29,114,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def completion_item_focus(self, which, history=False): if history: if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or not self._active): if which == 'next': self._cmd.command_history_next() return elif which == 'prev': self._cmd.command_history_prev() return else: raise cmdutils.CommandError(""Can't combine --history with "" ""{}!"".format(which)) if not self._active: return selmodel = self.selectionModel() indices = { 'next': lambda: self._next_idx(upwards=False), 'prev': lambda: self._next_idx(upwards=True), 'next-category': lambda: self._next_category_idx(upwards=False), 'prev-category': lambda: self._next_category_idx(upwards=True), 'next-page': lambda: self._next_page(upwards=False), 'prev-page': lambda: self._next_page(upwards=True), } idx = indices[which]() if not idx.isValid(): return selmodel.setCurrentIndex( idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows) # if the last item is focused, try to fetch more next_idx = self.indexBelow(idx) if not self.visualRect(next_idx).isValid(): self.expandAll() count = self._model().count() if count == 0: self.hide() elif count == 1 and config.val.completion.quick: self.hide() elif config.val.completion.show == 'auto': self.show() ``` ###Assistant : Shift the focus of the completion menu to another item. Args: which: 'next', 'prev', 'next-category', 'prev-category', 'next-page', or 'prev-page'. history: Navigate through command history if no text was typed. " 1268,"def _background_extract(self, output_folder, progress_queue): _io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True), loader=ImagesLoader(self._input_location, count=self._alignments.frames_count)) for frame_idx, (filename, image) in enumerate(_io[""loader""].load()): logger.trace(""Outputting frame: %s: %s"", frame_idx, filename) src_filename = os.path.basename(filename) frame_name = os.path.splitext(src_filename)[0] progress_queue.put(1) for face_idx, face in enumerate(self._frame_faces[frame_idx]): output = f""{frame_name}_{face_idx}.png"" aligned = AlignedFace(face.landmarks_xy, image=image, centering=""head"", size=512) # TODO user selectable size meta = dict(alignments=face.to_png_meta(), source=dict(alignments_version=self._alignments.version, original_filename=output, face_index=face_idx, source_filename=src_filename, source_is_video=self._globals.is_video, source_frame_dims=image.shape[:2])) b_image = encode_image(aligned.face, "".png"", metadata=meta) _io[""saver""].save(output, b_image) _io[""saver""].close() "," Perform the background extraction in a thread so GUI doesn't become unresponsive. Parameters ---------- output_folder: str The location to save the output faces to progress_queue: :class:`queue.Queue` The queue to place incremental counts to for updating the GUI's progress bar ",39,65,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _background_extract(self, output_folder, progress_queue): _io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True), loader=ImagesLoader(self._input_location, count=self._alignments.frames_count)) for frame_idx, (filename, image) in enumerate(_io[""loader""].load()): logger.trace(""Outputting frame: %s: %s"", frame_idx, filename) src_filename = os.path.basename(filename) frame_name = os.path.splitext(src_filename)[0] progress_queue.put(1) for face_idx, face in enumerate(self._frame_faces[frame_idx]): output = f""{frame_name}_{face_idx}.png"" aligned = AlignedFace(face.landmarks_xy, image=image, centering=""head"", size=512) # TODO user selectable size meta = dict(alignments=face.to_png_meta(), source=dict(alignments_version=self._alignments.version, original_filename=output, face_index=face_idx, source_filename=src_filename, source_is_video=self._globals.is_video, source_frame_dims=image.shape[:2])) b_image = encode_image(aligned.face, "".png"", metadata=meta) _io[""saver""].save(output, b_image) _io[""saver""].close() ``` ###Assistant : Perform the background extraction in a thread so GUI doesn't become unresponsive. Parameters ---------- output_folder: str The location to save the output faces to progress_queue: :class:`queue.Queue` The queue to place incremental counts to for updating the GUI's progress bar " 1269,"def test_multi_part_language(self, m): m.return_value = [""chi_sim"", ""eng""] msgs = check_default_language_available(None) self.assertEqual(len(msgs), 0) "," GIVEN: - An OCR language which is multi part (ie chi-sim) - The language is correctly formatted WHEN: - Installed packages are checked THEN: - No errors are reported ",29,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_multi_part_language(self, m): m.return_value = [""chi_sim"", ""eng""] msgs = check_default_language_available(None) self.assertEqual(len(msgs), 0) ``` ###Assistant : GIVEN: - An OCR language which is multi part (ie chi-sim) - The language is correctly formatted WHEN: - Installed packages are checked THEN: - No errors are reported " 1270,"def finished_callback(self, runner_obj): event_data = { 'event': 'EOF', 'final_counter': self.event_ct, 'guid': self.guid, } event_data.setdefault(self.event_data_key, self.instance.id) self.dispatcher.dispatch(event_data) "," Ansible runner callback triggered on finished run ",7,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def finished_callback(self, runner_obj): event_data = { 'event': 'EOF', 'final_counter': self.event_ct, 'guid': self.guid, } event_data.setdefault(self.event_data_key, self.instance.id) self.dispatcher.dispatch(event_data) ``` ###Assistant : Ansible runner callback triggered on finished run " 1271,"def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory): query = result = {""data"": ""result""} sensitive_fields = {""Product"": {""name""}} operation_result = gql_operation_factory(query, result=result) anonymize_gql_operation_response(operation_result, sensitive_fields) assert operation_result.result[""data""] == MASK @pytest.mark.parametrize( ""sensitive_fields"", [ {""NonExistingType"": {}}, {""Product"": {""nonExistingField""}}, {""Node"": {""id""}}, ], )"," fragment ProductFragment on Product { id name } query products($first: Int){ products(channel: ""channel-pln"", first:$first){ edges{ node{ ... 
ProductFragment variants { variantName: name } } } } }",27,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory): query = result = {""data"": ""result""} sensitive_fields = {""Product"": {""name""}} operation_result = gql_operation_factory(query, result=result) anonymize_gql_operation_response(operation_result, sensitive_fields) assert operation_result.result[""data""] == MASK @pytest.mark.parametrize( ""sensitive_fields"", [ {""NonExistingType"": {}}, {""Product"": {""nonExistingField""}}, {""Node"": {""id""}}, ], ) ``` ###Assistant : fragment ProductFragment on Product { id name } query products($first: Int){ products(channel: ""channel-pln"", first:$first){ edges{ node{ ... ProductFragment variants { variantName: name } } } } } " 1272,"def complete_bipartite_graph(n1, n2, create_using=None): G = nx.empty_graph(0, create_using) if G.is_directed(): raise nx.NetworkXError(""Directed Graph not supported"") n1, top = n1 n2, bottom = n2 if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral): bottom = [n1 + i for i in bottom] G.add_nodes_from(top, bipartite=0) G.add_nodes_from(bottom, bipartite=1) if len(G) != len(top) + len(bottom): raise nx.NetworkXError(""Inputs n1 and n2 must contain distinct nodes"") G.add_edges_from((u, v) for u in top for v in bottom) G.graph[""name""] = f""complete_bipartite_graph({n1}, {n2})"" return G @py_random_state(3)","Returns the complete bipartite graph `K_{n_1,n_2}`. The graph is composed of two partitions with nodes 0 to (n1 - 1) in the first and nodes n1 to (n1 + n2 - 1) in the second. Each node in the first is connected to each node in the second. Parameters ---------- n1, n2 : integer or iterable container of nodes If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`. If a container, the elements are the nodes. create_using : NetworkX graph instance, (default: nx.Graph) Return graph of this type. Notes ----- Nodes are the integers 0 to `n1 + n2 - 1` unless either n1 or n2 are containers of nodes. If only one of n1 or n2 are integers, that integer is replaced by `range` of that integer. The nodes are assigned the attribute 'bipartite' with the value 0 or 1 to indicate which bipartite set the node belongs to. This function is not imported in the main namespace. To use it use nx.bipartite.complete_bipartite_graph ",166,74,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def complete_bipartite_graph(n1, n2, create_using=None): G = nx.empty_graph(0, create_using) if G.is_directed(): raise nx.NetworkXError(""Directed Graph not supported"") n1, top = n1 n2, bottom = n2 if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral): bottom = [n1 + i for i in bottom] G.add_nodes_from(top, bipartite=0) G.add_nodes_from(bottom, bipartite=1) if len(G) != len(top) + len(bottom): raise nx.NetworkXError(""Inputs n1 and n2 must contain distinct nodes"") G.add_edges_from((u, v) for u in top for v in bottom) G.graph[""name""] = f""complete_bipartite_graph({n1}, {n2})"" return G @py_random_state(3) ``` ###Assistant : Returns the complete bipartite graph `K_{n_1,n_2}`. The graph is composed of two partitions with nodes 0 to (n1 - 1) in the first and nodes n1 to (n1 + n2 - 1) in the second. Each node in the first is connected to each node in the second. 
Parameters ---------- n1, n2 : integer or iterable container of nodes If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`. If a container, the elements are the nodes. create_using : NetworkX graph instance, (default: nx.Graph) Return graph of this type. Notes ----- Nodes are the integers 0 to `n1 + n2 - 1` unless either n1 or n2 are containers of nodes. If only one of n1 or n2 are integers, that integer is replaced by `range` of that integer. The nodes are assigned the attribute 'bipartite' with the value 0 or 1 to indicate which bipartite set the node belongs to. This function is not imported in the main namespace. To use it use nx.bipartite.complete_bipartite_graph " 1273,"def shutdown(self, callback=None): if self._state == _UNWRAPPED: raise RuntimeError('no security layer present') if self._state == _SHUTDOWN: raise RuntimeError('shutdown in progress') assert self._state in (_WRAPPED, _DO_HANDSHAKE) self._state = _SHUTDOWN self._shutdown_cb = callback ssldata, appdata = self.feed_ssldata(b'') assert appdata == [] or appdata == [b''] return ssldata ","Start the SSL shutdown sequence. Return a list of ssldata. A ssldata element is a list of buffers The optional *callback* argument can be used to install a callback that will be called when the shutdown is complete. The callback will be called without arguments. ",45,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def shutdown(self, callback=None): if self._state == _UNWRAPPED: raise RuntimeError('no security layer present') if self._state == _SHUTDOWN: raise RuntimeError('shutdown in progress') assert self._state in (_WRAPPED, _DO_HANDSHAKE) self._state = _SHUTDOWN self._shutdown_cb = callback ssldata, appdata = self.feed_ssldata(b'') assert appdata == [] or appdata == [b''] return ssldata ``` ###Assistant : Start the SSL shutdown sequence. Return a list of ssldata. A ssldata element is a list of buffers The optional *callback* argument can be used to install a callback that will be called when the shutdown is complete. The callback will be called without arguments. " 1274,"def preferred_ip(vm_, ips): proto = config.get_cloud_config_value( ""protocol"", vm_, __opts__, default=""ipv4"", search_global=False ) family = socket.AF_INET if proto == ""ipv6"": family = socket.AF_INET6 for ip in ips: ignore_ip = ignore_cidr(vm_, ip) if ignore_ip: continue try: socket.inet_pton(family, ip) return ip except Exception: # pylint: disable=broad-except continue return False "," Return either an 'ipv4' (default) or 'ipv6' address depending on 'protocol' option. The list of 'ipv4' IPs is filtered by ignore_cidr() to remove any unreachable private addresses. ",27,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def preferred_ip(vm_, ips): proto = config.get_cloud_config_value( ""protocol"", vm_, __opts__, default=""ipv4"", search_global=False ) family = socket.AF_INET if proto == ""ipv6"": family = socket.AF_INET6 for ip in ips: ignore_ip = ignore_cidr(vm_, ip) if ignore_ip: continue try: socket.inet_pton(family, ip) return ip except Exception: # pylint: disable=broad-except continue return False ``` ###Assistant : Return either an 'ipv4' (default) or 'ipv6' address depending on 'protocol' option. The list of 'ipv4' IPs is filtered by ignore_cidr() to remove any unreachable private addresses. 
" 1275,"def get_parent_account(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( % (""%s"", searchfield, ""%s"", ""%s"", ""%s""), (filters[""company""], ""%%%s%%"" % txt, page_len, start), as_list=1, ) ","select name from tabAccount where is_group = 1 and docstatus != 2 and company = %s and %s like %s order by name limit %s offset %s",27,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_parent_account(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( % (""%s"", searchfield, ""%s"", ""%s"", ""%s""), (filters[""company""], ""%%%s%%"" % txt, page_len, start), as_list=1, ) ``` ###Assistant : select name from tabAccount where is_group = 1 and docstatus != 2 and company = %s and %s like %s order by name limit %s offset %s " 1276,"def exact_laplacian_kernel(x, y, stddev): r x_aligned, y_aligned = _align_matrices(x, y) diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2) return tf.exp(-diff_l1_norm / stddev) ","Computes exact Laplacian kernel value(s) for tensors x and y using stddev. The Laplacian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v|| / stddev) where the norm is the l1-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim]. stddev: The width of the Gaussian kernel. Returns: A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible. ",195,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def exact_laplacian_kernel(x, y, stddev): r x_aligned, y_aligned = _align_matrices(x, y) diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2) return tf.exp(-diff_l1_norm / stddev) ``` ###Assistant : Computes exact Laplacian kernel value(s) for tensors x and y using stddev. The Laplacian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v|| / stddev) where the norm is the l1-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim]. stddev: The width of the Gaussian kernel. Returns: A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible. 
" 1277,"def transform(self, X): check_is_fitted(self) if self.n_neighbors is not None: distances, indices = self.nbrs_.kneighbors(X, return_distance=True) else: distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True) # Create the graph of shortest distances from X to # training data via the nearest neighbors of X. # This can be done as a single array operation, but it potentially # takes a lot of memory. To avoid that, use a loop: n_samples_fit = self.nbrs_.n_samples_fit_ n_queries = distances.shape[0] G_X = np.zeros((n_queries, n_samples_fit)) for i in range(n_queries): G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0) G_X **= 2 G_X *= -0.5 return self.kernel_pca_.transform(G_X) ","Transform X. This is implemented by linking the points X into the graph of geodesic distances of the training data. First the `n_neighbors` nearest neighbors of X are found in the training data, and from these the shortest geodesic distances from each point in X to each point in the training data are computed in order to construct the kernel. The embedding of X is the projection of this kernel onto the embedding vectors of the training set. Parameters ---------- X : array-like, shape (n_queries, n_features) If neighbors_algorithm='precomputed', X is assumed to be a distance matrix or a sparse graph of shape (n_queries, n_samples_fit). Returns ------- X_new : array-like, shape (n_queries, n_components) X transformed in the new space. ",118,93,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transform(self, X): check_is_fitted(self) if self.n_neighbors is not None: distances, indices = self.nbrs_.kneighbors(X, return_distance=True) else: distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True) # Create the graph of shortest distances from X to # training data via the nearest neighbors of X. # This can be done as a single array operation, but it potentially # takes a lot of memory. To avoid that, use a loop: n_samples_fit = self.nbrs_.n_samples_fit_ n_queries = distances.shape[0] G_X = np.zeros((n_queries, n_samples_fit)) for i in range(n_queries): G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0) G_X **= 2 G_X *= -0.5 return self.kernel_pca_.transform(G_X) ``` ###Assistant : Transform X. This is implemented by linking the points X into the graph of geodesic distances of the training data. First the `n_neighbors` nearest neighbors of X are found in the training data, and from these the shortest geodesic distances from each point in X to each point in the training data are computed in order to construct the kernel. The embedding of X is the projection of this kernel onto the embedding vectors of the training set. Parameters ---------- X : array-like, shape (n_queries, n_features) If neighbors_algorithm='precomputed', X is assumed to be a distance matrix or a sparse graph of shape (n_queries, n_samples_fit). Returns ------- X_new : array-like, shape (n_queries, n_components) X transformed in the new space. 
" 1278,"def generate_config_style_dict(self) -> dict[str, str]: keys_converting_dict = { ""fill"": (""color"", ""fill_color""), ""fill-opacity"": (""opacity"", ""fill_opacity""), ""stroke"": (""color"", ""stroke_color""), ""stroke-opacity"": (""opacity"", ""stroke_opacity""), ""stroke-width"": (""stroke_width"",), } svg_default_dict = self.svg_default result = {} for svg_key, style_keys in keys_converting_dict.items(): for style_key in style_keys: if svg_default_dict[style_key] is None: continue result[svg_key] = str(svg_default_dict[style_key]) return result ",Generate a dictionary holding the default style information.,8,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_config_style_dict(self) -> dict[str, str]: keys_converting_dict = { ""fill"": (""color"", ""fill_color""), ""fill-opacity"": (""opacity"", ""fill_opacity""), ""stroke"": (""color"", ""stroke_color""), ""stroke-opacity"": (""opacity"", ""stroke_opacity""), ""stroke-width"": (""stroke_width"",), } svg_default_dict = self.svg_default result = {} for svg_key, style_keys in keys_converting_dict.items(): for style_key in style_keys: if svg_default_dict[style_key] is None: continue result[svg_key] = str(svg_default_dict[style_key]) return result ``` ###Assistant : Generate a dictionary holding the default style information. " 1279,"def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]] return dict( bootstrap_type=self.bootstrap_type, controller='yes' if self.controller else '', python_versions=self.python_versions, ssh_key_type=self.ssh_key.KEY_TYPE, ssh_private_key=self.ssh_key.key_contents, ssh_public_key=self.ssh_key.pub_contents, ) ",The variables to template in the bootstrapping script.,8,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]] return dict( bootstrap_type=self.bootstrap_type, controller='yes' if self.controller else '', python_versions=self.python_versions, ssh_key_type=self.ssh_key.KEY_TYPE, ssh_private_key=self.ssh_key.key_contents, ssh_public_key=self.ssh_key.pub_contents, ) ``` ###Assistant : The variables to template in the bootstrapping script. " 1280,"def _create_pseudo_member_(cls, value): pseudo_member = cls._value2member_map_.get(value, None) if pseudo_member is None: # verify all bits are accounted for _, extra_flags = _decompose(cls, value) if extra_flags: raise ValueError(""%r is not a valid %s"" % (value, cls.__qualname__)) # construct a singleton enum pseudo-member pseudo_member = object.__new__(cls) pseudo_member._name_ = None pseudo_member._value_ = value # use setdefault in case another thread already created a composite # with this value pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) return pseudo_member "," Create a composite member iff value contains only members. ",9,71,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _create_pseudo_member_(cls, value): pseudo_member = cls._value2member_map_.get(value, None) if pseudo_member is None: # verify all bits are accounted for _, extra_flags = _decompose(cls, value) if extra_flags: raise ValueError(""%r is not a valid %s"" % (value, cls.__qualname__)) # construct a singleton enum pseudo-member pseudo_member = object.__new__(cls) pseudo_member._name_ = None pseudo_member._value_ = value # use setdefault in case another thread already created a composite # with this value pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) return pseudo_member ``` ###Assistant : Create a composite member iff value contains only members. " 1281,"def _identify_infrequent(self, category_count, n_samples, col_idx): if isinstance(self.min_frequency, numbers.Integral): infrequent_mask = category_count < self.min_frequency elif isinstance(self.min_frequency, numbers.Real): min_frequency_abs = n_samples * self.min_frequency infrequent_mask = category_count < min_frequency_abs else: infrequent_mask = np.zeros(category_count.shape[0], dtype=bool) n_current_features = category_count.size - infrequent_mask.sum() + 1 if self.max_categories is not None and self.max_categories < n_current_features: # stable sort to preserve original count order smallest_levels = np.argsort(category_count, kind=""mergesort"")[ : -self.max_categories + 1 ] infrequent_mask[smallest_levels] = True output = np.flatnonzero(infrequent_mask) return output if output.size > 0 else None ","Compute the infrequent indices. Parameters ---------- category_count : ndarray of shape (n_cardinality,) Category counts. n_samples : int Number of samples. col_idx : int Index of the current category. Only used for the error message. Returns ------- output : ndarray of shape (n_infrequent_categories,) or None If there are infrequent categories, indices of infrequent categories. Otherwise None. ",55,78,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _identify_infrequent(self, category_count, n_samples, col_idx): if isinstance(self.min_frequency, numbers.Integral): infrequent_mask = category_count < self.min_frequency elif isinstance(self.min_frequency, numbers.Real): min_frequency_abs = n_samples * self.min_frequency infrequent_mask = category_count < min_frequency_abs else: infrequent_mask = np.zeros(category_count.shape[0], dtype=bool) n_current_features = category_count.size - infrequent_mask.sum() + 1 if self.max_categories is not None and self.max_categories < n_current_features: # stable sort to preserve original count order smallest_levels = np.argsort(category_count, kind=""mergesort"")[ : -self.max_categories + 1 ] infrequent_mask[smallest_levels] = True output = np.flatnonzero(infrequent_mask) return output if output.size > 0 else None ``` ###Assistant : Compute the infrequent indices. Parameters ---------- category_count : ndarray of shape (n_cardinality,) Category counts. n_samples : int Number of samples. col_idx : int Index of the current category. Only used for the error message. Returns ------- output : ndarray of shape (n_infrequent_categories,) or None If there are infrequent categories, indices of infrequent categories. Otherwise None. 
" 1282,"def get_conn(self) -> container_v1.ClusterManagerClient: if self._client is None: credentials = self._get_credentials() self._client = container_v1.ClusterManagerClient(credentials=credentials, client_info=CLIENT_INFO) return self._client # To preserve backward compatibility # TODO: remove one day"," Returns ClusterManagerCLinet object. :rtype: google.cloud.container_v1.ClusterManagerClient ",5,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_conn(self) -> container_v1.ClusterManagerClient: if self._client is None: credentials = self._get_credentials() self._client = container_v1.ClusterManagerClient(credentials=credentials, client_info=CLIENT_INFO) return self._client # To preserve backward compatibility # TODO: remove one day ``` ###Assistant : Returns ClusterManagerCLinet object. :rtype: google.cloud.container_v1.ClusterManagerClient " 1283,"def iscoroutinefunction(func): return (inspect.iscoroutinefunction(func) or getattr(func, '_is_coroutine', None) is _is_coroutine) # Prioritize native coroutine check to speed-up # asyncio.iscoroutine. _COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType, collections.abc.Coroutine, CoroWrapper) _iscoroutine_typecache = set() ",Return True if func is a decorated coroutine function.,9,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def iscoroutinefunction(func): return (inspect.iscoroutinefunction(func) or getattr(func, '_is_coroutine', None) is _is_coroutine) # Prioritize native coroutine check to speed-up # asyncio.iscoroutine. _COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType, collections.abc.Coroutine, CoroWrapper) _iscoroutine_typecache = set() ``` ###Assistant : Return True if func is a decorated coroutine function. " 1284,"async def async_load(self) -> _T | None: if self._load_task is None: self._load_task = self.hass.async_create_task(self._async_load()) return await self._load_task ","Load data. If the expected version and minor version do not match the given versions, the migrate function will be invoked with migrate_func(version, minor_version, config). Will ensure that when a call comes in while another one is in progress, the second call will wait and return the result of the first call. ",52,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_load(self) -> _T | None: if self._load_task is None: self._load_task = self.hass.async_create_task(self._async_load()) return await self._load_task ``` ###Assistant : Load data. If the expected version and minor version do not match the given versions, the migrate function will be invoked with migrate_func(version, minor_version, config). Will ensure that when a call comes in while another one is in progress, the second call will wait and return the result of the first call. " 1285,"def extra_state_attributes(self) -> dict[str, Any] | None: data = super().extra_state_attributes or {} last_user = self.vera_device.get_last_user_alert() if last_user is not None: data[ATTR_LAST_USER_NAME] = last_user[1] data[ATTR_LOW_BATTERY] = self.vera_device.get_low_battery_alert() return data ","Who unlocked the lock and did a low battery alert fire. Reports on the previous poll cycle. changed_by_name is a string like 'Bob'. low_battery is 1 if an alert fired, 0 otherwise. ",32,28,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def extra_state_attributes(self) -> dict[str, Any] | None: data = super().extra_state_attributes or {} last_user = self.vera_device.get_last_user_alert() if last_user is not None: data[ATTR_LAST_USER_NAME] = last_user[1] data[ATTR_LOW_BATTERY] = self.vera_device.get_low_battery_alert() return data ``` ###Assistant : Who unlocked the lock and did a low battery alert fire. Reports on the previous poll cycle. changed_by_name is a string like 'Bob'. low_battery is 1 if an alert fired, 0 otherwise. " 1286,"def magic(self, arg_s): warnings.warn( ""`magic(...)` is deprecated since IPython 0.13 (warning added in "" ""8.1), use run_line_magic(magic_name, parameter_s)."", DeprecationWarning, stacklevel=2, ) # TODO: should we issue a loud deprecation warning here? magic_name, _, magic_arg_s = arg_s.partition(' ') magic_name = magic_name.lstrip(prefilter.ESC_MAGIC) return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2) #------------------------------------------------------------------------- # Things related to macros #------------------------------------------------------------------------- "," DEPRECATED Deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s). Call a magic function by name. Input: a string containing the name of the magic function to call and any additional arguments to be passed to the magic. magic('name -opt foo bar') is equivalent to typing at the ipython prompt: In[1]: %name -opt foo bar To call a magic without arguments, simply use magic('name'). This provides a proper Python function to call IPython's magics in any valid Python code you can type at the interpreter, including loops and compound statements. ",92,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def magic(self, arg_s): warnings.warn( ""`magic(...)` is deprecated since IPython 0.13 (warning added in "" ""8.1), use run_line_magic(magic_name, parameter_s)."", DeprecationWarning, stacklevel=2, ) # TODO: should we issue a loud deprecation warning here? magic_name, _, magic_arg_s = arg_s.partition(' ') magic_name = magic_name.lstrip(prefilter.ESC_MAGIC) return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2) #------------------------------------------------------------------------- # Things related to macros #------------------------------------------------------------------------- ``` ###Assistant : DEPRECATED Deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s). Call a magic function by name. Input: a string containing the name of the magic function to call and any additional arguments to be passed to the magic. magic('name -opt foo bar') is equivalent to typing at the ipython prompt: In[1]: %name -opt foo bar To call a magic without arguments, simply use magic('name'). This provides a proper Python function to call IPython's magics in any valid Python code you can type at the interpreter, including loops and compound statements. " 1287,"def create_command(name, **kwargs): # type: (str, **Any) -> Command module_path, class_name, summary = commands_dict[name] module = importlib.import_module(module_path) command_class = getattr(module, class_name) command = command_class(name=name, summary=summary, **kwargs) return command "," Create an instance of the Command class with the given name. ",11,28,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def create_command(name, **kwargs): # type: (str, **Any) -> Command module_path, class_name, summary = commands_dict[name] module = importlib.import_module(module_path) command_class = getattr(module, class_name) command = command_class(name=name, summary=summary, **kwargs) return command ``` ###Assistant : Create an instance of the Command class with the given name. " 1288,"def to_pandas_refs(self) -> List[ObjectRef[""pandas.DataFrame""]]: block_to_df = cached_remote_fn(_block_to_df) return [block_to_df.remote(block) for block in self._blocks.get_blocks()] ","Convert this dataset into a distributed set of Pandas dataframes. This is only supported for datasets convertible to Arrow records. This function induces a copy of the data. For zero-copy access to the underlying data, consider using ``.to_arrow()`` or ``.get_internal_block_refs()``. Time complexity: O(dataset size / parallelism) Returns: A list of remote Pandas dataframes created from this dataset. ",57,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_pandas_refs(self) -> List[ObjectRef[""pandas.DataFrame""]]: block_to_df = cached_remote_fn(_block_to_df) return [block_to_df.remote(block) for block in self._blocks.get_blocks()] ``` ###Assistant : Convert this dataset into a distributed set of Pandas dataframes. This is only supported for datasets convertible to Arrow records. This function induces a copy of the data. For zero-copy access to the underlying data, consider using ``.to_arrow()`` or ``.get_internal_block_refs()``. Time complexity: O(dataset size / parallelism) Returns: A list of remote Pandas dataframes created from this dataset. " 1289,"def hsplit(ary, indices_or_sections): if _nx.ndim(ary) == 0: raise ValueError('hsplit only works on arrays of 1 or more dimensions') if ary.ndim > 1: return split(ary, indices_or_sections, 1) else: return split(ary, indices_or_sections, 0) @array_function_dispatch(_hvdsplit_dispatcher)"," Split an array into multiple sub-arrays horizontally (column-wise). Please refer to the `split` documentation. `hsplit` is equivalent to `split` with ``axis=1``, the array is always split along the second axis except for 1-D arrays, where it is split at ``axis=0``. See Also -------- split : Split an array into multiple sub-arrays of equal size. Examples -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [12., 13., 14., 15.]]) >>> np.hsplit(x, 2) [array([[ 0., 1.], [ 4., 5.], [ 8., 9.], [12., 13.]]), array([[ 2., 3.], [ 6., 7.], [10., 11.], [14., 15.]])] >>> np.hsplit(x, np.array([3, 6])) [array([[ 0., 1., 2.], [ 4., 5., 6.], [ 8., 9., 10.], [12., 13., 14.]]), array([[ 3.], [ 7.], [11.], [15.]]), array([], shape=(4, 0), dtype=float64)] With a higher dimensional array the split is still along the second axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x array([[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]) >>> np.hsplit(x, 2) [array([[[0., 1.]], [[4., 5.]]]), array([[[2., 3.]], [[6., 7.]]])] With a 1-D array, the split is along axis 0. >>> x = np.array([0, 1, 2, 3, 4, 5]) >>> np.hsplit(x, 2) [array([0, 1, 2]), array([3, 4, 5])] ",203,32,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def hsplit(ary, indices_or_sections): if _nx.ndim(ary) == 0: raise ValueError('hsplit only works on arrays of 1 or more dimensions') if ary.ndim > 1: return split(ary, indices_or_sections, 1) else: return split(ary, indices_or_sections, 0) @array_function_dispatch(_hvdsplit_dispatcher) ``` ###Assistant : Split an array into multiple sub-arrays horizontally (column-wise). Please refer to the `split` documentation. `hsplit` is equivalent to `split` with ``axis=1``, the array is always split along the second axis except for 1-D arrays, where it is split at ``axis=0``. See Also -------- split : Split an array into multiple sub-arrays of equal size. Examples -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [12., 13., 14., 15.]]) >>> np.hsplit(x, 2) [array([[ 0., 1.], [ 4., 5.], [ 8., 9.], [12., 13.]]), array([[ 2., 3.], [ 6., 7.], [10., 11.], [14., 15.]])] >>> np.hsplit(x, np.array([3, 6])) [array([[ 0., 1., 2.], [ 4., 5., 6.], [ 8., 9., 10.], [12., 13., 14.]]), array([[ 3.], [ 7.], [11.], [15.]]), array([], shape=(4, 0), dtype=float64)] With a higher dimensional array the split is still along the second axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x array([[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]) >>> np.hsplit(x, 2) [array([[[0., 1.]], [[4., 5.]]]), array([[[2., 3.]], [[6., 7.]]])] With a 1-D array, the split is along axis 0. >>> x = np.array([0, 1, 2, 3, 4, 5]) >>> np.hsplit(x, 2) [array([0, 1, 2]), array([3, 4, 5])] " 1290,"def get_points_earned(self): def get_returned_amount(): returned_amount = frappe.db.sql( , self.name, ) return abs(flt(returned_amount[0][0])) if returned_amount else 0 lp_details = get_loyalty_program_details_with_points( self.customer, company=self.company, loyalty_program=self.loyalty_program, expiry_date=self.posting_date, include_expired_entry=True, ) if ( lp_details and getdate(lp_details.from_date) <= getdate(self.posting_date) and (not lp_details.to_date or getdate(lp_details.to_date) >= getdate(self.posting_date)) ): returned_amount = get_returned_amount() eligible_amount = flt(self.grand_total) - cint(self.loyalty_amount) - returned_amount points_earned = cint(eligible_amount / lp_details.collection_factor) return points_earned or 0 "," select sum(grand_total) from `tabSales Invoice` where docstatus=1 and is_return=1 and ifnull(return_against, '')=%s ",12,59,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_points_earned(self): def get_returned_amount(): returned_amount = frappe.db.sql( , self.name, ) return abs(flt(returned_amount[0][0])) if returned_amount else 0 lp_details = get_loyalty_program_details_with_points( self.customer, company=self.company, loyalty_program=self.loyalty_program, expiry_date=self.posting_date, include_expired_entry=True, ) if ( lp_details and getdate(lp_details.from_date) <= getdate(self.posting_date) and (not lp_details.to_date or getdate(lp_details.to_date) >= getdate(self.posting_date)) ): returned_amount = get_returned_amount() eligible_amount = flt(self.grand_total) - cint(self.loyalty_amount) - returned_amount points_earned = cint(eligible_amount / lp_details.collection_factor) return points_earned or 0 ``` ###Assistant : select sum(grand_total) from `tabSales Invoice` where docstatus=1 and is_return=1 and ifnull(return_against, '')=%s " 1291,"def _get_kernel(self) -> plaidml.tile.Value: coords = np.arange(self._filter_size, dtype=""float32"") coords -= (self._filter_size - 1) / 2. kernel = np.square(coords) kernel *= -0.5 / np.square(self._filter_sigma) kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1)) kernel = K.constant(np.reshape(kernel, (1, -1))) kernel = K.softmax(kernel) kernel = K.reshape(kernel, (self._filter_size, self._filter_size, 1, 1)) return kernel "," Obtain the base kernel for performing depthwise convolution. Returns ------- :class:`plaidml.tile.Value` The gaussian kernel based on selected size and sigma ",20,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_kernel(self) -> plaidml.tile.Value: coords = np.arange(self._filter_size, dtype=""float32"") coords -= (self._filter_size - 1) / 2. kernel = np.square(coords) kernel *= -0.5 / np.square(self._filter_sigma) kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1)) kernel = K.constant(np.reshape(kernel, (1, -1))) kernel = K.softmax(kernel) kernel = K.reshape(kernel, (self._filter_size, self._filter_size, 1, 1)) return kernel ``` ###Assistant : Obtain the base kernel for performing depthwise convolution. Returns ------- :class:`plaidml.tile.Value` The gaussian kernel based on selected size and sigma " 1292,"def test_default_default(self): block = blocks.ListBlock(blocks.CharBlock(default='chocolate')) self.assertEqual(list(block.get_default()), ['chocolate']) block.set_name('test_shoppinglistblock') js_args = ListBlockAdapter().js_args(block) self.assertEqual(js_args[2], 'chocolate') "," if no explicit 'default' is set on the ListBlock, it should fall back on a single instance of the child block in its default state. ",25,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_default_default(self): block = blocks.ListBlock(blocks.CharBlock(default='chocolate')) self.assertEqual(list(block.get_default()), ['chocolate']) block.set_name('test_shoppinglistblock') js_args = ListBlockAdapter().js_args(block) self.assertEqual(js_args[2], 'chocolate') ``` ###Assistant : if no explicit 'default' is set on the ListBlock, it should fall back on a single instance of the child block in its default state. " 1293,"def convert_empty_str_key(self) -> None: if self.namespaces and """" in self.namespaces.keys(): self.namespaces[None] = self.namespaces.pop("""", ""default"") "," Replace zero-length string in `namespaces`. 
This method will replace '' with None to align to `lxml` requirement that empty string prefixes are not allowed. ",24,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert_empty_str_key(self) -> None: if self.namespaces and """" in self.namespaces.keys(): self.namespaces[None] = self.namespaces.pop("""", ""default"") ``` ###Assistant : Replace zero-length string in `namespaces`. This method will replace '' with None to align to `lxml` requirement that empty string prefixes are not allowed. " 1294,"def test_get_with_custom_key_using_default_key(self): # Generate signature signature = generate_signature(self.image.id, ""fill-800x600"") # Get the image response = self.client.get( reverse( ""wagtailimages_serve_custom_key"", args=(signature, self.image.id, ""fill-800x600""), ) + ""test.png"" ) # Check response self.assertEqual(response.status_code, 403) "," Test that the key can be changed on the view This tests that the default key no longer works when the key is changed on the view ",28,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_with_custom_key_using_default_key(self): # Generate signature signature = generate_signature(self.image.id, ""fill-800x600"") # Get the image response = self.client.get( reverse( ""wagtailimages_serve_custom_key"", args=(signature, self.image.id, ""fill-800x600""), ) + ""test.png"" ) # Check response self.assertEqual(response.status_code, 403) ``` ###Assistant : Test that the key can be changed on the view This tests that the default key no longer works when the key is changed on the view " 1295,"async def test_timeouts_do_not_hide_crashes(self, flow_run, orion_client): started = anyio.Event() "," Since timeouts capture anyio cancellations, we want to ensure that something still ends up in a 'Crashed' state if it is cancelled independently from our timeout cancellation. ",27,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_timeouts_do_not_hide_crashes(self, flow_run, orion_client): started = anyio.Event() ``` ###Assistant : Since timeouts capture anyio cancellations, we want to ensure that something still ends up in a 'Crashed' state if it is cancelled independently from our timeout cancellation. " 1296,"def test_enable_disable_conflict_with_config(): nlp = English() nlp.add_pipe(""tagger"") nlp.add_pipe(""senter"") nlp.add_pipe(""sentencizer"") with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) # Expected to fail, as config and arguments conflict. with pytest.raises(ValueError): spacy.load( tmp_dir, enable=[""tagger""], config={""nlp"": {""disabled"": [""senter""]}} ) # Expected to succeed without warning due to the lack of a conflicting config option. spacy.load(tmp_dir, enable=[""tagger""]) # Expected to succeed with a warning, as disable=[] should override the config setting. with pytest.warns(UserWarning): spacy.load( tmp_dir, enable=[""tagger""], disable=[], config={""nlp"": {""disabled"": [""senter""]}}, ) ",Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config.,10,72,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_enable_disable_conflict_with_config(): nlp = English() nlp.add_pipe(""tagger"") nlp.add_pipe(""senter"") nlp.add_pipe(""sentencizer"") with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) # Expected to fail, as config and arguments conflict. with pytest.raises(ValueError): spacy.load( tmp_dir, enable=[""tagger""], config={""nlp"": {""disabled"": [""senter""]}} ) # Expected to succeed without warning due to the lack of a conflicting config option. spacy.load(tmp_dir, enable=[""tagger""]) # Expected to succeed with a warning, as disable=[] should override the config setting. with pytest.warns(UserWarning): spacy.load( tmp_dir, enable=[""tagger""], disable=[], config={""nlp"": {""disabled"": [""senter""]}}, ) ``` ###Assistant : Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config. " 1297,"async def test_logs_streaming(job_manager): stream_logs_script = stream_logs_cmd = f'python -c ""{stream_logs_script}""' job_id = await job_manager.submit_job(entrypoint=stream_logs_cmd) await async_wait_for_condition( lambda: ""STREAMED"" in job_manager.get_job_logs(job_id) ) job_manager.stop_job(job_id) @pytest.mark.asyncio","Test that logs are streamed during the job, not just at the end. import time print('STREAMED') while True: time.sleep(1) ",19,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_logs_streaming(job_manager): stream_logs_script = stream_logs_cmd = f'python -c ""{stream_logs_script}""' job_id = await job_manager.submit_job(entrypoint=stream_logs_cmd) await async_wait_for_condition( lambda: ""STREAMED"" in job_manager.get_job_logs(job_id) ) job_manager.stop_job(job_id) @pytest.mark.asyncio ``` ###Assistant : Test that logs are streamed during the job, not just at the end. import time print('STREAMED') while True: time.sleep(1) " 1298,"def _train_no_recompute(n_steps): img_dim, n_channels, batch_size = 256, 1, 4 x, y = _get_dummy_data(img_dim, n_channels, batch_size) model = _get_big_cnn_model( img_dim, n_channels, num_partitions=3, blocks_per_partition=2 ) optimizer = optimizers.SGD() losses = [] tr_vars = model.trainable_variables for _ in range(n_steps): with tf.GradientTape() as tape: logits = model(x) loss = _compute_loss(logits, y) losses.append(loss) grads = tape.gradient(loss, tr_vars) # tr_vars optimizer.apply_gradients(zip(grads, tr_vars)) del grads return losses ",Trains a single large model without gradient checkpointing.,8,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _train_no_recompute(n_steps): img_dim, n_channels, batch_size = 256, 1, 4 x, y = _get_dummy_data(img_dim, n_channels, batch_size) model = _get_big_cnn_model( img_dim, n_channels, num_partitions=3, blocks_per_partition=2 ) optimizer = optimizers.SGD() losses = [] tr_vars = model.trainable_variables for _ in range(n_steps): with tf.GradientTape() as tape: logits = model(x) loss = _compute_loss(logits, y) losses.append(loss) grads = tape.gradient(loss, tr_vars) # tr_vars optimizer.apply_gradients(zip(grads, tr_vars)) del grads return losses ``` ###Assistant : Trains a single large model without gradient checkpointing. 
" 1299,"def _make_replica_execution_function(model, mode): if mode == ModeKeys.TRAIN: func = model.train_on_batch elif mode == ModeKeys.TEST: func = model.test_on_batch else: ",A single step of the distributed execution on a replica.,10,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _make_replica_execution_function(model, mode): if mode == ModeKeys.TRAIN: func = model.train_on_batch elif mode == ModeKeys.TEST: func = model.test_on_batch else: ``` ###Assistant : A single step of the distributed execution on a replica. " 1300,"def monthdays2calendar(self, year, month): days = list(self.itermonthdays2(year, month)) return [ days[i:i+7] for i in range(0, len(days), 7) ] "," Return a matrix representing a month's calendar. Each row represents a week; week entries are (day number, weekday number) tuples. Day numbers outside this month are zero. ",27,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def monthdays2calendar(self, year, month): days = list(self.itermonthdays2(year, month)) return [ days[i:i+7] for i in range(0, len(days), 7) ] ``` ###Assistant : Return a matrix representing a month's calendar. Each row represents a week; week entries are (day number, weekday number) tuples. Day numbers outside this month are zero. " 1301,"async def predict_with_route(self, route_path, *args, **kwargs): if route_path not in self.dags: raise RayServeException(f""{route_path} does not exist in dags routes"") return await self.dags[route_path].remote(*args, **kwargs) ",Perform inference directly without HTTP for multi dags.,8,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def predict_with_route(self, route_path, *args, **kwargs): if route_path not in self.dags: raise RayServeException(f""{route_path} does not exist in dags routes"") return await self.dags[route_path].remote(*args, **kwargs) ``` ###Assistant : Perform inference directly without HTTP for multi dags. " 1302,"def should_recover(self): return ( self.num_failures < self.max_failures or self.max_failures < 0 or ( self.num_failures == self.max_failures and self.num_restore_failures < int(os.environ.get(""TUNE_RESTORE_RETRY_NUM"", 0)) ) ) ","Returns whether the trial qualifies for retrying. This is if the trial has not failed more than max_failures. Note this may return true even when there is no checkpoint, either because `self.checkpoint_freq` is `0` or because the trial failed before a checkpoint has been made. ",45,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def should_recover(self): return ( self.num_failures < self.max_failures or self.max_failures < 0 or ( self.num_failures == self.max_failures and self.num_restore_failures < int(os.environ.get(""TUNE_RESTORE_RETRY_NUM"", 0)) ) ) ``` ###Assistant : Returns whether the trial qualifies for retrying. This is if the trial has not failed more than max_failures. Note this may return true even when there is no checkpoint, either because `self.checkpoint_freq` is `0` or because the trial failed before a checkpoint has been made. 
" 1303,"def test_change_root_page_locale_on_locale_deletion(self): # change 'real' pages first Page.objects.filter(depth__gt=1).update( locale=Locale.objects.get(language_code=""fr"") ) self.assertEqual(Page.get_first_root_node().locale.language_code, ""en"") Locale.objects.get(language_code=""en"").delete() self.assertEqual(Page.get_first_root_node().locale.language_code, ""fr"") "," On deleting the locale used for the root page (but no 'real' pages), the root page should be reassigned to a new locale (the default one, if possible) ",28,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_change_root_page_locale_on_locale_deletion(self): # change 'real' pages first Page.objects.filter(depth__gt=1).update( locale=Locale.objects.get(language_code=""fr"") ) self.assertEqual(Page.get_first_root_node().locale.language_code, ""en"") Locale.objects.get(language_code=""en"").delete() self.assertEqual(Page.get_first_root_node().locale.language_code, ""fr"") ``` ###Assistant : On deleting the locale used for the root page (but no 'real' pages), the root page should be reassigned to a new locale (the default one, if possible) " 1304,"def _proc_pax(self, tarfile): # Read the header information. buf = tarfile.fileobj.read(self._block(self.size)) # A pax header stores supplemental information for either # the following file (extended) or all following files # (global). if self.type == XGLTYPE: pax_headers = tarfile.pax_headers else: pax_headers = tarfile.pax_headers.copy() # Check if the pax header contains a hdrcharset field. This tells us # the encoding of the path, linkpath, uname and gname fields. Normally, # these fields are UTF-8 encoded but since POSIX.1-2008 tar # implementations are allowed to store them as raw binary strings if # the translation to UTF-8 fails. match = re.search(br""\d+ hdrcharset=([^\n]+)\n"", buf) if match is not None: pax_headers[""hdrcharset""] = match.group(1).decode(""utf8"") # For the time being, we don't care about anything other than ""BINARY"". # The only other value that is currently allowed by the standard is # ""ISO-IR 10646 2000 UTF-8"" in other words UTF-8. hdrcharset = pax_headers.get(""hdrcharset"") if hdrcharset == ""BINARY"": encoding = tarfile.encoding else: encoding = ""utf8"" # Parse pax header information. A record looks like that: # ""%d %s=%s\n"" % (length, keyword, value). length is the size # of the complete record including the length field itself and # the newline. keyword and value are both UTF-8 encoded strings. regex = re.compile(br""(\d+) ([^=]+)="") pos = 0 while True: match = regex.match(buf, pos) if not match: break length, keyword = match.groups() length = int(length) value = buf[match.end(2) + 1:match.start(1) + length - 1] # Normally, we could just use ""utf8"" as the encoding and ""strict"" # as the error handler, but we better not take the risk. For # example, GNU tar <= 1.23 is known to store filenames it cannot # translate to UTF-8 as raw strings (unfortunately without a # hdrcharset=BINARY header). # We first try the strict standard encoding, and if that fails we # fall back on the user's encoding and error handler. keyword = self._decode_pax_field(keyword, ""utf8"", ""utf8"", tarfile.errors) if keyword in PAX_NAME_FIELDS: value = self._decode_pax_field(value, encoding, tarfile.encoding, tarfile.errors) else: value = self._decode_pax_field(value, ""utf8"", ""utf8"", tarfile.errors) pax_headers[keyword] = value pos += length # Fetch the next header. 
try: next = self.fromtarfile(tarfile) except HeaderError: raise SubsequentHeaderError(""missing or bad subsequent header"") # Process GNU sparse information. if ""GNU.sparse.map"" in pax_headers: # GNU extended sparse format version 0.1. self._proc_gnusparse_01(next, pax_headers) elif ""GNU.sparse.size"" in pax_headers: # GNU extended sparse format version 0.0. self._proc_gnusparse_00(next, pax_headers, buf) elif pax_headers.get(""GNU.sparse.major"") == ""1"" and pax_headers.get(""GNU.sparse.minor"") == ""0"": # GNU extended sparse format version 1.0. self._proc_gnusparse_10(next, pax_headers, tarfile) if self.type in (XHDTYPE, SOLARIS_XHDTYPE): # Patch the TarInfo object with the extended header info. next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) next.offset = self.offset if ""size"" in pax_headers: # If the extended header replaces the size field, # we need to recalculate the offset where the next # header starts. offset = next.offset_data if next.isreg() or next.type not in SUPPORTED_TYPES: offset += next._block(next.size) tarfile.offset = offset return next ","Process an extended or global header as described in POSIX.1-2008. ",10,468,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _proc_pax(self, tarfile): # Read the header information. buf = tarfile.fileobj.read(self._block(self.size)) # A pax header stores supplemental information for either # the following file (extended) or all following files # (global). if self.type == XGLTYPE: pax_headers = tarfile.pax_headers else: pax_headers = tarfile.pax_headers.copy() # Check if the pax header contains a hdrcharset field. This tells us # the encoding of the path, linkpath, uname and gname fields. Normally, # these fields are UTF-8 encoded but since POSIX.1-2008 tar # implementations are allowed to store them as raw binary strings if # the translation to UTF-8 fails. match = re.search(br""\d+ hdrcharset=([^\n]+)\n"", buf) if match is not None: pax_headers[""hdrcharset""] = match.group(1).decode(""utf8"") # For the time being, we don't care about anything other than ""BINARY"". # The only other value that is currently allowed by the standard is # ""ISO-IR 10646 2000 UTF-8"" in other words UTF-8. hdrcharset = pax_headers.get(""hdrcharset"") if hdrcharset == ""BINARY"": encoding = tarfile.encoding else: encoding = ""utf8"" # Parse pax header information. A record looks like that: # ""%d %s=%s\n"" % (length, keyword, value). length is the size # of the complete record including the length field itself and # the newline. keyword and value are both UTF-8 encoded strings. regex = re.compile(br""(\d+) ([^=]+)="") pos = 0 while True: match = regex.match(buf, pos) if not match: break length, keyword = match.groups() length = int(length) value = buf[match.end(2) + 1:match.start(1) + length - 1] # Normally, we could just use ""utf8"" as the encoding and ""strict"" # as the error handler, but we better not take the risk. For # example, GNU tar <= 1.23 is known to store filenames it cannot # translate to UTF-8 as raw strings (unfortunately without a # hdrcharset=BINARY header). # We first try the strict standard encoding, and if that fails we # fall back on the user's encoding and error handler. 
keyword = self._decode_pax_field(keyword, ""utf8"", ""utf8"", tarfile.errors) if keyword in PAX_NAME_FIELDS: value = self._decode_pax_field(value, encoding, tarfile.encoding, tarfile.errors) else: value = self._decode_pax_field(value, ""utf8"", ""utf8"", tarfile.errors) pax_headers[keyword] = value pos += length # Fetch the next header. try: next = self.fromtarfile(tarfile) except HeaderError: raise SubsequentHeaderError(""missing or bad subsequent header"") # Process GNU sparse information. if ""GNU.sparse.map"" in pax_headers: # GNU extended sparse format version 0.1. self._proc_gnusparse_01(next, pax_headers) elif ""GNU.sparse.size"" in pax_headers: # GNU extended sparse format version 0.0. self._proc_gnusparse_00(next, pax_headers, buf) elif pax_headers.get(""GNU.sparse.major"") == ""1"" and pax_headers.get(""GNU.sparse.minor"") == ""0"": # GNU extended sparse format version 1.0. self._proc_gnusparse_10(next, pax_headers, tarfile) if self.type in (XHDTYPE, SOLARIS_XHDTYPE): # Patch the TarInfo object with the extended header info. next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) next.offset = self.offset if ""size"" in pax_headers: # If the extended header replaces the size field, # we need to recalculate the offset where the next # header starts. offset = next.offset_data if next.isreg() or next.type not in SUPPORTED_TYPES: offset += next._block(next.size) tarfile.offset = offset return next ``` ###Assistant : Process an extended or global header as described in POSIX.1-2008. " 1305,"def get_global_params(): GlobalParams = namedtuple('GlobalParams', [ 'drop_connect_rate', 'width_coefficient', 'depth_coefficient', 'depth_divisor', 'image_size' ]) global_params = GlobalParams( drop_connect_rate=0.3, width_coefficient=1.2, depth_coefficient=1.4, depth_divisor=8, image_size=64) return global_params "," The following are efficientnetb3's arch superparams, but to fit for scene text recognition task, the resolution(image_size) here is changed from 300 to 64. ",23,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_global_params(): GlobalParams = namedtuple('GlobalParams', [ 'drop_connect_rate', 'width_coefficient', 'depth_coefficient', 'depth_divisor', 'image_size' ]) global_params = GlobalParams( drop_connect_rate=0.3, width_coefficient=1.2, depth_coefficient=1.4, depth_divisor=8, image_size=64) return global_params ``` ###Assistant : The following are efficientnetb3's arch superparams, but to fit for scene text recognition task, the resolution(image_size) here is changed from 300 to 64. " 1306,"def _create_closed(cls, vertices): v = _to_unmasked_float_array(vertices) return cls(np.concatenate([v, v[:1]]), closed=True) "," Create a closed polygonal path going through *vertices*. Unlike ``Path(..., closed=True)``, *vertices* should **not** end with an entry for the CLOSEPATH; this entry is added by `._create_closed`. ",27,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_closed(cls, vertices): v = _to_unmasked_float_array(vertices) return cls(np.concatenate([v, v[:1]]), closed=True) ``` ###Assistant : Create a closed polygonal path going through *vertices*. Unlike ``Path(..., closed=True)``, *vertices* should **not** end with an entry for the CLOSEPATH; this entry is added by `._create_closed`. 
" 1307,"def test_predict_proba(loss, global_random_seed): n_samples = 20 y_true, raw_prediction = random_y_true_raw_prediction( loss=loss, n_samples=n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=global_random_seed, ) if hasattr(loss, ""predict_proba""): proba = loss.predict_proba(raw_prediction) assert proba.shape == (n_samples, loss.n_classes) assert np.sum(proba, axis=1) == approx(1, rel=1e-11) if hasattr(loss, ""gradient_proba""): for grad, proba in ( (None, None), (None, np.empty_like(raw_prediction)), (np.empty_like(raw_prediction), None), (np.empty_like(raw_prediction), np.empty_like(raw_prediction)), ): grad, proba = loss.gradient_proba( y_true=y_true, raw_prediction=raw_prediction, sample_weight=None, gradient_out=grad, proba_out=proba, ) assert proba.shape == (n_samples, loss.n_classes) assert np.sum(proba, axis=1) == approx(1, rel=1e-11) assert_allclose( grad, loss.gradient( y_true=y_true, raw_prediction=raw_prediction, sample_weight=None, gradient_out=None, ), ) @pytest.mark.parametrize(""loss"", ALL_LOSSES) @pytest.mark.parametrize(""sample_weight"", [None, ""range""]) @pytest.mark.parametrize(""dtype"", (np.float32, np.float64)) @pytest.mark.parametrize(""order"", (""C"", ""F""))",Test that predict_proba and gradient_proba work as expected.,8,93,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_predict_proba(loss, global_random_seed): n_samples = 20 y_true, raw_prediction = random_y_true_raw_prediction( loss=loss, n_samples=n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=global_random_seed, ) if hasattr(loss, ""predict_proba""): proba = loss.predict_proba(raw_prediction) assert proba.shape == (n_samples, loss.n_classes) assert np.sum(proba, axis=1) == approx(1, rel=1e-11) if hasattr(loss, ""gradient_proba""): for grad, proba in ( (None, None), (None, np.empty_like(raw_prediction)), (np.empty_like(raw_prediction), None), (np.empty_like(raw_prediction), np.empty_like(raw_prediction)), ): grad, proba = loss.gradient_proba( y_true=y_true, raw_prediction=raw_prediction, sample_weight=None, gradient_out=grad, proba_out=proba, ) assert proba.shape == (n_samples, loss.n_classes) assert np.sum(proba, axis=1) == approx(1, rel=1e-11) assert_allclose( grad, loss.gradient( y_true=y_true, raw_prediction=raw_prediction, sample_weight=None, gradient_out=None, ), ) @pytest.mark.parametrize(""loss"", ALL_LOSSES) @pytest.mark.parametrize(""sample_weight"", [None, ""range""]) @pytest.mark.parametrize(""dtype"", (np.float32, np.float64)) @pytest.mark.parametrize(""order"", (""C"", ""F"")) ``` ###Assistant : Test that predict_proba and gradient_proba work as expected. " 1308,"def get_late_shipments(scorecard): return get_total_shipments(scorecard) - get_on_time_shipments(scorecard) ",Gets the number of late shipments (counting each item) in the period (based on Purchase Receipts vs POs),18,6,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_late_shipments(scorecard): return get_total_shipments(scorecard) - get_on_time_shipments(scorecard) ``` ###Assistant : Gets the number of late shipments (counting each item) in the period (based on Purchase Receipts vs POs) " 1309,"def eval_loss(self, targets, predictions): eval_loss = 0 for of_name, of_obj in self.output_features.items(): of_eval_loss = of_obj.eval_loss(targets[of_name], predictions[of_name]) eval_loss += of_obj.loss[""weight""] * of_eval_loss additional_loss = 0 additional_losses = self.losses() if additional_losses: additional_loss = torch.sum(torch.stack(additional_losses)) # other losses return eval_loss, additional_loss ","Computes all evaluation losses for the model given targets and predictions. Args: targets: A dictionary of target names to target tensors. predictions: A dictionary of output names to output tensors. Returns: A tuple of loss values for eval losses and additional losses. ",42,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def eval_loss(self, targets, predictions): eval_loss = 0 for of_name, of_obj in self.output_features.items(): of_eval_loss = of_obj.eval_loss(targets[of_name], predictions[of_name]) eval_loss += of_obj.loss[""weight""] * of_eval_loss additional_loss = 0 additional_losses = self.losses() if additional_losses: additional_loss = torch.sum(torch.stack(additional_losses)) # other losses return eval_loss, additional_loss ``` ###Assistant : Computes all evaluation losses for the model given targets and predictions. Args: targets: A dictionary of target names to target tensors. predictions: A dictionary of output names to output tensors. Returns: A tuple of loss values for eval losses and additional losses. " 1310,"def partition_query(self, query, limit, offset): return ( ( f""SELECT * FROM ({query}) AS _ ORDER BY(SELECT NULL)"" + f"" OFFSET {offset} ROWS FETCH NEXT {limit} ROWS ONLY"" ) if self._dialect_is_microsoft_sql() else f""SELECT * FROM ({query}) LIMIT {limit} OFFSET {offset}"" ) "," Get a query that partitions the original `query`. Parameters ---------- query : str The SQL query to get a partition. limit : int The size of the partition. offset : int Where the partition begins. Returns ------- str ",38,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def partition_query(self, query, limit, offset): return ( ( f""SELECT * FROM ({query}) AS _ ORDER BY(SELECT NULL)"" + f"" OFFSET {offset} ROWS FETCH NEXT {limit} ROWS ONLY"" ) if self._dialect_is_microsoft_sql() else f""SELECT * FROM ({query}) LIMIT {limit} OFFSET {offset}"" ) ``` ###Assistant : Get a query that partitions the original `query`. Parameters ---------- query : str The SQL query to get a partition. limit : int The size of the partition. offset : int Where the partition begins. 
Returns ------- str " 1311,"def test_parameter_ends_with__in__or__isnull(self): # When it ends with '__in' ----------------------------------------- modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site) request = self.request_factory.get(""/"", {""decade__in"": ""the 90s""}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) # Make sure the correct queryset is returned queryset = changelist.get_queryset(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(filterspec.title, ""publication decade"") choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2][""display""], ""the 1990's"") self.assertIs(choices[2][""selected""], True) self.assertEqual(choices[2][""query_string""], ""?decade__in=the+90s"") # When it ends with '__isnull' --------------------------------------- modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site) request = self.request_factory.get(""/"", {""decade__isnull"": ""the 90s""}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) # Make sure the correct queryset is returned queryset = changelist.get_queryset(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(filterspec.title, ""publication decade"") choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2][""display""], ""the 1990's"") self.assertIs(choices[2][""selected""], True) self.assertEqual(choices[2][""query_string""], ""?decade__isnull=the+90s"") "," A SimpleListFilter's parameter name is not mistaken for a model field if it ends with '__isnull' or '__in' (#17091). ",19,122,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_parameter_ends_with__in__or__isnull(self): # When it ends with '__in' ----------------------------------------- modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site) request = self.request_factory.get(""/"", {""decade__in"": ""the 90s""}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) # Make sure the correct queryset is returned queryset = changelist.get_queryset(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(filterspec.title, ""publication decade"") choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2][""display""], ""the 1990's"") self.assertIs(choices[2][""selected""], True) self.assertEqual(choices[2][""query_string""], ""?decade__in=the+90s"") # When it ends with '__isnull' --------------------------------------- modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site) request = self.request_factory.get(""/"", {""decade__isnull"": ""the 90s""}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) # Make sure the correct queryset is returned queryset = changelist.get_queryset(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(filterspec.title, ""publication decade"") choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2][""display""], ""the 1990's"") self.assertIs(choices[2][""selected""], True) self.assertEqual(choices[2][""query_string""], ""?decade__isnull=the+90s"") ``` ###Assistant : A SimpleListFilter's parameter name is not mistaken for a model field if it ends with '__isnull' or '__in' (#17091). " 1312,"def v4_int_to_packed(address): try: return address.to_bytes(4, 'big') except OverflowError: raise ValueError(""Address negative or too large for IPv4"") ","Represent an address as 4 packed bytes in network (big-endian) order. Args: address: An integer representation of an IPv4 IP address. Returns: The integer address packed as 4 bytes in network (big-endian) order. Raises: ValueError: If the integer is negative or too large to be an IPv4 IP address. ",49,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def v4_int_to_packed(address): try: return address.to_bytes(4, 'big') except OverflowError: raise ValueError(""Address negative or too large for IPv4"") ``` ###Assistant : Represent an address as 4 packed bytes in network (big-endian) order. Args: address: An integer representation of an IPv4 IP address. Returns: The integer address packed as 4 bytes in network (big-endian) order. Raises: ValueError: If the integer is negative or too large to be an IPv4 IP address. " 1313,"def test_delete_alias_not_allowed(self) -> None: self._create_alias(self.admin_user) self.get_failure( self.handler.delete_association( create_requester(self.test_user), self.room_alias ), synapse.api.errors.AuthError, ) ",A user that doesn't meet the expected guidelines cannot delete an alias.,12,12,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_delete_alias_not_allowed(self) -> None: self._create_alias(self.admin_user) self.get_failure( self.handler.delete_association( create_requester(self.test_user), self.room_alias ), synapse.api.errors.AuthError, ) ``` ###Assistant : A user that doesn't meet the expected guidelines cannot delete an alias. " 1314,"def trigintegrate(f, x, conds='piecewise'): pat, a, n, m = _pat_sincos(x) f = f.rewrite('sincos') M = f.match(pat) if M is None: return n, m = M[n], M[m] if n.is_zero and m.is_zero: return x zz = x if n.is_zero else S.Zero a = M[a] if n.is_odd or m.is_odd: u = _u n_, m_ = n.is_odd, m.is_odd # take smallest n or m -- to choose simplest substitution if n_ and m_: # Make sure to choose the positive one # otherwise an incorrect integral can occur. if n < 0 and m > 0: m_ = True n_ = False elif m < 0 and n > 0: n_ = True m_ = False # Both are negative so choose the smallest n or m # in absolute value for simplest substitution. elif (n < 0 and m < 0): n_ = n > m m_ = not (n > m) # Both n and m are odd and positive else: n_ = (n < m) # NB: careful here, one of the m_ = not (n < m) # conditions *must* be true # n m u=C (n-1)/2 m # S(x) * C(x) dx --> -(1-u^2) * u du if n_: ff = -(1 - u**2)**((n - 1)/2) * u**m uu = cos(a*x) # n m u=S n (m-1)/2 # S(x) * C(x) dx --> u * (1-u^2) du elif m_: ff = u**n * (1 - u**2)**((m - 1)/2) uu = sin(a*x) fi = integrate(ff, u) # XXX cyclic deps fx = fi.subs(u, uu) if conds == 'piecewise': return Piecewise((fx / a, Ne(a, 0)), (zz, True)) return fx / a # n & m are both even # # 2k 2m 2l 2l # we transform S (x) * C (x) into terms with only S (x) or C (x) # # example: # 100 4 100 2 2 100 4 2 # S (x) * C (x) = S (x) * (1-S (x)) = S (x) * (1 + S (x) - 2*S (x)) # # 104 102 100 # = S (x) - 2*S (x) + S (x) # 2k # then S is integrated with recursive formula # take largest n or m -- to choose simplest substitution n_ = (Abs(n) > Abs(m)) m_ = (Abs(m) > Abs(n)) res = S.Zero if n_: # 2k 2 k i 2i # C = (1 - S ) = sum(i, (-) * B(k, i) * S ) if m > 0: for i in range(0, m//2 + 1): res += (S.NegativeOne**i * binomial(m//2, i) * _sin_pow_integrate(n + 2*i, x)) elif m == 0: res = _sin_pow_integrate(n, x) else: # m < 0 , |n| > |m| # / # | # | m n # | cos (x) sin (x) dx = # | # | #/ # / # | # -1 m+1 n-1 n - 1 | m+2 n-2 # ________ cos (x) sin (x) + _______ | cos (x) sin (x) dx # | # m + 1 m + 1 | # / res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) + Rational(n - 1, m + 1) * trigintegrate(cos(x)**(m + 2)*sin(x)**(n - 2), x)) elif m_: # 2k 2 k i 2i # S = (1 - C ) = sum(i, (-) * B(k, i) * C ) if n > 0: # / / # | | # | m n | -m n # | cos (x)*sin (x) dx or | cos (x) * sin (x) dx # | | # / / # # |m| > |n| ; m, n >0 ; m, n belong to Z - {0} # n 2 # sin (x) term is expanded here in terms of cos (x), # and then integrated. # for i in range(0, n//2 + 1): res += (S.NegativeOne**i * binomial(n//2, i) * _cos_pow_integrate(m + 2*i, x)) elif n == 0: # / # | # | 1 # | _ _ _ # | m # | cos (x) # / # res = _cos_pow_integrate(m, x) else: # n < 0 , |m| > |n| # / # | # | m n # | cos (x) sin (x) dx = # | # | #/ # / # | # 1 m-1 n+1 m - 1 | m-2 n+2 # _______ cos (x) sin (x) + _______ | cos (x) sin (x) dx # | # n + 1 n + 1 | # / res = (Rational(1, n + 1) * cos(x)**(m - 1)*sin(x)**(n + 1) + Rational(m - 1, n + 1) * trigintegrate(cos(x)**(m - 2)*sin(x)**(n + 2), x)) else: if m == n: ##Substitute sin(2x)/2 for sin(x)cos(x) and then Integrate. 
res = integrate((sin(2*x)*S.Half)**m, x) elif (m == -n): if n < 0: # Same as the scheme described above. # the function argument to integrate in the end will # be 1, this cannot be integrated by trigintegrate. # Hence use sympy.integrals.integrate. res = (Rational(1, n + 1) * cos(x)**(m - 1) * sin(x)**(n + 1) + Rational(m - 1, n + 1) * integrate(cos(x)**(m - 2) * sin(x)**(n + 2), x)) else: res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) + Rational(n - 1, m + 1) * integrate(cos(x)**(m + 2)*sin(x)**(n - 2), x)) if conds == 'piecewise': return Piecewise((res.subs(x, a*x) / a, Ne(a, 0)), (zz, True)) return res.subs(x, a*x) / a "," Integrate f = Mul(trig) over x. Examples ======== >>> from sympy import sin, cos, tan, sec >>> from sympy.integrals.trigonometry import trigintegrate >>> from sympy.abc import x >>> trigintegrate(sin(x)*cos(x), x) sin(x)**2/2 >>> trigintegrate(sin(x)**2, x) x/2 - sin(x)*cos(x)/2 >>> trigintegrate(tan(x)*sec(x), x) 1/cos(x) >>> trigintegrate(sin(x)*tan(x), x) -log(sin(x) - 1)/2 + log(sin(x) + 1)/2 - sin(x) References ========== .. [1] http://en.wikibooks.org/wiki/Calculus/Integration_techniques See Also ======== sympy.integrals.integrals.Integral.doit sympy.integrals.integrals.Integral ",62,909,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def trigintegrate(f, x, conds='piecewise'): pat, a, n, m = _pat_sincos(x) f = f.rewrite('sincos') M = f.match(pat) if M is None: return n, m = M[n], M[m] if n.is_zero and m.is_zero: return x zz = x if n.is_zero else S.Zero a = M[a] if n.is_odd or m.is_odd: u = _u n_, m_ = n.is_odd, m.is_odd # take smallest n or m -- to choose simplest substitution if n_ and m_: # Make sure to choose the positive one # otherwise an incorrect integral can occur. if n < 0 and m > 0: m_ = True n_ = False elif m < 0 and n > 0: n_ = True m_ = False # Both are negative so choose the smallest n or m # in absolute value for simplest substitution. 
elif (n < 0 and m < 0): n_ = n > m m_ = not (n > m) # Both n and m are odd and positive else: n_ = (n < m) # NB: careful here, one of the m_ = not (n < m) # conditions *must* be true # n m u=C (n-1)/2 m # S(x) * C(x) dx --> -(1-u^2) * u du if n_: ff = -(1 - u**2)**((n - 1)/2) * u**m uu = cos(a*x) # n m u=S n (m-1)/2 # S(x) * C(x) dx --> u * (1-u^2) du elif m_: ff = u**n * (1 - u**2)**((m - 1)/2) uu = sin(a*x) fi = integrate(ff, u) # XXX cyclic deps fx = fi.subs(u, uu) if conds == 'piecewise': return Piecewise((fx / a, Ne(a, 0)), (zz, True)) return fx / a # n & m are both even # # 2k 2m 2l 2l # we transform S (x) * C (x) into terms with only S (x) or C (x) # # example: # 100 4 100 2 2 100 4 2 # S (x) * C (x) = S (x) * (1-S (x)) = S (x) * (1 + S (x) - 2*S (x)) # # 104 102 100 # = S (x) - 2*S (x) + S (x) # 2k # then S is integrated with recursive formula # take largest n or m -- to choose simplest substitution n_ = (Abs(n) > Abs(m)) m_ = (Abs(m) > Abs(n)) res = S.Zero if n_: # 2k 2 k i 2i # C = (1 - S ) = sum(i, (-) * B(k, i) * S ) if m > 0: for i in range(0, m//2 + 1): res += (S.NegativeOne**i * binomial(m//2, i) * _sin_pow_integrate(n + 2*i, x)) elif m == 0: res = _sin_pow_integrate(n, x) else: # m < 0 , |n| > |m| # / # | # | m n # | cos (x) sin (x) dx = # | # | #/ # / # | # -1 m+1 n-1 n - 1 | m+2 n-2 # ________ cos (x) sin (x) + _______ | cos (x) sin (x) dx # | # m + 1 m + 1 | # / res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) + Rational(n - 1, m + 1) * trigintegrate(cos(x)**(m + 2)*sin(x)**(n - 2), x)) elif m_: # 2k 2 k i 2i # S = (1 - C ) = sum(i, (-) * B(k, i) * C ) if n > 0: # / / # | | # | m n | -m n # | cos (x)*sin (x) dx or | cos (x) * sin (x) dx # | | # / / # # |m| > |n| ; m, n >0 ; m, n belong to Z - {0} # n 2 # sin (x) term is expanded here in terms of cos (x), # and then integrated. # for i in range(0, n//2 + 1): res += (S.NegativeOne**i * binomial(n//2, i) * _cos_pow_integrate(m + 2*i, x)) elif n == 0: # / # | # | 1 # | _ _ _ # | m # | cos (x) # / # res = _cos_pow_integrate(m, x) else: # n < 0 , |m| > |n| # / # | # | m n # | cos (x) sin (x) dx = # | # | #/ # / # | # 1 m-1 n+1 m - 1 | m-2 n+2 # _______ cos (x) sin (x) + _______ | cos (x) sin (x) dx # | # n + 1 n + 1 | # / res = (Rational(1, n + 1) * cos(x)**(m - 1)*sin(x)**(n + 1) + Rational(m - 1, n + 1) * trigintegrate(cos(x)**(m - 2)*sin(x)**(n + 2), x)) else: if m == n: ##Substitute sin(2x)/2 for sin(x)cos(x) and then Integrate. res = integrate((sin(2*x)*S.Half)**m, x) elif (m == -n): if n < 0: # Same as the scheme described above. # the function argument to integrate in the end will # be 1, this cannot be integrated by trigintegrate. # Hence use sympy.integrals.integrate. res = (Rational(1, n + 1) * cos(x)**(m - 1) * sin(x)**(n + 1) + Rational(m - 1, n + 1) * integrate(cos(x)**(m - 2) * sin(x)**(n + 2), x)) else: res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) + Rational(n - 1, m + 1) * integrate(cos(x)**(m + 2)*sin(x)**(n - 2), x)) if conds == 'piecewise': return Piecewise((res.subs(x, a*x) / a, Ne(a, 0)), (zz, True)) return res.subs(x, a*x) / a ``` ###Assistant : Integrate f = Mul(trig) over x. Examples ======== >>> from sympy import sin, cos, tan, sec >>> from sympy.integrals.trigonometry import trigintegrate >>> from sympy.abc import x >>> trigintegrate(sin(x)*cos(x), x) sin(x)**2/2 >>> trigintegrate(sin(x)**2, x) x/2 - sin(x)*cos(x)/2 >>> trigintegrate(tan(x)*sec(x), x) 1/cos(x) >>> trigintegrate(sin(x)*tan(x), x) -log(sin(x) - 1)/2 + log(sin(x) + 1)/2 - sin(x) References ========== .. 
[1] http://en.wikibooks.org/wiki/Calculus/Integration_techniques See Also ======== sympy.integrals.integrals.Integral.doit sympy.integrals.integrals.Integral " 1315,"def get_pywin32_module_file_attribute(module_name): from PyInstaller.utils.win32 import winutils module = winutils.import_pywin32_module(module_name) return module.__file__ "," Get the absolute path of the PyWin32 DLL specific to the PyWin32 module with the passed name. On import, each PyWin32 module: * Imports a DLL specific to that module. * Overwrites the values of all module attributes with values specific to that DLL. This includes that module's `__file__` attribute, which then provides the absolute path of that DLL. This function safely imports that module in a PyWin32-aware subprocess and returns the value of that module's `__file__` attribute. Parameters ---------- module_name : str Fully-qualified name of that module. Returns ---------- str Absolute path of that DLL. See Also ---------- `PyInstaller.utils.win32.winutils.import_pywin32_module()` For further details. ",103,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_pywin32_module_file_attribute(module_name): from PyInstaller.utils.win32 import winutils module = winutils.import_pywin32_module(module_name) return module.__file__ ``` ###Assistant : Get the absolute path of the PyWin32 DLL specific to the PyWin32 module with the passed name. On import, each PyWin32 module: * Imports a DLL specific to that module. * Overwrites the values of all module attributes with values specific to that DLL. This includes that module's `__file__` attribute, which then provides the absolute path of that DLL. This function safely imports that module in a PyWin32-aware subprocess and returns the value of that module's `__file__` attribute. Parameters ---------- module_name : str Fully-qualified name of that module. Returns ---------- str Absolute path of that DLL. See Also ---------- `PyInstaller.utils.win32.winutils.import_pywin32_module()` For further details. " 1316,"def render_warning(self, message): context = {""error"": message} return render_to_response(""sentry/pipeline-provider-error.html"", context, self.request) ",For situations when we want to display an error without triggering an issue,13,11,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def render_warning(self, message): context = {""error"": message} return render_to_response(""sentry/pipeline-provider-error.html"", context, self.request) ``` ###Assistant : For situations when we want to display an error without triggering an issue " 1317,"def test_context_filter_not_labels(self) -> None: event_id = self._send_labelled_messages_in_room() channel = self.make_request( ""GET"", ""/rooms/%s/context/%s?filter=%s"" % (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)), access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.result) events_before = channel.json_body[""events_before""] self.assertEqual( len(events_before), 1, [event[""content""] for event in events_before] ) self.assertEqual( events_before[0][""content""][""body""], ""without label"", events_before[0] ) events_after = channel.json_body[""events_after""] self.assertEqual( len(events_after), 2, [event[""content""] for event in events_after] ) self.assertEqual( events_after[0][""content""][""body""], ""with wrong label"", events_after[0] ) self.assertEqual( events_after[1][""content""][""body""], ""with two wrong labels"", events_after[1] ) ",Test that we can filter by the absence of a label on a /context request.,15,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_context_filter_not_labels(self) -> None: event_id = self._send_labelled_messages_in_room() channel = self.make_request( ""GET"", ""/rooms/%s/context/%s?filter=%s"" % (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)), access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.result) events_before = channel.json_body[""events_before""] self.assertEqual( len(events_before), 1, [event[""content""] for event in events_before] ) self.assertEqual( events_before[0][""content""][""body""], ""without label"", events_before[0] ) events_after = channel.json_body[""events_after""] self.assertEqual( len(events_after), 2, [event[""content""] for event in events_after] ) self.assertEqual( events_after[0][""content""][""body""], ""with wrong label"", events_after[0] ) self.assertEqual( events_after[1][""content""][""body""], ""with two wrong labels"", events_after[1] ) ``` ###Assistant : Test that we can filter by the absence of a label on a /context request. " 1318,"def _consistent_PT(u, v, graph_params, state_params): G1, G2 = graph_params.G1, graph_params.G2 mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping for neighbor in G1[u]: if neighbor in mapping: if G1.number_of_edges(u, neighbor) != G2.number_of_edges( v, mapping[neighbor] ): return False for neighbor in G2[v]: if neighbor in reverse_mapping: if G1.number_of_edges(u, reverse_mapping[neighbor]) != G2.number_of_edges( v, neighbor ): return False return True ","Checks the consistency of extending the mapping using the current node pair. Parameters ---------- u, v: Graph node The two candidate nodes being examined. graph_params: namedtuple Contains all the Graph-related parameters: G1,G2: NetworkX Graph or MultiGraph instances. The two graphs to check for isomorphism or monomorphism G1_labels,G2_labels: dict The label of every node in G1 and G2 respectively state_params: namedtuple Contains all the State-related parameters: mapping: dict The mapping as extended so far. Maps nodes of G1 to nodes of G2 reverse_mapping: dict The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. 
It's basically ""mapping"" reversed T1, T2: set Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are neighbors of nodes that are. T1_out, T2_out: set Ti_out contains all the nodes from Gi, that are neither in the mapping nor in Ti Returns ------- True if the pair passes all the consistency checks successfully. False otherwise. ",162,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _consistent_PT(u, v, graph_params, state_params): G1, G2 = graph_params.G1, graph_params.G2 mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping for neighbor in G1[u]: if neighbor in mapping: if G1.number_of_edges(u, neighbor) != G2.number_of_edges( v, mapping[neighbor] ): return False for neighbor in G2[v]: if neighbor in reverse_mapping: if G1.number_of_edges(u, reverse_mapping[neighbor]) != G2.number_of_edges( v, neighbor ): return False return True ``` ###Assistant : Checks the consistency of extending the mapping using the current node pair. Parameters ---------- u, v: Graph node The two candidate nodes being examined. graph_params: namedtuple Contains all the Graph-related parameters: G1,G2: NetworkX Graph or MultiGraph instances. The two graphs to check for isomorphism or monomorphism G1_labels,G2_labels: dict The label of every node in G1 and G2 respectively state_params: namedtuple Contains all the State-related parameters: mapping: dict The mapping as extended so far. Maps nodes of G1 to nodes of G2 reverse_mapping: dict The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. It's basically ""mapping"" reversed T1, T2: set Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are neighbors of nodes that are. T1_out, T2_out: set Ti_out contains all the nodes from Gi, that are neither in the mapping nor in Ti Returns ------- True if the pair passes all the consistency checks successfully. False otherwise. " 1319,"def get_variant_values_for(items): attribute_map = {} for attr in frappe.db.sql( % "", "".join([""%s""] * len(items)), tuple(items), as_dict=1, ): attribute_map.setdefault(attr[""parent""], {}) attribute_map[attr[""parent""]].update({attr[""attribute""]: attr[""attribute_value""]}) return attribute_map ","Returns variant values for items.select parent, attribute, attribute_value from `tabItem Variant Attribute` where parent in (%s) ",16,23,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_variant_values_for(items): attribute_map = {} for attr in frappe.db.sql( % "", "".join([""%s""] * len(items)), tuple(items), as_dict=1, ): attribute_map.setdefault(attr[""parent""], {}) attribute_map[attr[""parent""]].update({attr[""attribute""]: attr[""attribute_value""]}) return attribute_map ``` ###Assistant : Returns variant values for items.select parent, attribute, attribute_value from `tabItem Variant Attribute` where parent in (%s) " 1320,"def test_edit_post_locked_by_self(self): # Lock the snippet self.lock_snippet(self.user) # Try to edit the snippet response = self.client.post( self.get_url(""edit""), {""text"": ""Edited while locked""}, follow=True, ) self.refresh_snippet() # Should not show error message self.assertNotContains( response, f""The {self.model_name} could not be saved as it is locked"", ) # Check that the snippet is still locked self.assertTrue(self.snippet.locked) # Check that the snippet is edited self.assertEqual(self.snippet.text, ""Edited while locked"") ",A user can edit a snippet that is locked by themselves.,11,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_edit_post_locked_by_self(self): # Lock the snippet self.lock_snippet(self.user) # Try to edit the snippet response = self.client.post( self.get_url(""edit""), {""text"": ""Edited while locked""}, follow=True, ) self.refresh_snippet() # Should not show error message self.assertNotContains( response, f""The {self.model_name} could not be saved as it is locked"", ) # Check that the snippet is still locked self.assertTrue(self.snippet.locked) # Check that the snippet is edited self.assertEqual(self.snippet.text, ""Edited while locked"") ``` ###Assistant : A user can edit a snippet that is locked by themselves. " 1321,"def update_metrics(self, targets, predictions): for of_name, of_obj in self.output_features.items(): of_obj.update_metrics(targets[of_name], predictions[of_name]) eval_loss, additional_losses = self.eval_loss(targets, predictions) self.eval_loss_metric.update(eval_loss) self.eval_additional_losses_metrics.update(additional_losses) ",Updates the model's metrics given targets and predictions.,8,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_metrics(self, targets, predictions): for of_name, of_obj in self.output_features.items(): of_obj.update_metrics(targets[of_name], predictions[of_name]) eval_loss, additional_losses = self.eval_loss(targets, predictions) self.eval_loss_metric.update(eval_loss) self.eval_additional_losses_metrics.update(additional_losses) ``` ###Assistant : Updates the model's metrics given targets and predictions. " 1322,"def tick_right(self): label = True if 'label1On' in self._major_tick_kw: label = (self._major_tick_kw['label1On'] or self._major_tick_kw['label2On']) self.set_ticks_position('right') # if labels were turned off before this was called # leave them off self.set_tick_params(which='both', labelright=label) "," Move ticks and ticklabels (if present) to the right of the Axes. ",12,31,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def tick_right(self): label = True if 'label1On' in self._major_tick_kw: label = (self._major_tick_kw['label1On'] or self._major_tick_kw['label2On']) self.set_ticks_position('right') # if labels were turned off before this was called # leave them off self.set_tick_params(which='both', labelright=label) ``` ###Assistant : Move ticks and ticklabels (if present) to the right of the Axes. " 1323,"def test_show_message_twice(view, info1, info2, count): view.show_message(info1) view.show_message(info2) assert len(view._messages) == count ",Show the exact same message twice -> only one should be shown.,12,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_show_message_twice(view, info1, info2, count): view.show_message(info1) view.show_message(info2) assert len(view._messages) == count ``` ###Assistant : Show the exact same message twice -> only one should be shown. " 1324,"def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): bboxes1 = get_box_tensor(bboxes1) bboxes2 = get_box_tensor(bboxes2) assert bboxes1.size(-1) in [0, 4, 5] assert bboxes2.size(-1) in [0, 4, 5] if bboxes2.size(-1) == 5: bboxes2 = bboxes2[..., :4] if bboxes1.size(-1) == 5: bboxes1 = bboxes1[..., :4] if self.dtype == 'fp16': # change tensor type to save cpu and cuda memory and keep speed bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype) bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype) overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) if not overlaps.is_cuda and overlaps.dtype == torch.float16: # resume cpu float32 overlaps = overlaps.float() return overlaps return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) ","Calculate IoU between 2D bboxes. Args: bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4) in format, or shape (m, 5) in format. bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4) in format, shape (m, 5) in format, or be empty. If ``is_aligned `` is ``True``, then m and n must be equal. mode (str): ""iou"" (intersection over union), ""iof"" (intersection over foreground), or ""giou"" (generalized intersection over union). is_aligned (bool, optional): If True, then m and n must be equal. Default False. Returns: Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,) ",115,94,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): bboxes1 = get_box_tensor(bboxes1) bboxes2 = get_box_tensor(bboxes2) assert bboxes1.size(-1) in [0, 4, 5] assert bboxes2.size(-1) in [0, 4, 5] if bboxes2.size(-1) == 5: bboxes2 = bboxes2[..., :4] if bboxes1.size(-1) == 5: bboxes1 = bboxes1[..., :4] if self.dtype == 'fp16': # change tensor type to save cpu and cuda memory and keep speed bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype) bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype) overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) if not overlaps.is_cuda and overlaps.dtype == torch.float16: # resume cpu float32 overlaps = overlaps.float() return overlaps return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) ``` ###Assistant : Calculate IoU between 2D bboxes. Args: bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4) in format, or shape (m, 5) in format. bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4) in format, shape (m, 5) in format, or be empty. 
If ``is_aligned `` is ``True``, then m and n must be equal. mode (str): ""iou"" (intersection over union), ""iof"" (intersection over foreground), or ""giou"" (generalized intersection over union). is_aligned (bool, optional): If True, then m and n must be equal. Default False. Returns: Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,) " 1325,"def binning(self) -> List[List[str]]: return self._binning_linear_threshold(multiplier=100) "," Create bins to split linearly from the lowest to the highest sample value Returns ------- list List of bins of filenames ",21,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def binning(self) -> List[List[str]]: return self._binning_linear_threshold(multiplier=100) ``` ###Assistant : Create bins to split linearly from the lowest to the highest sample value Returns ------- list List of bins of filenames " 1326,"def connect(self, publish_port, connect_callback=None, disconnect_callback=None): raise NotImplementedError "," Create a network connection to the PublishServer or broker. ",10,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def connect(self, publish_port, connect_callback=None, disconnect_callback=None): raise NotImplementedError ``` ###Assistant : Create a network connection to the PublishServer or broker. " 1327,"def contains(self, g): if not isinstance(g, FreeGroupElement): return False elif self != g.group: return False else: return True ","Tests if Free Group element ``g`` belongs to self, ``G``. In mathematical terms any linear combination of generators of a Free Group is contained in it. Examples ======== >>> from sympy.combinatorics import free_group >>> f, x, y, z = free_group(""x y z"") >>> f.contains(x**3*y**2) True ",45,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def contains(self, g): if not isinstance(g, FreeGroupElement): return False elif self != g.group: return False else: return True ``` ###Assistant : Tests if Free Group element ``g`` belongs to self, ``G``. In mathematical terms any linear combination of generators of a Free Group is contained in it. Examples ======== >>> from sympy.combinatorics import free_group >>> f, x, y, z = free_group(""x y z"") >>> f.contains(x**3*y**2) True " 1328,"def get_pledged_security_qty(loan): current_pledges = {} unpledges = frappe._dict( frappe.db.sql( , (loan), ) ) pledges = frappe._dict( frappe.db.sql( , (loan), ) ) for security, qty in pledges.items(): current_pledges.setdefault(security, qty) current_pledges[security] -= unpledges.get(security, 0.0) return current_pledges "," SELECT u.loan_security, sum(u.qty) as qty FROM `tabLoan Security Unpledge` up, `tabUnpledge` u WHERE up.loan = %s AND u.parent = up.name AND up.status = 'Approved' GROUP BY u.loan_security SELECT p.loan_security, sum(p.qty) as qty FROM `tabLoan Security Pledge` lp, `tabPledge`p WHERE lp.loan = %s AND p.parent = lp.name AND lp.status = 'Pledged' GROUP BY p.loan_security ",53,34,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_pledged_security_qty(loan): current_pledges = {} unpledges = frappe._dict( frappe.db.sql( , (loan), ) ) pledges = frappe._dict( frappe.db.sql( , (loan), ) ) for security, qty in pledges.items(): current_pledges.setdefault(security, qty) current_pledges[security] -= unpledges.get(security, 0.0) return current_pledges ``` ###Assistant : SELECT u.loan_security, sum(u.qty) as qty FROM `tabLoan Security Unpledge` up, `tabUnpledge` u WHERE up.loan = %s AND u.parent = up.name AND up.status = 'Approved' GROUP BY u.loan_security SELECT p.loan_security, sum(p.qty) as qty FROM `tabLoan Security Pledge` lp, `tabPledge`p WHERE lp.loan = %s AND p.parent = lp.name AND lp.status = 'Pledged' GROUP BY p.loan_security " 1329,"def get_select(self): select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) if self.query.default_cols: cols = self.get_default_columns() else: # self.query.select is a special case. These columns never go to # any model. cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { ""model"": self.query.model, ""select_fields"": select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select) klass_info[""related_klass_infos""] = related_klass_infos "," Return three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the ""AS alias"" for the column (possibly None). The klass_info structure contains the following information: - The base model of the query. - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. ",90,101,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_select(self): select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) if self.query.default_cols: cols = self.get_default_columns() else: # self.query.select is a special case. These columns never go to # any model. 
cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { ""model"": self.query.model, ""select_fields"": select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select) klass_info[""related_klass_infos""] = related_klass_infos ``` ###Assistant : Return three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the ""AS alias"" for the column (possibly None). The klass_info structure contains the following information: - The base model of the query. - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. " 1330,"def set_module_collection_mode(self, name, mode): if name is None: name = self.__name__ if mode is None: self._module_collection_mode.pop(name) else: self._module_collection_mode[name] = mode ",""" Set the package/module collection mode for the specified module name. If `name` is `None`, the hooked module/package name is used. Valid values for `mode` are: `'pyc'`, `'py'`, and `None`. ",30,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_module_collection_mode(self, name, mode): if name is None: name = self.__name__ if mode is None: self._module_collection_mode.pop(name) else: self._module_collection_mode[name] = mode ``` ###Assistant : "" Set the package/module collection mode for the specified module name. If `name` is `None`, the hooked module/package name is used. Valid values for `mode` are: `'pyc'`, `'py'`, and `None`. " 1331,"def write_file (filename, contents): f = open(filename, ""w"") try: for line in contents: f.write(line + ""\n"") finally: f.close() ","Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it. ",19,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def write_file (filename, contents): f = open(filename, ""w"") try: for line in contents: f.write(line + ""\n"") finally: f.close() ``` ###Assistant : Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it. " 1332,"async def test_flow_run_policy_is_backwards_compatible(self): empty_new_policy = schemas.core.FlowRunPolicy() # should not raise an error self.OldFlowRunPolicy(**empty_new_policy.dict()) "," In version 2.1.1 and prior, the FlowRunPolicy schema required two properties, `max_retries` and `retry_delay_seconds`. These properties are deprecated. This test ensures old clients can load new FlowRunPolicySchemas. It can be removed when the corresponding properties are removed. ",37,13,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_flow_run_policy_is_backwards_compatible(self): empty_new_policy = schemas.core.FlowRunPolicy() # should not raise an error self.OldFlowRunPolicy(**empty_new_policy.dict()) ``` ###Assistant : In version 2.1.1 and prior, the FlowRunPolicy schema required two properties, `max_retries` and `retry_delay_seconds`. These properties are deprecated. This test ensures old clients can load new FlowRunPolicySchemas. It can be removed when the corresponding properties are removed. " 1333,"def test_help_text_examples_are_contextualized(): rendered_inline = render(spacing_invalid_value(""padding"", ""inline"")) assert ""widget.styles.padding"" in rendered_inline rendered_css = render(spacing_invalid_value(""padding"", ""css"")) assert ""padding:"" in rendered_css ","Ensure that if the user is using CSS, they see CSS-specific examples and if they're using inline styles they see inline-specific examples.",22,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_help_text_examples_are_contextualized(): rendered_inline = render(spacing_invalid_value(""padding"", ""inline"")) assert ""widget.styles.padding"" in rendered_inline rendered_css = render(spacing_invalid_value(""padding"", ""css"")) assert ""padding:"" in rendered_css ``` ###Assistant : Ensure that if the user is using CSS, they see CSS-specific examples and if they're using inline styles they see inline-specific examples. " 1334,"def test_reverse_proxy(tctx, keep_host_header): server = Placeholder(Server) tctx.options.mode = ""reverse:http://localhost:8000"" tctx.options.connection_strategy = ""lazy"" tctx.options.keep_host_header = keep_host_header assert ( Playbook(modes.ReverseProxy(tctx), hooks=False) >> DataReceived( tctx.client, b""GET /foo HTTP/1.1\r\n"" b""Host: example.com\r\n\r\n"" ) << NextLayerHook(Placeholder(NextLayer)) >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent)) << OpenConnection(server) >> reply(None) << SendData( server, b""GET /foo HTTP/1.1\r\n"" b""Host: "" + (b""example.com"" if keep_host_header else b""localhost:8000"") + b""\r\n\r\n"", ) >> DataReceived(server, b""HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"") << SendData(tctx.client, b""HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"") ) assert server().address == (""localhost"", 8000) @pytest.mark.parametrize(""patch"", [True, False]) @pytest.mark.parametrize(""connection_strategy"", [""eager"", ""lazy""])","Test mitmproxy in reverse proxy mode. - make sure that we connect to the right host - make sure that we respect keep_host_header - make sure that we include non-standard ports in the host header (#4280) ",36,80,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_reverse_proxy(tctx, keep_host_header): server = Placeholder(Server) tctx.options.mode = ""reverse:http://localhost:8000"" tctx.options.connection_strategy = ""lazy"" tctx.options.keep_host_header = keep_host_header assert ( Playbook(modes.ReverseProxy(tctx), hooks=False) >> DataReceived( tctx.client, b""GET /foo HTTP/1.1\r\n"" b""Host: example.com\r\n\r\n"" ) << NextLayerHook(Placeholder(NextLayer)) >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent)) << OpenConnection(server) >> reply(None) << SendData( server, b""GET /foo HTTP/1.1\r\n"" b""Host: "" + (b""example.com"" if keep_host_header else b""localhost:8000"") + b""\r\n\r\n"", ) >> DataReceived(server, b""HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"") << SendData(tctx.client, b""HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"") ) assert server().address == (""localhost"", 8000) @pytest.mark.parametrize(""patch"", [True, False]) @pytest.mark.parametrize(""connection_strategy"", [""eager"", ""lazy""]) ``` ###Assistant : Test mitmproxy in reverse proxy mode. - make sure that we connect to the right host - make sure that we respect keep_host_header - make sure that we include non-standard ports in the host header (#4280) " 1335,"def _check_filter_horizontal(self, obj): if not isinstance(obj.filter_horizontal, (list, tuple)): return must_be( ""a list or tuple"", option=""filter_horizontal"", obj=obj, id=""admin.E018"" ) else: return list( chain.from_iterable( self._check_filter_item( obj, field_name, ""filter_horizontal[%d]"" % index ) for index, field_name in enumerate(obj.filter_horizontal) ) ) ",Check that filter_horizontal is a sequence of field names.,9,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_filter_horizontal(self, obj): if not isinstance(obj.filter_horizontal, (list, tuple)): return must_be( ""a list or tuple"", option=""filter_horizontal"", obj=obj, id=""admin.E018"" ) else: return list( chain.from_iterable( self._check_filter_item( obj, field_name, ""filter_horizontal[%d]"" % index ) for index, field_name in enumerate(obj.filter_horizontal) ) ) ``` ###Assistant : Check that filter_horizontal is a sequence of field names. " 1336,"def poly_intersection(poly_det, poly_gt): assert isinstance(poly_det, plg.Polygon) assert isinstance(poly_gt, plg.Polygon) poly_inter = poly_det & poly_gt if len(poly_inter) == 0: return 0, poly_inter return poly_inter.area(), poly_inter ","Calculate the intersection area between two polygon. Args: poly_det (Polygon): A polygon predicted by detector. poly_gt (Polygon): A gt polygon. Returns: intersection_area (float): The intersection area between two polygons. ",29,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def poly_intersection(poly_det, poly_gt): assert isinstance(poly_det, plg.Polygon) assert isinstance(poly_gt, plg.Polygon) poly_inter = poly_det & poly_gt if len(poly_inter) == 0: return 0, poly_inter return poly_inter.area(), poly_inter ``` ###Assistant : Calculate the intersection area between two polygon. Args: poly_det (Polygon): A polygon predicted by detector. poly_gt (Polygon): A gt polygon. Returns: intersection_area (float): The intersection area between two polygons. 
" 1337,"def size(self) -> int: return sum(len(x.data) for x in [*self.answers, *self.authorities, *self.additionals]) ",Returns the cumulative data size of all resource record sections.,10,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def size(self) -> int: return sum(len(x.data) for x in [*self.answers, *self.authorities, *self.additionals]) ``` ###Assistant : Returns the cumulative data size of all resource record sections. " 1338,"async def get_job_submission_info(self): jobs = {} fetched_jobs = await self._job_info_client.get_all_jobs() for ( job_submission_id, job_info, ) in fetched_jobs.items(): if job_info is not None: entry = { ""job_submission_id"": job_submission_id, ""status"": job_info.status, ""message"": job_info.message, ""error_type"": job_info.error_type, ""start_time"": job_info.start_time, ""end_time"": job_info.end_time, ""metadata"": job_info.metadata, ""runtime_env"": job_info.runtime_env, ""entrypoint"": job_info.entrypoint, } jobs[job_submission_id] = entry return jobs ",Info for Ray job submission. Here a job can have 0 or many drivers.,14,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def get_job_submission_info(self): jobs = {} fetched_jobs = await self._job_info_client.get_all_jobs() for ( job_submission_id, job_info, ) in fetched_jobs.items(): if job_info is not None: entry = { ""job_submission_id"": job_submission_id, ""status"": job_info.status, ""message"": job_info.message, ""error_type"": job_info.error_type, ""start_time"": job_info.start_time, ""end_time"": job_info.end_time, ""metadata"": job_info.metadata, ""runtime_env"": job_info.runtime_env, ""entrypoint"": job_info.entrypoint, } jobs[job_submission_id] = entry return jobs ``` ###Assistant : Info for Ray job submission. Here a job can have 0 or many drivers. " 1339,"def _gen_sieve_array(M, factor_base): sieve_array = [0]*(2*M + 1) for factor in factor_base: if factor.soln1 is None: #The prime does not divides a continue for idx in range((M + factor.soln1) % factor.prime, 2*M, factor.prime): sieve_array[idx] += factor.log_p if factor.prime == 2: continue #if prime is 2 then sieve only with soln_1_p for idx in range((M + factor.soln2) % factor.prime, 2*M, factor.prime): sieve_array[idx] += factor.log_p return sieve_array ","Sieve Stage of the Quadratic Sieve. For every prime in the factor_base that does not divide the coefficient `a` we add log_p over the sieve_array such that ``-M <= soln1 + i*p <= M`` and ``-M <= soln2 + i*p <= M`` where `i` is an integer. When p = 2 then log_p is only added using ``-M <= soln1 + i*p <= M``. Parameters ========== M : sieve interval factor_base : factor_base primes ",74,65,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _gen_sieve_array(M, factor_base): sieve_array = [0]*(2*M + 1) for factor in factor_base: if factor.soln1 is None: #The prime does not divides a continue for idx in range((M + factor.soln1) % factor.prime, 2*M, factor.prime): sieve_array[idx] += factor.log_p if factor.prime == 2: continue #if prime is 2 then sieve only with soln_1_p for idx in range((M + factor.soln2) % factor.prime, 2*M, factor.prime): sieve_array[idx] += factor.log_p return sieve_array ``` ###Assistant : Sieve Stage of the Quadratic Sieve. 
For every prime in the factor_base that does not divide the coefficient `a` we add log_p over the sieve_array such that ``-M <= soln1 + i*p <= M`` and ``-M <= soln2 + i*p <= M`` where `i` is an integer. When p = 2 then log_p is only added using ``-M <= soln1 + i*p <= M``. Parameters ========== M : sieve interval factor_base : factor_base primes " 1340,"def rescue_docarray(): try: import docarray as docarray __docarray_version__ = docarray.__version__ except AttributeError: # Being here means docarray is not installed correctly, attempt to reinstall it # as recommended by pip https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program import subprocess subprocess.check_call( [sys.executable, '-m', 'pip', 'uninstall', '--yes', 'docarray'] ) subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'docarray']) ","Upgrading from 2.x to 3.x is broken (https://github.com/jina-ai/jina/issues/4194) This function checks if docarray is broken and if so attempts to rescue it ",22,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rescue_docarray(): try: import docarray as docarray __docarray_version__ = docarray.__version__ except AttributeError: # Being here means docarray is not installed correctly, attempt to reinstall it # as recommended by pip https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program import subprocess subprocess.check_call( [sys.executable, '-m', 'pip', 'uninstall', '--yes', 'docarray'] ) subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'docarray']) ``` ###Assistant : Upgrading from 2.x to 3.x is broken (https://github.com/jina-ai/jina/issues/4194) This function checks if docarray is broken and if so attempts to rescue it " 1341,"def transform(self, X): msg = ( ""%(name)s is not fitted. Call fit to set the parameters before"" "" calling transform"" ) check_is_fitted(self, msg=msg) X = self._validate_data(X, accept_sparse=""csr"", reset=False) check_non_negative(X, ""X in AdditiveChi2Sampler.transform"") sparse = sp.issparse(X) # zeroth component # 1/cosh = sech # cosh(0) = 1.0 transf = self._transform_sparse if sparse else self._transform_dense return transf(X) ","Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : {ndarray, sparse matrix}, \ shape = (n_samples, n_features * (2*sample_steps - 1)) Whether the return value is an array or sparse matrix depends on the type of the input X. ",66,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transform(self, X): msg = ( ""%(name)s is not fitted. Call fit to set the parameters before"" "" calling transform"" ) check_is_fitted(self, msg=msg) X = self._validate_data(X, accept_sparse=""csr"", reset=False) check_non_negative(X, ""X in AdditiveChi2Sampler.transform"") sparse = sp.issparse(X) # zeroth component # 1/cosh = sech # cosh(0) = 1.0 transf = self._transform_sparse if sparse else self._transform_dense return transf(X) ``` ###Assistant : Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. 
Returns ------- X_new : {ndarray, sparse matrix}, \ shape = (n_samples, n_features * (2*sample_steps - 1)) Whether the return value is an array or sparse matrix depends on the type of the input X. " 1342,"def get_repository_root(cls, location): # type: (str) -> Optional[str] if cls.is_repository_directory(location): return location return None "," Return the ""root"" (top-level) directory controlled by the vcs, or `None` if the directory is not in any. It is meant to be overridden to implement smarter detection mechanisms for specific vcs. This can do more than is_repository_directory() alone. For example, the Git override checks that Git is actually available. ",50,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_repository_root(cls, location): # type: (str) -> Optional[str] if cls.is_repository_directory(location): return location return None ``` ###Assistant : Return the ""root"" (top-level) directory controlled by the vcs, or `None` if the directory is not in any. It is meant to be overridden to implement smarter detection mechanisms for specific vcs. This can do more than is_repository_directory() alone. For example, the Git override checks that Git is actually available. " 1343,"def test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client(oauth_config, configured_catalog): source = SourceHubspot() all_records = list(source.read(logger, config=oauth_config, catalog=configured_catalog, state=None)) records = [record for record in all_records if record.type == Type.RECORD] assert not records "," If 'quotes' stream is not in the client, it should skip it. ",12,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client(oauth_config, configured_catalog): source = SourceHubspot() all_records = list(source.read(logger, config=oauth_config, catalog=configured_catalog, state=None)) records = [record for record in all_records if record.type == Type.RECORD] assert not records ``` ###Assistant : If 'quotes' stream is not in the client, it should skip it. " 1344,"def get_conda_env_dir(env_name): conda_prefix = os.environ.get(""CONDA_PREFIX"") if conda_prefix is None: # The caller is neither in a conda env or in (base) env. This is rare # because by default, new terminals start in (base), but we can still # support this case. conda_exe = os.environ.get(""CONDA_EXE"") if conda_exe is None: raise ValueError( ""Cannot find environment variables set by conda. "" ""Please verify conda is installed."" ) # Example: CONDA_EXE=$HOME/anaconda3/bin/python # Strip out /bin/python by going up two parent directories. conda_prefix = str(Path(conda_exe).parent.parent) # There are two cases: # 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and # CONDA_PREFIX=$HOME/anaconda3 # 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and # CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name if os.environ.get(""CONDA_DEFAULT_ENV"") == ""base"": # Caller's curent environment is (base). # Not recommended by conda, but we can still support it. if env_name == ""base"": # Desired environment is (base), located at e.g. $HOME/anaconda3 env_dir = conda_prefix else: # Desired environment is user-created, e.g. 
# $HOME/anaconda3/envs/$env_name env_dir = os.path.join(conda_prefix, ""envs"", env_name) else: # Now `conda_prefix` should be something like # $HOME/anaconda3/envs/$current_env_name # We want to replace the last component with the desired env name. conda_envs_dir = os.path.split(conda_prefix)[0] env_dir = os.path.join(conda_envs_dir, env_name) if not os.path.isdir(env_dir): raise ValueError( ""conda env "" + env_name + "" not found in conda envs directory. Run `conda env list` to "" + ""verify the name is correct."" ) return env_dir ","Find and validate the conda directory for a given conda environment. For example, given the environment name `tf1`, this function checks the existence of the corresponding conda directory, e.g. `/Users/scaly/anaconda3/envs/tf1`, and returns it. ",33,226,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_conda_env_dir(env_name): conda_prefix = os.environ.get(""CONDA_PREFIX"") if conda_prefix is None: # The caller is neither in a conda env or in (base) env. This is rare # because by default, new terminals start in (base), but we can still # support this case. conda_exe = os.environ.get(""CONDA_EXE"") if conda_exe is None: raise ValueError( ""Cannot find environment variables set by conda. "" ""Please verify conda is installed."" ) # Example: CONDA_EXE=$HOME/anaconda3/bin/python # Strip out /bin/python by going up two parent directories. conda_prefix = str(Path(conda_exe).parent.parent) # There are two cases: # 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and # CONDA_PREFIX=$HOME/anaconda3 # 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and # CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name if os.environ.get(""CONDA_DEFAULT_ENV"") == ""base"": # Caller's curent environment is (base). # Not recommended by conda, but we can still support it. if env_name == ""base"": # Desired environment is (base), located at e.g. $HOME/anaconda3 env_dir = conda_prefix else: # Desired environment is user-created, e.g. # $HOME/anaconda3/envs/$env_name env_dir = os.path.join(conda_prefix, ""envs"", env_name) else: # Now `conda_prefix` should be something like # $HOME/anaconda3/envs/$current_env_name # We want to replace the last component with the desired env name. conda_envs_dir = os.path.split(conda_prefix)[0] env_dir = os.path.join(conda_envs_dir, env_name) if not os.path.isdir(env_dir): raise ValueError( ""conda env "" + env_name + "" not found in conda envs directory. Run `conda env list` to "" + ""verify the name is correct."" ) return env_dir ``` ###Assistant : Find and validate the conda directory for a given conda environment. For example, given the environment name `tf1`, this function checks the existence of the corresponding conda directory, e.g. `/Users/scaly/anaconda3/envs/tf1`, and returns it. " 1345,"async def test_switch_change_outlet_state(hass, utcnow): helper = await setup_test_component(hass, create_switch_service) await hass.services.async_call( ""switch"", ""turn_on"", {""entity_id"": ""switch.testdevice""}, blocking=True ) helper.async_assert_service_values( ServicesTypes.OUTLET, { CharacteristicsTypes.ON: 1, }, ) await hass.services.async_call( ""switch"", ""turn_off"", {""entity_id"": ""switch.testdevice""}, blocking=True ) helper.async_assert_service_values( ServicesTypes.OUTLET, { CharacteristicsTypes.ON: 0, }, ) ",Test that we can turn a HomeKit outlet on and off again.,12,39,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_switch_change_outlet_state(hass, utcnow): helper = await setup_test_component(hass, create_switch_service) await hass.services.async_call( ""switch"", ""turn_on"", {""entity_id"": ""switch.testdevice""}, blocking=True ) helper.async_assert_service_values( ServicesTypes.OUTLET, { CharacteristicsTypes.ON: 1, }, ) await hass.services.async_call( ""switch"", ""turn_off"", {""entity_id"": ""switch.testdevice""}, blocking=True ) helper.async_assert_service_values( ServicesTypes.OUTLET, { CharacteristicsTypes.ON: 0, }, ) ``` ###Assistant : Test that we can turn a HomeKit outlet on and off again. " 1346,"def _update_label_position(self, renderer): if not self._autolabelpos: return # get bounding boxes for this axis and any siblings # that have been set by `fig.align_xlabels()` bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer) x, y = self.label.get_position() if self.label_position == 'bottom': try: spine = self.axes.spines['bottom'] spinebbox = spine.get_window_extent() except KeyError: # use axes if spine doesn't exist spinebbox = self.axes.bbox bbox = mtransforms.Bbox.union(bboxes + [spinebbox]) bottom = bbox.y0 self.label.set_position( (x, bottom - self.labelpad * self.figure.dpi / 72) ) else: try: spine = self.axes.spines['top'] spinebbox = spine.get_window_extent() except KeyError: # use axes if spine doesn't exist spinebbox = self.axes.bbox bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox]) top = bbox.y1 self.label.set_position( (x, top + self.labelpad * self.figure.dpi / 72) ) "," Update the label position based on the bounding box enclosing all the ticklabels and axis spine ",16,111,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _update_label_position(self, renderer): if not self._autolabelpos: return # get bounding boxes for this axis and any siblings # that have been set by `fig.align_xlabels()` bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer) x, y = self.label.get_position() if self.label_position == 'bottom': try: spine = self.axes.spines['bottom'] spinebbox = spine.get_window_extent() except KeyError: # use axes if spine doesn't exist spinebbox = self.axes.bbox bbox = mtransforms.Bbox.union(bboxes + [spinebbox]) bottom = bbox.y0 self.label.set_position( (x, bottom - self.labelpad * self.figure.dpi / 72) ) else: try: spine = self.axes.spines['top'] spinebbox = spine.get_window_extent() except KeyError: # use axes if spine doesn't exist spinebbox = self.axes.bbox bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox]) top = bbox.y1 self.label.set_position( (x, top + self.labelpad * self.figure.dpi / 72) ) ``` ###Assistant : Update the label position based on the bounding box enclosing all the ticklabels and axis spine " 1347,"def poly_union(poly_det, poly_gt): assert isinstance(poly_det, plg.Polygon) assert isinstance(poly_gt, plg.Polygon) area_det = poly_det.area() area_gt = poly_gt.area() area_inters, _ = poly_intersection(poly_det, poly_gt) return area_det + area_gt - area_inters ","Calculate the union area between two polygon. Args: poly_det (Polygon): A polygon predicted by detector. poly_gt (Polygon): A gt polygon. Returns: union_area (float): The union area between two polygons. ",29,26,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def poly_union(poly_det, poly_gt): assert isinstance(poly_det, plg.Polygon) assert isinstance(poly_gt, plg.Polygon) area_det = poly_det.area() area_gt = poly_gt.area() area_inters, _ = poly_intersection(poly_det, poly_gt) return area_det + area_gt - area_inters ``` ###Assistant : Calculate the union area between two polygon. Args: poly_det (Polygon): A polygon predicted by detector. poly_gt (Polygon): A gt polygon. Returns: union_area (float): The union area between two polygons. " 1348,"def test_whitelist_idna_result(self) -> None: config: JsonDict = { ""federation_certificate_verification_whitelist"": [ ""example.com"", ""*.xn--eckwd4c7c.xn--zckzah"", ] } t = TestConfig() t.tls.read_config(config, config_dir_path="""", data_dir_path="""") cf = FederationPolicyForHTTPS(cast(HomeServerConfig, t)) # Not in the whitelist opts = cf.get_options(b""notexample.com"") assert isinstance(opts, SSLClientConnectionCreator) self.assertTrue(opts._verifier._verify_certs) # Caught by the wildcard opts = cf.get_options(idna.encode(""テスト.ドメイン.テスト"")) assert isinstance(opts, SSLClientConnectionCreator) self.assertFalse(opts._verifier._verify_certs) "," The federation certificate whitelist will match on IDNA encoded names. ",10,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_whitelist_idna_result(self) -> None: config: JsonDict = { ""federation_certificate_verification_whitelist"": [ ""example.com"", ""*.xn--eckwd4c7c.xn--zckzah"", ] } t = TestConfig() t.tls.read_config(config, config_dir_path="""", data_dir_path="""") cf = FederationPolicyForHTTPS(cast(HomeServerConfig, t)) # Not in the whitelist opts = cf.get_options(b""notexample.com"") assert isinstance(opts, SSLClientConnectionCreator) self.assertTrue(opts._verifier._verify_certs) # Caught by the wildcard opts = cf.get_options(idna.encode(""テスト.ドメイン.テスト"")) assert isinstance(opts, SSLClientConnectionCreator) self.assertFalse(opts._verifier._verify_certs) ``` ###Assistant : The federation certificate whitelist will match on IDNA encoded names. " 1349,"def simple_test_rpn(self, x, img_metas): rpn_outs = self(x) proposal_list = self.get_results(*rpn_outs, img_metas=img_metas) return proposal_list ","Test without augmentation, only for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Proposals of each image, each item has shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). ",51,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def simple_test_rpn(self, x, img_metas): rpn_outs = self(x) proposal_list = self.get_results(*rpn_outs, img_metas=img_metas) return proposal_list ``` ###Assistant : Test without augmentation, only for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Proposals of each image, each item has shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). " 1350,"def _get_tick_boxes_siblings(self, renderer): # Get the Grouper keeping track of x or y label groups for this figure. 
axis_names = [ name for name, axis in self.axes._get_axis_map().items() if name in self.figure._align_label_groups and axis is self] if len(axis_names) != 1: return [], [] axis_name, = axis_names grouper = self.figure._align_label_groups[axis_name] bboxes = [] bboxes2 = [] # If we want to align labels from other Axes: for ax in grouper.get_siblings(self.axes): axis = getattr(ax, f""{axis_name}axis"") ticks_to_draw = axis._update_ticks() tlb, tlb2 = axis._get_ticklabel_bboxes(ticks_to_draw, renderer) bboxes.extend(tlb) bboxes2.extend(tlb2) return bboxes, bboxes2 "," Get the bounding boxes for this `.axis` and its siblings as set by `.Figure.align_xlabels` or `.Figure.align_ylabels`. By default it just gets bboxes for self. ",24,85,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_tick_boxes_siblings(self, renderer): # Get the Grouper keeping track of x or y label groups for this figure. axis_names = [ name for name, axis in self.axes._get_axis_map().items() if name in self.figure._align_label_groups and axis is self] if len(axis_names) != 1: return [], [] axis_name, = axis_names grouper = self.figure._align_label_groups[axis_name] bboxes = [] bboxes2 = [] # If we want to align labels from other Axes: for ax in grouper.get_siblings(self.axes): axis = getattr(ax, f""{axis_name}axis"") ticks_to_draw = axis._update_ticks() tlb, tlb2 = axis._get_ticklabel_bboxes(ticks_to_draw, renderer) bboxes.extend(tlb) bboxes2.extend(tlb2) return bboxes, bboxes2 ``` ###Assistant : Get the bounding boxes for this `.axis` and its siblings as set by `.Figure.align_xlabels` or `.Figure.align_ylabels`. By default it just gets bboxes for self. " 1351,"def default(method): method._is_default = True # pylint: disable=protected-access return method ",Decorates a method to detect overrides in subclasses.,8,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def default(method): method._is_default = True # pylint: disable=protected-access return method ``` ###Assistant : Decorates a method to detect overrides in subclasses. " 1352,"def test_help_tooltip(self): st.camera_input(""the label"", help=""help_label"") c = self.get_delta_from_queue().new_element.camera_input self.assertEqual(c.help, ""help_label"") ",Test that it can be called using a string for type parameter.,12,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_help_tooltip(self): st.camera_input(""the label"", help=""help_label"") c = self.get_delta_from_queue().new_element.camera_input self.assertEqual(c.help, ""help_label"") ``` ###Assistant : Test that it can be called using a string for type parameter. 
" 1353,"async def test_multiple_event_images(hass, auth): subscriber = await async_setup_camera(hass, DEVICE_TRAITS, auth=auth) assert len(hass.states.async_all()) == 1 assert hass.states.get(""camera.my_camera"") event_timestamp = utcnow() await subscriber.async_receive_event( make_motion_event(event_session_id=""event-session-1"", timestamp=event_timestamp) ) await hass.async_block_till_done() auth.responses = [ # Fake response from API that returns url image aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE), # Fake response for the image content fetch aiohttp.web.Response(body=IMAGE_BYTES_FROM_EVENT), # Image is refetched after being cleared by expiration alarm aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE), aiohttp.web.Response(body=b""updated image bytes""), ] image = await async_get_image(hass) assert image.content == IMAGE_BYTES_FROM_EVENT next_event_timestamp = event_timestamp + datetime.timedelta(seconds=25) await subscriber.async_receive_event( make_motion_event( event_id=""updated-event-id"", event_session_id=""event-session-2"", timestamp=next_event_timestamp, ) ) await hass.async_block_till_done() image = await async_get_image(hass) assert image.content == b""updated image bytes"" ",Test fallback for an event event image that has been cleaned up on expiration.,14,96,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_multiple_event_images(hass, auth): subscriber = await async_setup_camera(hass, DEVICE_TRAITS, auth=auth) assert len(hass.states.async_all()) == 1 assert hass.states.get(""camera.my_camera"") event_timestamp = utcnow() await subscriber.async_receive_event( make_motion_event(event_session_id=""event-session-1"", timestamp=event_timestamp) ) await hass.async_block_till_done() auth.responses = [ # Fake response from API that returns url image aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE), # Fake response for the image content fetch aiohttp.web.Response(body=IMAGE_BYTES_FROM_EVENT), # Image is refetched after being cleared by expiration alarm aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE), aiohttp.web.Response(body=b""updated image bytes""), ] image = await async_get_image(hass) assert image.content == IMAGE_BYTES_FROM_EVENT next_event_timestamp = event_timestamp + datetime.timedelta(seconds=25) await subscriber.async_receive_event( make_motion_event( event_id=""updated-event-id"", event_session_id=""event-session-2"", timestamp=next_event_timestamp, ) ) await hass.async_block_till_done() image = await async_get_image(hass) assert image.content == b""updated image bytes"" ``` ###Assistant : Test fallback for an event event image that has been cleaned up on expiration. 
" 1354,"def test_delete_same_room_twice(self) -> None: body = {""new_room_user_id"": self.admin_user} # first call to delete room # and do not wait for finish the task first_channel = self.make_request( ""DELETE"", self.url.encode(""ascii""), content=body, access_token=self.admin_user_tok, await_result=False, ) # second call to delete room second_channel = self.make_request( ""DELETE"", self.url.encode(""ascii""), content=body, access_token=self.admin_user_tok, ) self.assertEqual( HTTPStatus.BAD_REQUEST, second_channel.code, msg=second_channel.json_body ) self.assertEqual(Codes.UNKNOWN, second_channel.json_body[""errcode""]) self.assertEqual( f""History purge already in progress for {self.room_id}"", second_channel.json_body[""error""], ) # get result of first call first_channel.await_result() self.assertEqual(200, first_channel.code, msg=first_channel.json_body) self.assertIn(""delete_id"", first_channel.json_body) # check status after finish the task self._test_result( first_channel.json_body[""delete_id""], self.other_user, expect_new_room=True, ) ",Test that the call for delete a room at second time gives an exception.,14,87,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_delete_same_room_twice(self) -> None: body = {""new_room_user_id"": self.admin_user} # first call to delete room # and do not wait for finish the task first_channel = self.make_request( ""DELETE"", self.url.encode(""ascii""), content=body, access_token=self.admin_user_tok, await_result=False, ) # second call to delete room second_channel = self.make_request( ""DELETE"", self.url.encode(""ascii""), content=body, access_token=self.admin_user_tok, ) self.assertEqual( HTTPStatus.BAD_REQUEST, second_channel.code, msg=second_channel.json_body ) self.assertEqual(Codes.UNKNOWN, second_channel.json_body[""errcode""]) self.assertEqual( f""History purge already in progress for {self.room_id}"", second_channel.json_body[""error""], ) # get result of first call first_channel.await_result() self.assertEqual(200, first_channel.code, msg=first_channel.json_body) self.assertIn(""delete_id"", first_channel.json_body) # check status after finish the task self._test_result( first_channel.json_body[""delete_id""], self.other_user, expect_new_room=True, ) ``` ###Assistant : Test that the call for delete a room at second time gives an exception. " 1355,"async def _async_refresh_device_detail_by_ids(self, device_ids_list): for device_id in device_ids_list: try: await self._async_refresh_device_detail_by_id(device_id) except asyncio.TimeoutError: _LOGGER.warning( ""Timed out calling august api during refresh of device: %s"", device_id, ) except (ClientResponseError, CannotConnect) as err: _LOGGER.warning( ""Error from august api during refresh of device: %s"", device_id, exc_info=err, ) ","Refresh each device in sequence. This used to be a gather but it was less reliable with august's recent api changes. The august api has been timing out for some devices so we want the ones that it isn't timing out for to keep working. ",45,44,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def _async_refresh_device_detail_by_ids(self, device_ids_list): for device_id in device_ids_list: try: await self._async_refresh_device_detail_by_id(device_id) except asyncio.TimeoutError: _LOGGER.warning( ""Timed out calling august api during refresh of device: %s"", device_id, ) except (ClientResponseError, CannotConnect) as err: _LOGGER.warning( ""Error from august api during refresh of device: %s"", device_id, exc_info=err, ) ``` ###Assistant : Refresh each device in sequence. This used to be a gather but it was less reliable with august's recent api changes. The august api has been timing out for some devices so we want the ones that it isn't timing out for to keep working. " 1356,"def masks_to_boxes(masks): if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing=""ij"") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.feature_extraction_detr.rgb_to_id"," Compute the bounding boxes around the provided panoptic segmentation masks. The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. ",44,86,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def masks_to_boxes(masks): if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing=""ij"") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.feature_extraction_detr.rgb_to_id ``` ###Assistant : Compute the bounding boxes around the provided panoptic segmentation masks. The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. " 1357,"def _roots_with_zeros(p, num_leading_zeros): # Avoid lapack errors when p is all zero p = _where(len(p) == num_leading_zeros, 1.0, p) # Roll any leading zeros to the end & compute the roots roots = _roots_no_zeros(roll(p, -num_leading_zeros)) # Sort zero roots to the end. 
roots = lax.sort_key_val(roots == 0, roots)[1] # Set roots associated with num_leading_zeros to NaN return _where(arange(roots.size) < roots.size - num_leading_zeros, roots, complex(np.nan, np.nan)) @_wraps(np.roots, lax_description=, extra_params=)","\ Unlike the numpy version of this function, the JAX version returns the roots in a complex array regardless of the values of the roots. Additionally, the jax version of this function adds the ``strip_zeros`` function which must be set to False for the function to be compatible with JIT and other JAX transformations. With ``strip_zeros=False``, if your coefficients have leading zeros, the roots will be padded with NaN values: >>> coeffs = jnp.array([0, 1, 2]) # The default behavior matches numpy and strips leading zeros: >>> jnp.roots(coeffs) DeviceArray([-2.+0.j], dtype=complex64) # With strip_zeros=False, extra roots are set to NaN: >>> jnp.roots(coeffs, strip_zeros=False) DeviceArray([-2. +0.j, nan+nanj], dtype=complex64) strip_zeros : bool, default=True If set to True, then leading zeros in the coefficients will be stripped, similar to :func:`numpy.roots`. If set to False, leading zeros will not be stripped, and undefined roots will be represented by NaN values in the function output. ``strip_zeros`` must be set to ``False`` for the function to be compatible with :func:`jax.jit` and other JAX transformations. ",167,68,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _roots_with_zeros(p, num_leading_zeros): # Avoid lapack errors when p is all zero p = _where(len(p) == num_leading_zeros, 1.0, p) # Roll any leading zeros to the end & compute the roots roots = _roots_no_zeros(roll(p, -num_leading_zeros)) # Sort zero roots to the end. roots = lax.sort_key_val(roots == 0, roots)[1] # Set roots associated with num_leading_zeros to NaN return _where(arange(roots.size) < roots.size - num_leading_zeros, roots, complex(np.nan, np.nan)) @_wraps(np.roots, lax_description=, extra_params=) ``` ###Assistant : \ Unlike the numpy version of this function, the JAX version returns the roots in a complex array regardless of the values of the roots. Additionally, the jax version of this function adds the ``strip_zeros`` function which must be set to False for the function to be compatible with JIT and other JAX transformations. With ``strip_zeros=False``, if your coefficients have leading zeros, the roots will be padded with NaN values: >>> coeffs = jnp.array([0, 1, 2]) # The default behavior matches numpy and strips leading zeros: >>> jnp.roots(coeffs) DeviceArray([-2.+0.j], dtype=complex64) # With strip_zeros=False, extra roots are set to NaN: >>> jnp.roots(coeffs, strip_zeros=False) DeviceArray([-2. +0.j, nan+nanj], dtype=complex64) strip_zeros : bool, default=True If set to True, then leading zeros in the coefficients will be stripped, similar to :func:`numpy.roots`. If set to False, leading zeros will not be stripped, and undefined roots will be represented by NaN values in the function output. ``strip_zeros`` must be set to ``False`` for the function to be compatible with :func:`jax.jit` and other JAX transformations. " 1358,"def port_monitoring(self) -> int: if GATEWAY_NAME in self._deployment_nodes: return self[GATEWAY_NAME].args.port_monitoring else: return self._common_kwargs.get( 'port_monitoring', __default_port_monitoring__ ) ","Return if the monitoring is enabled .. # noqa: DAR201 ",10,16,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def port_monitoring(self) -> int: if GATEWAY_NAME in self._deployment_nodes: return self[GATEWAY_NAME].args.port_monitoring else: return self._common_kwargs.get( 'port_monitoring', __default_port_monitoring__ ) ``` ###Assistant : Return if the monitoring is enabled .. # noqa: DAR201 " 1359,"def autodiscover_modules(*args, **kwargs): from django.apps import apps register_to = kwargs.get(""register_to"") for app_config in apps.get_app_configs(): for module_to_search in args: # Attempt to import the app's module. try: if register_to: before_import_registry = copy.copy(register_to._registry) import_module(""%s.%s"" % (app_config.name, module_to_search)) except Exception: # Reset the registry to the state before the last import # as this import will have to reoccur on the next request and # this could raise NotRegistered and AlreadyRegistered # exceptions (see #8245). if register_to: register_to._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have the module in question, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(app_config.module, module_to_search): raise "," Auto-discover INSTALLED_APPS modules and fail silently when not present. This forces an import on them to register any admin bits they may want. You may provide a register_to keyword parameter as a way to access a registry. This register_to object must have a _registry instance variable to access it. ",49,117,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def autodiscover_modules(*args, **kwargs): from django.apps import apps register_to = kwargs.get(""register_to"") for app_config in apps.get_app_configs(): for module_to_search in args: # Attempt to import the app's module. try: if register_to: before_import_registry = copy.copy(register_to._registry) import_module(""%s.%s"" % (app_config.name, module_to_search)) except Exception: # Reset the registry to the state before the last import # as this import will have to reoccur on the next request and # this could raise NotRegistered and AlreadyRegistered # exceptions (see #8245). if register_to: register_to._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have the module in question, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(app_config.module, module_to_search): raise ``` ###Assistant : Auto-discover INSTALLED_APPS modules and fail silently when not present. This forces an import on them to register any admin bits they may want. You may provide a register_to keyword parameter as a way to access a registry. This register_to object must have a _registry instance variable to access it. " 1360,"def ensure_future(coro_or_future, *, loop=None): return _ensure_future(coro_or_future, loop=loop) ","Wrap a coroutine or an awaitable in a future. If the argument is a Future, it is returned directly. ",19,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ensure_future(coro_or_future, *, loop=None): return _ensure_future(coro_or_future, loop=loop) ``` ###Assistant : Wrap a coroutine or an awaitable in a future. If the argument is a Future, it is returned directly. 
" 1361,"def getpalette(self, rawmode=""RGB""): self.load() try: mode = self.im.getpalettemode() except ValueError: return None # no palette if rawmode is None: rawmode = mode return list(self.im.getpalette(mode, rawmode)) "," Returns the image palette as a list. :param rawmode: The mode in which to return the palette. ``None`` will return the palette in its current mode. :returns: A list of color values [r, g, b, ...], or None if the image has no palette. ",44,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getpalette(self, rawmode=""RGB""): self.load() try: mode = self.im.getpalettemode() except ValueError: return None # no palette if rawmode is None: rawmode = mode return list(self.im.getpalette(mode, rawmode)) ``` ###Assistant : Returns the image palette as a list. :param rawmode: The mode in which to return the palette. ``None`` will return the palette in its current mode. :returns: A list of color values [r, g, b, ...], or None if the image has no palette. " 1362,"def last_executor(self): if len(self.proto_wo_data.routes) > 0: return self.proto_wo_data.routes[-1].executor "," Returns the name of the last Executor that has processed this Request :return: the name of the last Executor that processed this Request ",23,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def last_executor(self): if len(self.proto_wo_data.routes) > 0: return self.proto_wo_data.routes[-1].executor ``` ###Assistant : Returns the name of the last Executor that has processed this Request :return: the name of the last Executor that processed this Request " 1363,"def custom_generator_multi_io_temporal(self, sample_weights=None): batch_size = 3 num_samples = 3 iteration = 0 while True: batch_index = iteration * batch_size % num_samples iteration += 1 start = batch_index end = start + batch_size x = [self.x[start:end], self.x[start:end]] y = [self.y1[start:end], self.y2[start:end]] if sample_weights: sw = tf.nest.map_structure( lambda w: w[start:end], sample_weights ) else: sw = None yield x, y, sw ","Generator for getting data for temporal multi io model. Args: sample_weights: List of sample_weights. Yields: Tuple of inputs, label, sample weights data. ",22,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def custom_generator_multi_io_temporal(self, sample_weights=None): batch_size = 3 num_samples = 3 iteration = 0 while True: batch_index = iteration * batch_size % num_samples iteration += 1 start = batch_index end = start + batch_size x = [self.x[start:end], self.x[start:end]] y = [self.y1[start:end], self.y2[start:end]] if sample_weights: sw = tf.nest.map_structure( lambda w: w[start:end], sample_weights ) else: sw = None yield x, y, sw ``` ###Assistant : Generator for getting data for temporal multi io model. Args: sample_weights: List of sample_weights. Yields: Tuple of inputs, label, sample weights data. 
" 1364,"def _just_docs(self): try: for child in self.ast.body: if not isinstance(child, ast.Assign): # allow string constant expressions (these are docstrings) if isinstance(child, ast.Expr) and isinstance(child.value, ast.Constant) and isinstance(child.value.value, str): continue # allowed from __future__ imports if isinstance(child, ast.ImportFrom) and child.module == '__future__': for future_import in child.names: if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS: break else: continue return False return True except AttributeError: return False ","Module can contain just docs and from __future__ boilerplate ",9,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _just_docs(self): try: for child in self.ast.body: if not isinstance(child, ast.Assign): # allow string constant expressions (these are docstrings) if isinstance(child, ast.Expr) and isinstance(child.value, ast.Constant) and isinstance(child.value.value, str): continue # allowed from __future__ imports if isinstance(child, ast.ImportFrom) and child.module == '__future__': for future_import in child.names: if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS: break else: continue return False return True except AttributeError: return False ``` ###Assistant : Module can contain just docs and from __future__ boilerplate " 1365,"def response_validator(self) -> RequestValidator: self.check_reload() assert self._response_validator is not None return self._response_validator ","Reload the OpenAPI file if it has been modified after the last time it was read, and then return the openapi_core validator object. Similar to preceding functions. Used for proper access to OpenAPI objects. ",34,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def response_validator(self) -> RequestValidator: self.check_reload() assert self._response_validator is not None return self._response_validator ``` ###Assistant : Reload the OpenAPI file if it has been modified after the last time it was read, and then return the openapi_core validator object. Similar to preceding functions. Used for proper access to OpenAPI objects. " 1366,"def roots(p, *, strip_zeros=True): # ported from https://github.com/numpy/numpy/blob/v1.17.0/numpy/lib/polynomial.py#L168-L251 p = atleast_1d(p) if p.ndim != 1: raise ValueError(""Input must be a rank-1 array."") # strip_zeros=False is unsafe because leading zeros aren't removed if not strip_zeros: if p.size > 1: return _roots_no_zeros(p) else: return array([]) if all(p == 0): return array([]) # factor out trivial roots start, end = _nonzero_range(p) # number of trailing zeros = number of roots at 0 trailing_zeros = p.size - end # strip leading and trailing zeros p = p[start:end] if p.size < 2: return zeros(trailing_zeros, p.dtype) else: roots = _roots_no_zeros(p) # combine roots and zero roots roots = hstack((roots, zeros(trailing_zeros, p.dtype))) return roots _POLYFIT_DOC = @_wraps(np.polyfit, lax_description=_POLYFIT_DOC) @partial(jit, static_argnames=('deg', 'rcond', 'full', 'cov'))","\ Unlike NumPy's implementation of polyfit, :py:func:`jax.numpy.polyfit` will not warn on rank reduction, which indicates an ill conditioned matrix Also, it works best on rcond <= 10e-3 values. ",28,116,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def roots(p, *, strip_zeros=True): # ported from https://github.com/numpy/numpy/blob/v1.17.0/numpy/lib/polynomial.py#L168-L251 p = atleast_1d(p) if p.ndim != 1: raise ValueError(""Input must be a rank-1 array."") # strip_zeros=False is unsafe because leading zeros aren't removed if not strip_zeros: if p.size > 1: return _roots_no_zeros(p) else: return array([]) if all(p == 0): return array([]) # factor out trivial roots start, end = _nonzero_range(p) # number of trailing zeros = number of roots at 0 trailing_zeros = p.size - end # strip leading and trailing zeros p = p[start:end] if p.size < 2: return zeros(trailing_zeros, p.dtype) else: roots = _roots_no_zeros(p) # combine roots and zero roots roots = hstack((roots, zeros(trailing_zeros, p.dtype))) return roots _POLYFIT_DOC = @_wraps(np.polyfit, lax_description=_POLYFIT_DOC) @partial(jit, static_argnames=('deg', 'rcond', 'full', 'cov')) ``` ###Assistant : \ Unlike NumPy's implementation of polyfit, :py:func:`jax.numpy.polyfit` will not warn on rank reduction, which indicates an ill conditioned matrix Also, it works best on rcond <= 10e-3 values. " 1367,"def _storage_path(self, local_path): rel_local_path = os.path.relpath(local_path, self.logdir) return os.path.join(self.remote_checkpoint_dir, rel_local_path) ","Converts a `local_path` to be based off of `self.remote_checkpoint_dir`.",9,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _storage_path(self, local_path): rel_local_path = os.path.relpath(local_path, self.logdir) return os.path.join(self.remote_checkpoint_dir, rel_local_path) ``` ###Assistant : Converts a `local_path` to be based off of `self.remote_checkpoint_dir`. " 1368,"def get_time_since_last_update(self) -> float: return time.time() - self.last_update_time ",How much time has passed since the last call to update.,11,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_time_since_last_update(self) -> float: return time.time() - self.last_update_time ``` ###Assistant : How much time has passed since the last call to update. 
" 1369,"def train_fixbn(self, mode=True, freeze_bn=True, freeze_bn_affine=False): r super(DeepLabv3_plus, self).train(mode) if freeze_bn: print(""Freezing Mean/Var of BatchNorm2D."") if freeze_bn_affine: print(""Freezing Weight/Bias of BatchNorm2D."") if freeze_bn: for m in self.xception_features.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() if freeze_bn_affine: m.weight.requires_grad = False m.bias.requires_grad = False # for m in self.aspp1.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp2.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp3.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp4.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.global_avg_pool.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.concat_projection_bn1.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.feature_projection_bn1.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False ","Sets the module in training mode. This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. Returns: Module: self ",38,192,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def train_fixbn(self, mode=True, freeze_bn=True, freeze_bn_affine=False): r super(DeepLabv3_plus, self).train(mode) if freeze_bn: print(""Freezing Mean/Var of BatchNorm2D."") if freeze_bn_affine: print(""Freezing Weight/Bias of BatchNorm2D."") if freeze_bn: for m in self.xception_features.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() if freeze_bn_affine: m.weight.requires_grad = False m.bias.requires_grad = False # for m in self.aspp1.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp2.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp3.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp4.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.global_avg_pool.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.concat_projection_bn1.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.feature_projection_bn1.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False ``` ###Assistant : Sets the module in training mode. This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. Returns: Module: self " 1370,"def assert_params_all_zeros(module) -> bool: weight_data = module.weight.data is_weight_zero = weight_data.allclose( weight_data.new_zeros(weight_data.size())) if hasattr(module, 'bias') and module.bias is not None: bias_data = module.bias.data is_bias_zero = bias_data.allclose( bias_data.new_zeros(bias_data.size())) else: is_bias_zero = True return is_weight_zero and is_bias_zero ","Check if the parameters of the module is all zeros. Args: module (nn.Module): The module to be checked. Returns: bool: Whether the parameters of the module is all zeros. ",29,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def assert_params_all_zeros(module) -> bool: weight_data = module.weight.data is_weight_zero = weight_data.allclose( weight_data.new_zeros(weight_data.size())) if hasattr(module, 'bias') and module.bias is not None: bias_data = module.bias.data is_bias_zero = bias_data.allclose( bias_data.new_zeros(bias_data.size())) else: is_bias_zero = True return is_weight_zero and is_bias_zero ``` ###Assistant : Check if the parameters of the module is all zeros. Args: module (nn.Module): The module to be checked. Returns: bool: Whether the parameters of the module is all zeros. 
" 1371,"def cleanup_state(self): # type: () -> None for f in self.cleanup_functions: if not callable(f): continue try: if not f(self.socket, self.configuration): log_automotive.info( ""Cleanup function %s failed"", repr(f)) except (OSError, ValueError, Scapy_Exception) as e: log_automotive.critical(""Exception during cleanup: %s"", e) self.cleanup_functions = list() "," Executes all collected cleanup functions from a traversed path :return: None ",11,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cleanup_state(self): # type: () -> None for f in self.cleanup_functions: if not callable(f): continue try: if not f(self.socket, self.configuration): log_automotive.info( ""Cleanup function %s failed"", repr(f)) except (OSError, ValueError, Scapy_Exception) as e: log_automotive.critical(""Exception during cleanup: %s"", e) self.cleanup_functions = list() ``` ###Assistant : Executes all collected cleanup functions from a traversed path :return: None " 1372,"def test_model_with_two_tabbed_panels_only(self): Publisher.settings_panels = [FieldPanel(""name"")] Publisher.promote_panels = [FieldPanel(""headquartered_in"")] warning_1 = checks.Warning( ""Publisher.promote_panels will have no effect on modeladmin editing"", hint=, obj=Publisher, id=""wagtailadmin.W002"", ) warning_2 = checks.Warning( ""Publisher.settings_panels will have no effect on modeladmin editing"", hint=, obj=Publisher, id=""wagtailadmin.W002"", ) checks_results = self.get_checks_result() self.assertIn(warning_1, checks_results) self.assertIn(warning_2, checks_results) # clean up for future checks delattr(Publisher, ""settings_panels"") delattr(Publisher, ""promote_panels"") ","Ensure that Publisher uses `panels` instead of `promote_panels`\ or set up an `edit_handler` if you want a tabbed editing interface. There are no default tabs on non-Page models so there will be no\ Promote tab for the promote_panels to render in.Ensure that Publisher uses `panels` instead of `settings_panels`\ or set up an `edit_handler` if you want a tabbed editing interface. There are no default tabs on non-Page models so there will be no\ Settings tab for the settings_panels to render in.",81,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_model_with_two_tabbed_panels_only(self): Publisher.settings_panels = [FieldPanel(""name"")] Publisher.promote_panels = [FieldPanel(""headquartered_in"")] warning_1 = checks.Warning( ""Publisher.promote_panels will have no effect on modeladmin editing"", hint=, obj=Publisher, id=""wagtailadmin.W002"", ) warning_2 = checks.Warning( ""Publisher.settings_panels will have no effect on modeladmin editing"", hint=, obj=Publisher, id=""wagtailadmin.W002"", ) checks_results = self.get_checks_result() self.assertIn(warning_1, checks_results) self.assertIn(warning_2, checks_results) # clean up for future checks delattr(Publisher, ""settings_panels"") delattr(Publisher, ""promote_panels"") ``` ###Assistant : Ensure that Publisher uses `panels` instead of `promote_panels`\ or set up an `edit_handler` if you want a tabbed editing interface. There are no default tabs on non-Page models so there will be no\ Promote tab for the promote_panels to render in.Ensure that Publisher uses `panels` instead of `settings_panels`\ or set up an `edit_handler` if you want a tabbed editing interface. 
There are no default tabs on non-Page models so there will be no\ Settings tab for the settings_panels to render in. " 1373,"def get_builtin_layer(class_name): if not hasattr(LOCAL, ""ALL_OBJECTS""): populate_deserializable_objects() return LOCAL.ALL_OBJECTS.get(class_name) ","Returns class if `class_name` is registered, else returns None.",9,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_builtin_layer(class_name): if not hasattr(LOCAL, ""ALL_OBJECTS""): populate_deserializable_objects() return LOCAL.ALL_OBJECTS.get(class_name) ``` ###Assistant : Returns class if `class_name` is registered, else returns None. " 1374,"def abelian_invariants(self): if self.is_trivial: return [] gns = self.generators inv = [] G = self H = G.derived_subgroup() Hgens = H.generators for p in primefactors(G.order()): ranks = [] while True: pows = [] for g in gns: elm = g**p if not H.contains(elm): pows.append(elm) K = PermutationGroup(Hgens + pows) if pows else H r = G.order()//K.order() G = K gns = pows if r == 1: break ranks.append(multiplicity(p, r)) if ranks: pows = [1]*ranks[0] for i in ranks: for j in range(0, i): pows[j] = pows[j]*p inv.extend(pows) inv.sort() return inv "," Returns the abelian invariants for the given group. Let ``G`` be a nontrivial finite abelian group. Then G is isomorphic to the direct product of finitely many nontrivial cyclic groups of prime-power order. Explanation =========== The prime-powers that occur as the orders of the factors are uniquely determined by G. More precisely, the primes that occur in the orders of the factors in any such decomposition of ``G`` are exactly the primes that divide ``|G|`` and for any such prime ``p``, if the orders of the factors that are p-groups in one such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, then the orders of the factors that are p-groups in any such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``. The uniquely determined integers ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, taken for all primes that divide ``|G|`` are called the invariants of the nontrivial group ``G`` as suggested in ([14], p. 542). Notes ===== We adopt the convention that the invariants of a trivial group are []. Examples ======== >>> from sympy.combinatorics import Permutation, PermutationGroup >>> a = Permutation([0, 2, 1]) >>> b = Permutation([1, 0, 2]) >>> G = PermutationGroup([a, b]) >>> G.abelian_invariants() [2] >>> from sympy.combinatorics import CyclicGroup >>> G = CyclicGroup(7) >>> G.abelian_invariants() [7] ",212,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def abelian_invariants(self): if self.is_trivial: return [] gns = self.generators inv = [] G = self H = G.derived_subgroup() Hgens = H.generators for p in primefactors(G.order()): ranks = [] while True: pows = [] for g in gns: elm = g**p if not H.contains(elm): pows.append(elm) K = PermutationGroup(Hgens + pows) if pows else H r = G.order()//K.order() G = K gns = pows if r == 1: break ranks.append(multiplicity(p, r)) if ranks: pows = [1]*ranks[0] for i in ranks: for j in range(0, i): pows[j] = pows[j]*p inv.extend(pows) inv.sort() return inv ``` ###Assistant : Returns the abelian invariants for the given group. Let ``G`` be a nontrivial finite abelian group. Then G is isomorphic to the direct product of finitely many nontrivial cyclic groups of prime-power order. 
Explanation =========== The prime-powers that occur as the orders of the factors are uniquely determined by G. More precisely, the primes that occur in the orders of the factors in any such decomposition of ``G`` are exactly the primes that divide ``|G|`` and for any such prime ``p``, if the orders of the factors that are p-groups in one such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, then the orders of the factors that are p-groups in any such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``. The uniquely determined integers ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, taken for all primes that divide ``|G|`` are called the invariants of the nontrivial group ``G`` as suggested in ([14], p. 542). Notes ===== We adopt the convention that the invariants of a trivial group are []. Examples ======== >>> from sympy.combinatorics import Permutation, PermutationGroup >>> a = Permutation([0, 2, 1]) >>> b = Permutation([1, 0, 2]) >>> G = PermutationGroup([a, b]) >>> G.abelian_invariants() [2] >>> from sympy.combinatorics import CyclicGroup >>> G = CyclicGroup(7) >>> G.abelian_invariants() [7] " 1375,"def clone_graph_nodes(inputs, outputs): nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs) cloned_inputs = [] cloned_outputs = [] # We not only need to create copies of Nodes (mimic the calls), also need to # clone keras_tensors to avoid the override of _keras_history attached on the # keras_tensor. The following dict is used to track any keras tensor we cloned # The key is the string ID of the original keras tensor, and value is the # cloned keras_tensor instance. kt_id_mapping = {} for kt_input in tf.nest.flatten(inputs): if kt_input.node.is_input: # For any existing keras_tensor from tf.keras.Input, we leave them as is. cloned_inputs.append(kt_input) kt_id_mapping[id(kt_input)] = kt_input else: # We need to create a new tf.keras.Input for any intermediate keras_tensor cpy = _clone_keras_tensor(kt_input) cloned_input = input_layer_module.Input(tensor=cpy) cloned_inputs.append(cloned_input) kt_id_mapping[id(kt_input)] = cloned_input cloned_inputs = tf.nest.pack_sequence_as(inputs, cloned_inputs) for kt_output in tf.nest.flatten(outputs): cpy = _clone_keras_tensor(kt_output) # We reuse the _keras_history here, which contains the old information. It # is used in the Node constructor to check if the tensor ""is_keras_tensor()"" # The history will be override by the Node constructor anyway for the # corresponding layer output anyway. cpy._keras_history = ( kt_output._keras_history ) # pylint: disable=protected-access cloned_outputs.append(cpy) kt_id_mapping[id(kt_output)] = cpy cloned_outputs = tf.nest.pack_sequence_as(outputs, cloned_outputs) for node in nodes_to_clone: # Clone any keras_tensors to avoid override of _keras_history # Or reuse an existing keras_tensor if it has already been cloned. output_copy = clone_keras_tensors(node.output_tensors, kt_id_mapping) call_args_copy = clone_keras_tensors(node.call_args, kt_id_mapping) call_kwargs_copy = clone_keras_tensors(node.call_kwargs, kt_id_mapping) # Creating new nodes based on the existing node information. # Node wires itself to inbound and outbound layers. # The Node constructor actually updates this layer's self._inbound_nodes, # sets _keras_history on the outputs, and adds itself to the # `_outbound_nodes` of the layers that produced the inputs to this # layer call. 
node_module.Node( node.layer, call_args=call_args_copy, call_kwargs=call_kwargs_copy, outputs=output_copy, ) return cloned_inputs, cloned_outputs ","Clone the `Node` between the inputs and output tensors. This function is used to create a new functional model from any intermediate keras tensors. The clone of the nodes mimic the behavior of reconstructing the functional graph network by re-executing all the __call__ methods. The cloned nodes will be appended to the layers. Note that a new tf.keras.Inputs will be created for any items in the `inputs` Args: inputs: A nested structure of keras_tensors. outputs: A nested structure of keras_tensors. Returns: A pair of inputs and outputs, with cloned keras_tensors. They can be used to create a new functional model. ",100,292,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clone_graph_nodes(inputs, outputs): nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs) cloned_inputs = [] cloned_outputs = [] # We not only need to create copies of Nodes (mimic the calls), also need to # clone keras_tensors to avoid the override of _keras_history attached on the # keras_tensor. The following dict is used to track any keras tensor we cloned # The key is the string ID of the original keras tensor, and value is the # cloned keras_tensor instance. kt_id_mapping = {} for kt_input in tf.nest.flatten(inputs): if kt_input.node.is_input: # For any existing keras_tensor from tf.keras.Input, we leave them as is. cloned_inputs.append(kt_input) kt_id_mapping[id(kt_input)] = kt_input else: # We need to create a new tf.keras.Input for any intermediate keras_tensor cpy = _clone_keras_tensor(kt_input) cloned_input = input_layer_module.Input(tensor=cpy) cloned_inputs.append(cloned_input) kt_id_mapping[id(kt_input)] = cloned_input cloned_inputs = tf.nest.pack_sequence_as(inputs, cloned_inputs) for kt_output in tf.nest.flatten(outputs): cpy = _clone_keras_tensor(kt_output) # We reuse the _keras_history here, which contains the old information. It # is used in the Node constructor to check if the tensor ""is_keras_tensor()"" # The history will be override by the Node constructor anyway for the # corresponding layer output anyway. cpy._keras_history = ( kt_output._keras_history ) # pylint: disable=protected-access cloned_outputs.append(cpy) kt_id_mapping[id(kt_output)] = cpy cloned_outputs = tf.nest.pack_sequence_as(outputs, cloned_outputs) for node in nodes_to_clone: # Clone any keras_tensors to avoid override of _keras_history # Or reuse an existing keras_tensor if it has already been cloned. output_copy = clone_keras_tensors(node.output_tensors, kt_id_mapping) call_args_copy = clone_keras_tensors(node.call_args, kt_id_mapping) call_kwargs_copy = clone_keras_tensors(node.call_kwargs, kt_id_mapping) # Creating new nodes based on the existing node information. # Node wires itself to inbound and outbound layers. # The Node constructor actually updates this layer's self._inbound_nodes, # sets _keras_history on the outputs, and adds itself to the # `_outbound_nodes` of the layers that produced the inputs to this # layer call. node_module.Node( node.layer, call_args=call_args_copy, call_kwargs=call_kwargs_copy, outputs=output_copy, ) return cloned_inputs, cloned_outputs ``` ###Assistant : Clone the `Node` between the inputs and output tensors. This function is used to create a new functional model from any intermediate keras tensors. 
The clone of the nodes mimic the behavior of reconstructing the functional graph network by re-executing all the __call__ methods. The cloned nodes will be appended to the layers. Note that a new tf.keras.Inputs will be created for any items in the `inputs` Args: inputs: A nested structure of keras_tensors. outputs: A nested structure of keras_tensors. Returns: A pair of inputs and outputs, with cloned keras_tensors. They can be used to create a new functional model. " 1376,"def parallel_axis(self, point, frame=None): # circular import issue from sympy.physics.mechanics.functions import inertia_of_point_mass if frame is None: frame = self.frame return self.central_inertia.express(frame) + inertia_of_point_mass( self.mass, self.masscenter.pos_from(point), frame) ","Returns the inertia dyadic of the body with respect to another point. Parameters ========== point : sympy.physics.vector.Point The point to express the inertia dyadic about. frame : sympy.physics.vector.ReferenceFrame The reference frame used to construct the dyadic. Returns ======= inertia : sympy.physics.vector.Dyadic The inertia dyadic of the rigid body expressed about the provided point. ",53,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parallel_axis(self, point, frame=None): # circular import issue from sympy.physics.mechanics.functions import inertia_of_point_mass if frame is None: frame = self.frame return self.central_inertia.express(frame) + inertia_of_point_mass( self.mass, self.masscenter.pos_from(point), frame) ``` ###Assistant : Returns the inertia dyadic of the body with respect to another point. Parameters ========== point : sympy.physics.vector.Point The point to express the inertia dyadic about. frame : sympy.physics.vector.ReferenceFrame The reference frame used to construct the dyadic. Returns ======= inertia : sympy.physics.vector.Dyadic The inertia dyadic of the rigid body expressed about the provided point. " 1377,"def test_class_weight_does_not_contains_more_classses(): tree = DecisionTreeClassifier(class_weight={0: 1, 1: 10, 2: 20}) # Does not raise tree.fit([[0, 0, 1], [1, 0, 1], [1, 2, 0]], [0, 0, 1]) ","Check that class_weight can contain more labels than in y. Non-regression test for #22413 ",14,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_class_weight_does_not_contains_more_classses(): tree = DecisionTreeClassifier(class_weight={0: 1, 1: 10, 2: 20}) # Does not raise tree.fit([[0, 0, 1], [1, 0, 1], [1, 2, 0]], [0, 0, 1]) ``` ###Assistant : Check that class_weight can contain more labels than in y. Non-regression test for #22413 " 1378,"def parse(self, state): # type: (ParserState) -> str if state.mode == ParserMode.PARSE: path = AnyParser().parse(state) if not os.path.isfile(path): raise ParserError(f'Not a file: {path}') else: path = '' with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary while boundary.ready: directory = path or '.' 
try: with os.scandir(directory) as scan: # type: t.Iterator[os.DirEntry] choices = [f'{item.name}{PATH_DELIMITER}' if item.is_dir() else item.name for item in scan] except OSError: choices = [] if not path: choices.append(PATH_DELIMITER) # allow absolute paths choices.append('../') # suggest relative paths part = RelativePathNameParser(choices).parse(state) path += f'{part}{boundary.match or """"}' return path ",Parse the input from the given state and return the result.,11,89,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse(self, state): # type: (ParserState) -> str if state.mode == ParserMode.PARSE: path = AnyParser().parse(state) if not os.path.isfile(path): raise ParserError(f'Not a file: {path}') else: path = '' with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary while boundary.ready: directory = path or '.' try: with os.scandir(directory) as scan: # type: t.Iterator[os.DirEntry] choices = [f'{item.name}{PATH_DELIMITER}' if item.is_dir() else item.name for item in scan] except OSError: choices = [] if not path: choices.append(PATH_DELIMITER) # allow absolute paths choices.append('../') # suggest relative paths part = RelativePathNameParser(choices).parse(state) path += f'{part}{boundary.match or """"}' return path ``` ###Assistant : Parse the input from the given state and return the result. " 1379,"def layout(self) -> Layout: # self.log(""I"", self._inline_styles) # self.log(""C"", self._css_styles) # self.log(""S"", self.styles) assert self.styles.layout return self.styles.layout # @layout.setter # def layout(self, new_value: Layout) -> None: # # self.styles.layout = new_value ","Convenience property for accessing ``self.styles.layout``. Returns: The Layout associated with this view Convenience property setter for setting ``view.styles.layout``. # Args: # new_value: # Returns: # None # ",27,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def layout(self) -> Layout: # self.log(""I"", self._inline_styles) # self.log(""C"", self._css_styles) # self.log(""S"", self.styles) assert self.styles.layout return self.styles.layout # @layout.setter # def layout(self, new_value: Layout) -> None: # # self.styles.layout = new_value ``` ###Assistant : Convenience property for accessing ``self.styles.layout``. Returns: The Layout associated with this view Convenience property setter for setting ``view.styles.layout``. # Args: # new_value: # Returns: # None # " 1380,"def timeout_message(self, message): future = self.send_future_map.pop(message, None) # In a race condition the message might have been sent by the time # we're timing it out. Make sure the future is not None if future is not None: del self.send_timeout_map[message] if future.attempts < future.tries: future.attempts += 1 log.info( ""SaltReqTimeoutError, retrying. (%s/%s)"", future.attempts, future.tries, ) self.send( message, timeout=future.timeout, tries=future.tries, future=future, ) else: future.set_exception(SaltReqTimeoutError(""Message timed out"")) "," Handle a message timeout by removing it from the sending queue and informing the caller :raises: SaltReqTimeoutError ",17,64,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def timeout_message(self, message): future = self.send_future_map.pop(message, None) # In a race condition the message might have been sent by the time # we're timing it out. Make sure the future is not None if future is not None: del self.send_timeout_map[message] if future.attempts < future.tries: future.attempts += 1 log.info( ""SaltReqTimeoutError, retrying. (%s/%s)"", future.attempts, future.tries, ) self.send( message, timeout=future.timeout, tries=future.tries, future=future, ) else: future.set_exception(SaltReqTimeoutError(""Message timed out"")) ``` ###Assistant : Handle a message timeout by removing it from the sending queue and informing the caller :raises: SaltReqTimeoutError " 1381,"def test_not_all_records(self, requests_mock, authenticator, config, responses): expected_output = [ {""id"": 1, ""updated_at"": ""2018-01-02T00:00:00Z""}, {""id"": 2, ""updated_at"": ""2018-02-02T00:00:00Z""}, {""id"": 2, ""updated_at"": ""2018-02-02T00:00:00Z""}, # duplicate {""id"": 3, ""updated_at"": ""2018-03-02T00:00:00Z""}, {""id"": 3, ""updated_at"": ""2018-03-02T00:00:00Z""}, # duplicate {""id"": 4, ""updated_at"": ""2019-01-03T00:00:00Z""}, {""id"": 4, ""updated_at"": ""2019-01-03T00:00:00Z""}, # duplicate {""id"": 5, ""updated_at"": ""2019-02-03T00:00:00Z""}, {""id"": 5, ""updated_at"": ""2019-02-03T00:00:00Z""}, # duplicate {""id"": 6, ""updated_at"": ""2019-03-03T00:00:00Z""}, ] # INT value of page number where the switch state should be triggered. # in this test case values from: 1 - 4, assuming we want to switch state on this page. ticket_paginate_limit = 2 # This parameter mocks the ""per_page"" parameter in the API Call result_return_limit = 1 # Create test_stream instance. test_stream = Tickets(authenticator=authenticator, config=config) test_stream.ticket_paginate_limit = ticket_paginate_limit test_stream.result_return_limit = result_return_limit # Mocking Request for response in responses: requests_mock.register_uri( ""GET"", response[""url""], json=response[""json""], headers=response.get(""headers"", {}), ) records = list(test_stream.read_records(sync_mode=SyncMode.full_refresh)) # We're expecting 6 records to return from the tickets_stream assert records == expected_output "," TEST 1 - not all records are retrieved During test1 the tickets_stream changes the state of parameters on page: 2, by updating the params: `params[""order_by""] = ""updated_at""` `params[""updated_since""] = last_record` continues to fetch records from the source, using new cycle, and so on. NOTE: After switch of the state on ticket_paginate_limit = 2, is this example, we will experience the records duplication, because of the last_record state, starting at the point where we stoped causes the duplication of the output. The solution for this is to add at least 1 second to the last_record state. The DBT normalization should handle this for the end user, so the duplication issue is not a blocker in such cases. Main pricipal here is: airbyte is at-least-once delivery, but skipping records is data loss. ",130,152,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_not_all_records(self, requests_mock, authenticator, config, responses): expected_output = [ {""id"": 1, ""updated_at"": ""2018-01-02T00:00:00Z""}, {""id"": 2, ""updated_at"": ""2018-02-02T00:00:00Z""}, {""id"": 2, ""updated_at"": ""2018-02-02T00:00:00Z""}, # duplicate {""id"": 3, ""updated_at"": ""2018-03-02T00:00:00Z""}, {""id"": 3, ""updated_at"": ""2018-03-02T00:00:00Z""}, # duplicate {""id"": 4, ""updated_at"": ""2019-01-03T00:00:00Z""}, {""id"": 4, ""updated_at"": ""2019-01-03T00:00:00Z""}, # duplicate {""id"": 5, ""updated_at"": ""2019-02-03T00:00:00Z""}, {""id"": 5, ""updated_at"": ""2019-02-03T00:00:00Z""}, # duplicate {""id"": 6, ""updated_at"": ""2019-03-03T00:00:00Z""}, ] # INT value of page number where the switch state should be triggered. # in this test case values from: 1 - 4, assuming we want to switch state on this page. ticket_paginate_limit = 2 # This parameter mocks the ""per_page"" parameter in the API Call result_return_limit = 1 # Create test_stream instance. test_stream = Tickets(authenticator=authenticator, config=config) test_stream.ticket_paginate_limit = ticket_paginate_limit test_stream.result_return_limit = result_return_limit # Mocking Request for response in responses: requests_mock.register_uri( ""GET"", response[""url""], json=response[""json""], headers=response.get(""headers"", {}), ) records = list(test_stream.read_records(sync_mode=SyncMode.full_refresh)) # We're expecting 6 records to return from the tickets_stream assert records == expected_output ``` ###Assistant : TEST 1 - not all records are retrieved During test1 the tickets_stream changes the state of parameters on page: 2, by updating the params: `params[""order_by""] = ""updated_at""` `params[""updated_since""] = last_record` continues to fetch records from the source, using new cycle, and so on. NOTE: After switch of the state on ticket_paginate_limit = 2, is this example, we will experience the records duplication, because of the last_record state, starting at the point where we stoped causes the duplication of the output. The solution for this is to add at least 1 second to the last_record state. The DBT normalization should handle this for the end user, so the duplication issue is not a blocker in such cases. Main pricipal here is: airbyte is at-least-once delivery, but skipping records is data loss. " 1382,"def run_from_argv(self, argv): self.test_runner = get_command_line_option(argv, ""--testrunner"") super().run_from_argv(argv) "," Pre-parse the command line to extract the value of the --testrunner option. This allows a test runner to define additional command line arguments. ",23,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def run_from_argv(self, argv): self.test_runner = get_command_line_option(argv, ""--testrunner"") super().run_from_argv(argv) ``` ###Assistant : Pre-parse the command line to extract the value of the --testrunner option. This allows a test runner to define additional command line arguments. 
" 1383,"def test_new_processing_issue(self, mock_func): notification = NewProcessingIssuesActivityNotification( Activity( project=self.project, user=self.user, type=ActivityType.NEW_PROCESSING_ISSUES, data={ ""issues"": get_issues_data(), ""reprocessing_active"": True, }, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() assert ( text == f""Processing issues on "" ) assert ( attachment[""text""] == f""Some events failed to process in your project {self.project.slug}"" ) assert ( attachment[""footer""] == f""{self.project.slug} | "" ) "," Test that a Slack message is sent with the expected payload when an issue is held back in reprocessing ",19,57,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_new_processing_issue(self, mock_func): notification = NewProcessingIssuesActivityNotification( Activity( project=self.project, user=self.user, type=ActivityType.NEW_PROCESSING_ISSUES, data={ ""issues"": get_issues_data(), ""reprocessing_active"": True, }, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() assert ( text == f""Processing issues on "" ) assert ( attachment[""text""] == f""Some events failed to process in your project {self.project.slug}"" ) assert ( attachment[""footer""] == f""{self.project.slug} | "" ) ``` ###Assistant : Test that a Slack message is sent with the expected payload when an issue is held back in reprocessing " 1384,"def load_array(data_arrays, batch_size, is_train=True): dataset = data.TensorDataset(*data_arrays) return data.DataLoader(dataset, batch_size, shuffle=is_train) ","Construct a PyTorch data iterator. Defined in :numref:`sec_linear_concise`",8,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_array(data_arrays, batch_size, is_train=True): dataset = data.TensorDataset(*data_arrays) return data.DataLoader(dataset, batch_size, shuffle=is_train) ``` ###Assistant : Construct a PyTorch data iterator. Defined in :numref:`sec_linear_concise` " 1385,"def is_permanent_redirect(self): return ""location"" in self.headers and self.status_code in ( codes.moved_permanently, codes.permanent_redirect, ) ",True if this Response one of the permanent versions of redirect.,11,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_permanent_redirect(self): return ""location"" in self.headers and self.status_code in ( codes.moved_permanently, codes.permanent_redirect, ) ``` ###Assistant : True if this Response one of the permanent versions of redirect. " 1386,"def _estimate_step_duration(self, current, now): if current: # there are a few special scenarios here: # 1) somebody is calling the progress bar without ever supplying # step 1 # 2) somebody is calling the progress bar and supplies step one # multiple times, e.g. as part of a finalizing call # in these cases, we just fall back to the simple calculation if self._time_after_first_step is not None and current > 1: time_per_unit = (now - self._time_after_first_step) / ( current - 1 ) else: time_per_unit = (now - self._start) / current if current == 1: self._time_after_first_step = now return time_per_unit else: return 0 ","Estimate the duration of a single step. Given the step number `current` and the corresponding time `now` this function returns an estimate for how long a single step takes. If this is called before one step has been completed (i.e. 
`current == 0`) then zero is given as an estimate. The duration estimate ignores the duration of the (assumed to be non-representative) first step for estimates when more steps are available (i.e. `current>1`). Args: current: Index of current step. now: The current time. Returns: Estimate of the duration of a single step. ",92,102,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _estimate_step_duration(self, current, now): if current: # there are a few special scenarios here: # 1) somebody is calling the progress bar without ever supplying # step 1 # 2) somebody is calling the progress bar and supplies step one # multiple times, e.g. as part of a finalizing call # in these cases, we just fall back to the simple calculation if self._time_after_first_step is not None and current > 1: time_per_unit = (now - self._time_after_first_step) / ( current - 1 ) else: time_per_unit = (now - self._start) / current if current == 1: self._time_after_first_step = now return time_per_unit else: return 0 ``` ###Assistant : Estimate the duration of a single step. Given the step number `current` and the corresponding time `now` this function returns an estimate for how long a single step takes. If this is called before one step has been completed (i.e. `current == 0`) then zero is given as an estimate. The duration estimate ignores the duration of the (assumed to be non-representative) first step for estimates when more steps are available (i.e. `current>1`). Args: current: Index of current step. now: The current time. Returns: Estimate of the duration of a single step. " 1387,"def _propagate_index_objs(self, axis=None): self._filter_empties() if axis is None or axis == 0: cum_row_lengths = np.cumsum([0] + self._row_lengths) if axis is None or axis == 1: cum_col_widths = np.cumsum([0] + self._column_widths) if axis is None: "," Synchronize labels by applying the index object for specific `axis` to the `self._partitions` lazily. Adds `set_axis` function to call-queue of each partition from `self._partitions` to apply new axis. Parameters ---------- axis : int, default: None The axis to apply to. If it's None applies to both axes. ",47,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _propagate_index_objs(self, axis=None): self._filter_empties() if axis is None or axis == 0: cum_row_lengths = np.cumsum([0] + self._row_lengths) if axis is None or axis == 1: cum_col_widths = np.cumsum([0] + self._column_widths) if axis is None: ``` ###Assistant : Synchronize labels by applying the index object for specific `axis` to the `self._partitions` lazily. Adds `set_axis` function to call-queue of each partition from `self._partitions` to apply new axis. Parameters ---------- axis : int, default: None The axis to apply to. If it's None applies to both axes. 
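The `_estimate_step_duration` helper recorded in row 1386 above deliberately ignores the first (usually unrepresentative) step once later steps are available. Below is a minimal, self-contained sketch of that estimation logic; the class and attribute names are made up to mirror the snippet and are not the real Keras `Progbar` internals.

```python
import time

# Minimal standalone sketch of the step-duration estimate described in row 1386.
# StepTimer and its attributes (_start, _time_after_first_step) are illustrative
# stand-ins, not the actual Keras Progbar API.
class StepTimer:
    def __init__(self):
        self._start = time.time()
        self._time_after_first_step = None

    def estimate_step_duration(self, current, now):
        if not current:
            # No steps completed yet: no basis for an estimate.
            return 0
        if self._time_after_first_step is not None and current > 1:
            # Ignore the (often slower) first step once later steps exist.
            time_per_unit = (now - self._time_after_first_step) / (current - 1)
        else:
            # Fall back to a simple average from the start time.
            time_per_unit = (now - self._start) / current
        if current == 1:
            self._time_after_first_step = now
        return time_per_unit


timer = StepTimer()
time.sleep(0.05)
print(timer.estimate_step_duration(1, time.time()))   # simple average, ~0.05s
time.sleep(0.05)
print(timer.estimate_step_duration(2, time.time()))   # now excludes the first step
```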
" 1388,"def test_query_devices_remote_no_sync(self) -> None: remote_user_id = ""@test:other"" local_user_id = ""@test:test"" remote_master_key = ""85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY"" remote_self_signing_key = ""QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ"" self.hs.get_federation_client().query_client_keys = mock.Mock( return_value=defer.succeed( { ""device_keys"": {remote_user_id: {}}, ""master_keys"": { remote_user_id: { ""user_id"": remote_user_id, ""usage"": [""master""], ""keys"": {""ed25519:"" + remote_master_key: remote_master_key}, }, }, ""self_signing_keys"": { remote_user_id: { ""user_id"": remote_user_id, ""usage"": [""self_signing""], ""keys"": { ""ed25519:"" + remote_self_signing_key: remote_self_signing_key }, } }, } ) ) e2e_handler = self.hs.get_e2e_keys_handler() query_result = self.get_success( e2e_handler.query_devices( { ""device_keys"": {remote_user_id: []}, }, timeout=10, from_user_id=local_user_id, from_device_id=""some_device_id"", ) ) self.assertEqual(query_result[""failures""], {}) self.assertEqual( query_result[""master_keys""], { remote_user_id: { ""user_id"": remote_user_id, ""usage"": [""master""], ""keys"": {""ed25519:"" + remote_master_key: remote_master_key}, }, }, ) self.assertEqual( query_result[""self_signing_keys""], { remote_user_id: { ""user_id"": remote_user_id, ""usage"": [""self_signing""], ""keys"": { ""ed25519:"" + remote_self_signing_key: remote_self_signing_key }, } }, ) ","Tests that querying keys for a remote user that we don't share a room with returns the cross signing keys correctly. ",21,114,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_query_devices_remote_no_sync(self) -> None: remote_user_id = ""@test:other"" local_user_id = ""@test:test"" remote_master_key = ""85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY"" remote_self_signing_key = ""QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ"" self.hs.get_federation_client().query_client_keys = mock.Mock( return_value=defer.succeed( { ""device_keys"": {remote_user_id: {}}, ""master_keys"": { remote_user_id: { ""user_id"": remote_user_id, ""usage"": [""master""], ""keys"": {""ed25519:"" + remote_master_key: remote_master_key}, }, }, ""self_signing_keys"": { remote_user_id: { ""user_id"": remote_user_id, ""usage"": [""self_signing""], ""keys"": { ""ed25519:"" + remote_self_signing_key: remote_self_signing_key }, } }, } ) ) e2e_handler = self.hs.get_e2e_keys_handler() query_result = self.get_success( e2e_handler.query_devices( { ""device_keys"": {remote_user_id: []}, }, timeout=10, from_user_id=local_user_id, from_device_id=""some_device_id"", ) ) self.assertEqual(query_result[""failures""], {}) self.assertEqual( query_result[""master_keys""], { remote_user_id: { ""user_id"": remote_user_id, ""usage"": [""master""], ""keys"": {""ed25519:"" + remote_master_key: remote_master_key}, }, }, ) self.assertEqual( query_result[""self_signing_keys""], { remote_user_id: { ""user_id"": remote_user_id, ""usage"": [""self_signing""], ""keys"": { ""ed25519:"" + remote_self_signing_key: remote_self_signing_key }, } }, ) ``` ###Assistant : Tests that querying keys for a remote user that we don't share a room with returns the cross signing keys correctly. " 1389,"def _matches_get_other_nodes(dictionary, nodes, node_ind): ind_node = nodes[node_ind] return [ind for ind in dictionary if nodes[ind] == ind_node] ",Find other wildcards that may have already been matched.,9,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _matches_get_other_nodes(dictionary, nodes, node_ind): ind_node = nodes[node_ind] return [ind for ind in dictionary if nodes[ind] == ind_node] ``` ###Assistant : Find other wildcards that may have already been matched. " 1390,"def save_pretrained(self, save_directory): for attribute_name in self.attributes: attribute = getattr(self, attribute_name) # Include the processor class in the attribute config so this processor can then be reloaded with the # `AutoProcessor` API. if hasattr(attribute, ""_set_processor_class""): attribute._set_processor_class(self.__class__.__name__) attribute.save_pretrained(save_directory) "," Saves the attributes of this processor (feature extractor, tokenizer...) in the specified directory so that it can be reloaded using the [`~ProcessorMixin.from_pretrained`] method. This class method is simply calling [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] and [`~tokenization_utils_base.PreTrainedTokenizer.save_pretrained`]. Please refer to the docstrings of the methods above for more information. Args: save_directory (`str` or `os.PathLike`): Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist). ",74,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_pretrained(self, save_directory): for attribute_name in self.attributes: attribute = getattr(self, attribute_name) # Include the processor class in the attribute config so this processor can then be reloaded with the # `AutoProcessor` API. if hasattr(attribute, ""_set_processor_class""): attribute._set_processor_class(self.__class__.__name__) attribute.save_pretrained(save_directory) ``` ###Assistant : Saves the attributes of this processor (feature extractor, tokenizer...) in the specified directory so that it can be reloaded using the [`~ProcessorMixin.from_pretrained`] method. This class method is simply calling [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] and [`~tokenization_utils_base.PreTrainedTokenizer.save_pretrained`]. Please refer to the docstrings of the methods above for more information. Args: save_directory (`str` or `os.PathLike`): Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will be created if it does not exist). " 1391,"def _user_has_module_perms(user, app_label): for backend in auth.get_backends(): if not hasattr(backend, ""has_module_perms""): continue try: if backend.has_module_perms(user, app_label): return True except PermissionDenied: return False return False ",Backend can raise `PermissionDenied` to short-circuit permission checking.,8,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _user_has_module_perms(user, app_label): for backend in auth.get_backends(): if not hasattr(backend, ""has_module_perms""): continue try: if backend.has_module_perms(user, app_label): return True except PermissionDenied: return False return False ``` ###Assistant : Backend can raise `PermissionDenied` to short-circuit permission checking. 
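Row 1391 above records Django's `_user_has_module_perms` loop, whose docstring notes that a backend may raise `PermissionDenied` to short-circuit the check. The sketch below illustrates that contract in isolation; the backend classes and the local `PermissionDenied` class are illustrative stand-ins, not real Django objects.

```python
# Standalone sketch of the short-circuit behaviour described in row 1391.
# The local PermissionDenied class stands in for django.core.exceptions.PermissionDenied.
class PermissionDenied(Exception):
    pass


class AllowEverythingBackend:
    def has_module_perms(self, user, app_label):
        return True


class HardDenyBackend:
    def has_module_perms(self, user, app_label):
        # Raising PermissionDenied stops the loop immediately with False,
        # even if a later backend would have granted the permission.
        raise PermissionDenied


def user_has_module_perms(backends, user, app_label):
    for backend in backends:
        if not hasattr(backend, "has_module_perms"):
            continue
        try:
            if backend.has_module_perms(user, app_label):
                return True
        except PermissionDenied:
            return False
    return False


print(user_has_module_perms([AllowEverythingBackend()], None, "blog"))                      # True
print(user_has_module_perms([HardDenyBackend(), AllowEverythingBackend()], None, "blog"))  # False
```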
" 1392,"def get_default_address(out, name): shipping_addresses = frappe.db.sql( , (name), as_dict=1, ) if shipping_addresses: for out.shipping_address in shipping_addresses: if out.shipping_address.is_shipping_address: return out.shipping_address out.shipping_address = shipping_addresses[0] return out.shipping_address @frappe.whitelist()"," SELECT parent, (SELECT is_shipping_address FROM tabAddress a WHERE a.name=dl.parent) AS is_shipping_address FROM `tabDynamic Link` dl WHERE dl.link_doctype=""Customer"" AND dl.link_name=%s AND dl.parenttype = ""Address"" ",23,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_default_address(out, name): shipping_addresses = frappe.db.sql( , (name), as_dict=1, ) if shipping_addresses: for out.shipping_address in shipping_addresses: if out.shipping_address.is_shipping_address: return out.shipping_address out.shipping_address = shipping_addresses[0] return out.shipping_address @frappe.whitelist() ``` ###Assistant : SELECT parent, (SELECT is_shipping_address FROM tabAddress a WHERE a.name=dl.parent) AS is_shipping_address FROM `tabDynamic Link` dl WHERE dl.link_doctype=""Customer"" AND dl.link_name=%s AND dl.parenttype = ""Address"" " 1393,"def force_list(elements=None, to_tuple=False): ctor = list if to_tuple is True: ctor = tuple return ctor() if elements is None else ctor(elements) \ if type(elements) in [list, set, tuple] else ctor([elements]) "," Makes sure `elements` is returned as a list, whether `elements` is a single item, already a list, or a tuple. Args: elements (Optional[any]): The inputs as single item, list, or tuple to be converted into a list/tuple. If None, returns empty list/tuple. to_tuple (bool): Whether to use tuple (instead of list). Returns: Union[list,tuple]: All given elements in a list/tuple depending on `to_tuple`'s value. If elements is None, returns an empty list/tuple. ",71,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def force_list(elements=None, to_tuple=False): ctor = list if to_tuple is True: ctor = tuple return ctor() if elements is None else ctor(elements) \ if type(elements) in [list, set, tuple] else ctor([elements]) ``` ###Assistant : Makes sure `elements` is returned as a list, whether `elements` is a single item, already a list, or a tuple. Args: elements (Optional[any]): The inputs as single item, list, or tuple to be converted into a list/tuple. If None, returns empty list/tuple. to_tuple (bool): Whether to use tuple (instead of list). Returns: Union[list,tuple]: All given elements in a list/tuple depending on `to_tuple`'s value. If elements is None, returns an empty list/tuple. 
" 1394,"def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): if order == 3: K = steps // 3 + 1 if steps % 3 == 0: orders = [3, ] * (K - 2) + [2, 1] elif steps % 3 == 1: orders = [3, ] * (K - 1) + [1] else: orders = [3, ] * (K - 1) + [2] elif order == 2: if steps % 2 == 0: K = steps // 2 orders = [2, ] * K else: K = steps // 2 + 1 orders = [2, ] * (K - 1) + [1] elif order == 1: K = 1 orders = [1, ] * steps else: raise ValueError(""'order' must be '1' or '2' or '3'."") if skip_type == 'logSNR': # To reproduce the results in DPM-Solver paper timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) else: timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[ torch.cumsum(torch.tensor([0, ] + orders)).to(device)] return timesteps_outer, orders "," Get the order of each step for sampling by the singlestep DPM-Solver. We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as ""DPM-Solver-fast"". Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: - If order == 1: We take `steps` of DPM-Solver-1 (i.e. DDIM). - If order == 2: - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. - If steps % 2 == 0, we use K steps of DPM-Solver-2. - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. - If order == 3: - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. ============================================ Args: order: A `int`. The max order for the solver (2 or 3). steps: A `int`. The total number of function evaluations (NFE). skip_type: A `str`. The type for the spacing of the time steps. We support three types: - 'logSNR': uniform logSNR for the time steps. - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) t_T: A `float`. The starting time of the sampling (default is T). t_0: A `float`. The ending time of the sampling (default is epsilon). device: A torch device. Returns: orders: A list of the solver order of each step. ",309,159,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): if order == 3: K = steps // 3 + 1 if steps % 3 == 0: orders = [3, ] * (K - 2) + [2, 1] elif steps % 3 == 1: orders = [3, ] * (K - 1) + [1] else: orders = [3, ] * (K - 1) + [2] elif order == 2: if steps % 2 == 0: K = steps // 2 orders = [2, ] * K else: K = steps // 2 + 1 orders = [2, ] * (K - 1) + [1] elif order == 1: K = 1 orders = [1, ] * steps else: raise ValueError(""'order' must be '1' or '2' or '3'."") if skip_type == 'logSNR': # To reproduce the results in DPM-Solver paper timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) else: timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[ torch.cumsum(torch.tensor([0, ] + orders)).to(device)] return timesteps_outer, orders ``` ###Assistant : Get the order of each step for sampling by the singlestep DPM-Solver. 
We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as ""DPM-Solver-fast"". Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: - If order == 1: We take `steps` of DPM-Solver-1 (i.e. DDIM). - If order == 2: - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. - If steps % 2 == 0, we use K steps of DPM-Solver-2. - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. - If order == 3: - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. ============================================ Args: order: A `int`. The max order for the solver (2 or 3). steps: A `int`. The total number of function evaluations (NFE). skip_type: A `str`. The type for the spacing of the time steps. We support three types: - 'logSNR': uniform logSNR for the time steps. - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) t_T: A `float`. The starting time of the sampling (default is T). t_0: A `float`. The ending time of the sampling (default is epsilon). device: A torch device. Returns: orders: A list of the solver order of each step. " 1395,"def _aligned_zeros(shape, dtype=float, order=""C"", align=None): dtype = np.dtype(dtype) if dtype == np.dtype(object): # Can't do this, fall back to standard allocation (which # should always be sufficiently aligned) if align is not None: raise ValueError(""object array alignment not supported"") return np.zeros(shape, dtype=dtype, order=order) if align is None: align = dtype.alignment if not hasattr(shape, '__len__'): shape = (shape,) size = functools.reduce(operator.mul, shape) * dtype.itemsize buf = np.empty(size + 2*align + 1, np.uint8) ptr = buf.__array_interface__['data'][0] offset = ptr % align if offset != 0: offset = align - offset if (ptr % (2*align)) == 0: offset += align # Note: slices producing 0-size arrays do not necessarily change # data pointer --- so we use and allocate size+1 buf = buf[offset:offset+size+1][:-1] buf.fill(0) data = np.ndarray(shape, dtype, buf, order=order) return data "," Allocate a new ndarray with aligned memory. The ndarray is guaranteed *not* aligned to twice the requested alignment. Eg, if align=4, guarantees it is not aligned to 8. If align=None uses dtype.alignment.",32,129,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _aligned_zeros(shape, dtype=float, order=""C"", align=None): dtype = np.dtype(dtype) if dtype == np.dtype(object): # Can't do this, fall back to standard allocation (which # should always be sufficiently aligned) if align is not None: raise ValueError(""object array alignment not supported"") return np.zeros(shape, dtype=dtype, order=order) if align is None: align = dtype.alignment if not hasattr(shape, '__len__'): shape = (shape,) size = functools.reduce(operator.mul, shape) * dtype.itemsize buf = np.empty(size + 2*align + 1, np.uint8) ptr = buf.__array_interface__['data'][0] offset = ptr % align if offset != 0: offset = align - offset if (ptr % (2*align)) == 0: offset += align # Note: slices producing 0-size arrays do not necessarily change # data pointer --- so we use and allocate size+1 buf = buf[offset:offset+size+1][:-1] buf.fill(0) data = np.ndarray(shape, dtype, buf, order=order) return data ``` ###Assistant : Allocate a new ndarray with aligned memory. The ndarray is guaranteed *not* aligned to twice the requested alignment. Eg, if align=4, guarantees it is not aligned to 8. If align=None uses dtype.alignment. " 1396,"def smart_resize(x, size, interpolation='bilinear'): if len(size) != 2: raise ValueError('Expected `size` to be a tuple of 2 integers, ' f'but got: {size}.') img = tf.convert_to_tensor(x) if img.shape.rank is not None: if img.shape.rank < 3 or img.shape.rank > 4: raise ValueError( 'Expected an image array with shape `(height, width, channels)`, ' 'or `(batch_size, height, width, channels)`, but ' f'got input with incorrect rank, of shape {img.shape}.') shape = tf.shape(img) height, width = shape[-3], shape[-2] target_height, target_width = size if img.shape.rank is not None: static_num_channels = img.shape[-1] else: static_num_channels = None crop_height = tf.cast( tf.cast(width * target_height, 'float32') / target_width, 'int32') crop_width = tf.cast( tf.cast(height * target_width, 'float32') / target_height, 'int32') # Set back to input height / width if crop_height / crop_width is not smaller. crop_height = tf.minimum(height, crop_height) crop_width = tf.minimum(width, crop_width) crop_box_hstart = tf.cast( tf.cast(height - crop_height, 'float32') / 2, 'int32') crop_box_wstart = tf.cast(tf.cast(width - crop_width, 'float32') / 2, 'int32') if img.shape.rank == 4: crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0]) crop_box_size = tf.stack([-1, crop_height, crop_width, -1]) else: crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0]) crop_box_size = tf.stack([crop_height, crop_width, -1]) img = tf.slice(img, crop_box_start, crop_box_size) img = tf.image.resize(images=img, size=size, method=interpolation) # Apparent bug in resize_images_v2 may cause shape to be lost if img.shape.rank is not None: if img.shape.rank == 4: img.set_shape((None, None, None, static_num_channels)) if img.shape.rank == 3: img.set_shape((None, None, static_num_channels)) if isinstance(x, np.ndarray): return img.numpy() return img @keras_export('keras.utils.array_to_img', 'keras.preprocessing.image.array_to_img')","Resize images to a target size without aspect ratio distortion. TensorFlow image datasets typically yield images that have each a different size. However, these images need to be batched before they can be processed by Keras layers. To be batched, images need to share the same height and width. 
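Row 1395 shows NumPy's `_aligned_zeros` test helper, which over-allocates a byte buffer and slices it at an offset so the data pointer lands on the requested boundary. Below is a simplified sketch of just that core trick; unlike the original it only guarantees alignment to `align` (it does not reproduce the extra property of not being aligned to twice `align`), and the function name is illustrative.

```python
import numpy as np

# Compact sketch of the over-allocate-and-offset trick used by _aligned_zeros:
# grab extra bytes, then slice the uint8 buffer so the data pointer of the
# final view hits the requested alignment.
def aligned_zeros(shape, dtype=float, align=64):
    dtype = np.dtype(dtype)
    size = int(np.prod(shape)) * dtype.itemsize
    buf = np.empty(size + align, np.uint8)   # over-allocate by `align` bytes
    ptr = buf.__array_interface__['data'][0]
    offset = (-ptr) % align                  # bytes needed to reach alignment
    buf = buf[offset:offset + size]
    buf.fill(0)
    return np.ndarray(shape, dtype, buf)


a = aligned_zeros((4, 4), align=64)
ptr = a.__array_interface__['data'][0]
print(ptr % 64 == 0)   # True: the data pointer is 64-byte aligned
print(a.sum() == 0.0)  # True: the buffer is zero-filled
```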
You could simply do: ```python size = (200, 200) ds = ds.map(lambda img: tf.image.resize(img, size)) ``` However, if you do this, you distort the aspect ratio of your images, since in general they do not all have the same aspect ratio as `size`. This is fine in many cases, but not always (e.g. for GANs this can be a problem). Note that passing the argument `preserve_aspect_ratio=True` to `resize` will preserve the aspect ratio, but at the cost of no longer respecting the provided target size. Because `tf.image.resize` doesn't crop images, your output images will still have different sizes. This calls for: ```python size = (200, 200) ds = ds.map(lambda img: smart_resize(img, size)) ``` Your output images will actually be `(200, 200)`, and will not be distorted. Instead, the parts of the image that do not fit within the target size get cropped out. The resizing process is: 1. Take the largest centered crop of the image that has the same aspect ratio as the target size. For instance, if `size=(200, 200)` and the input image has size `(340, 500)`, we take a crop of `(340, 340)` centered along the width. 2. Resize the cropped image to the target size. In the example above, we resize the `(340, 340)` crop to `(200, 200)`. Args: x: Input image or batch of images (as a tensor or NumPy array). Must be in format `(height, width, channels)` or `(batch_size, height, width, channels)`. size: Tuple of `(height, width)` integer. Target size. interpolation: String, interpolation to use for resizing. Defaults to `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`. Returns: Array with shape `(size[0], size[1], channels)`. If the input image was a NumPy array, the output is a NumPy array, and if it was a TF tensor, the output is a TF tensor. ",348,228,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def smart_resize(x, size, interpolation='bilinear'): if len(size) != 2: raise ValueError('Expected `size` to be a tuple of 2 integers, ' f'but got: {size}.') img = tf.convert_to_tensor(x) if img.shape.rank is not None: if img.shape.rank < 3 or img.shape.rank > 4: raise ValueError( 'Expected an image array with shape `(height, width, channels)`, ' 'or `(batch_size, height, width, channels)`, but ' f'got input with incorrect rank, of shape {img.shape}.') shape = tf.shape(img) height, width = shape[-3], shape[-2] target_height, target_width = size if img.shape.rank is not None: static_num_channels = img.shape[-1] else: static_num_channels = None crop_height = tf.cast( tf.cast(width * target_height, 'float32') / target_width, 'int32') crop_width = tf.cast( tf.cast(height * target_width, 'float32') / target_height, 'int32') # Set back to input height / width if crop_height / crop_width is not smaller. 
crop_height = tf.minimum(height, crop_height) crop_width = tf.minimum(width, crop_width) crop_box_hstart = tf.cast( tf.cast(height - crop_height, 'float32') / 2, 'int32') crop_box_wstart = tf.cast(tf.cast(width - crop_width, 'float32') / 2, 'int32') if img.shape.rank == 4: crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0]) crop_box_size = tf.stack([-1, crop_height, crop_width, -1]) else: crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0]) crop_box_size = tf.stack([crop_height, crop_width, -1]) img = tf.slice(img, crop_box_start, crop_box_size) img = tf.image.resize(images=img, size=size, method=interpolation) # Apparent bug in resize_images_v2 may cause shape to be lost if img.shape.rank is not None: if img.shape.rank == 4: img.set_shape((None, None, None, static_num_channels)) if img.shape.rank == 3: img.set_shape((None, None, static_num_channels)) if isinstance(x, np.ndarray): return img.numpy() return img @keras_export('keras.utils.array_to_img', 'keras.preprocessing.image.array_to_img') ``` ###Assistant : Resize images to a target size without aspect ratio distortion. TensorFlow image datasets typically yield images that have each a different size. However, these images need to be batched before they can be processed by Keras layers. To be batched, images need to share the same height and width. You could simply do: ```python size = (200, 200) ds = ds.map(lambda img: tf.image.resize(img, size)) ``` However, if you do this, you distort the aspect ratio of your images, since in general they do not all have the same aspect ratio as `size`. This is fine in many cases, but not always (e.g. for GANs this can be a problem). Note that passing the argument `preserve_aspect_ratio=True` to `resize` will preserve the aspect ratio, but at the cost of no longer respecting the provided target size. Because `tf.image.resize` doesn't crop images, your output images will still have different sizes. This calls for: ```python size = (200, 200) ds = ds.map(lambda img: smart_resize(img, size)) ``` Your output images will actually be `(200, 200)`, and will not be distorted. Instead, the parts of the image that do not fit within the target size get cropped out. The resizing process is: 1. Take the largest centered crop of the image that has the same aspect ratio as the target size. For instance, if `size=(200, 200)` and the input image has size `(340, 500)`, we take a crop of `(340, 340)` centered along the width. 2. Resize the cropped image to the target size. In the example above, we resize the `(340, 340)` crop to `(200, 200)`. Args: x: Input image or batch of images (as a tensor or NumPy array). Must be in format `(height, width, channels)` or `(batch_size, height, width, channels)`. size: Tuple of `(height, width)` integer. Target size. interpolation: String, interpolation to use for resizing. Defaults to `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`. Returns: Array with shape `(size[0], size[1], channels)`. If the input image was a NumPy array, the output is a NumPy array, and if it was a TF tensor, the output is a TF tensor. 
" 1397,"def gather(tensor): if AcceleratorState().distributed_type == DistributedType.TPU: return _tpu_gather(tensor, name=""accelerate.utils.gather"") elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]: return _gpu_gather(tensor) elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU: return _cpu_gather(tensor) else: return tensor "," Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices. Args: tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`): The data to gather. Returns: The same data structure as :obj:`tensor` with all tensors sent to the proper device. ",37,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def gather(tensor): if AcceleratorState().distributed_type == DistributedType.TPU: return _tpu_gather(tensor, name=""accelerate.utils.gather"") elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]: return _gpu_gather(tensor) elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU: return _cpu_gather(tensor) else: return tensor ``` ###Assistant : Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices. Args: tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`): The data to gather. Returns: The same data structure as :obj:`tensor` with all tensors sent to the proper device. " 1398,"def speed_metrics(split, start_time, num_samples=None, num_steps=None): runtime = time.time() - start_time result = {f""{split}_runtime"": round(runtime, 4)} if num_samples is not None: samples_per_second = num_samples / runtime result[f""{split}_samples_per_second""] = round(samples_per_second, 3) if num_steps is not None: steps_per_second = num_steps / runtime result[f""{split}_steps_per_second""] = round(steps_per_second, 3) return result "," Measure and return speed performance metrics. This function requires a time snapshot `start_time` before the operation to be measured starts and this function should be run immediately after the operation to be measured has completed. Args: - split: name to prefix metric (like train, eval, test...) - start_time: operation start time - num_samples: number of samples processed ",57,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def speed_metrics(split, start_time, num_samples=None, num_steps=None): runtime = time.time() - start_time result = {f""{split}_runtime"": round(runtime, 4)} if num_samples is not None: samples_per_second = num_samples / runtime result[f""{split}_samples_per_second""] = round(samples_per_second, 3) if num_steps is not None: steps_per_second = num_steps / runtime result[f""{split}_steps_per_second""] = round(steps_per_second, 3) return result ``` ###Assistant : Measure and return speed performance metrics. This function requires a time snapshot `start_time` before the operation to be measured starts and this function should be run immediately after the operation to be measured has completed. Args: - split: name to prefix metric (like train, eval, test...) 
- start_time: operation start time - num_samples: number of samples processed " 1399,"def parse_date_fields(year_col, month_col, day_col) -> npt.NDArray[np.object_]: warnings.warn( , # noqa: E501 FutureWarning, stacklevel=find_stack_level(), ) year_col = _maybe_cast(year_col) month_col = _maybe_cast(month_col) day_col = _maybe_cast(day_col) return parsing.try_parse_year_month_day(year_col, month_col, day_col) "," Parse columns with years, months and days into a single date column. .. deprecated:: 1.2 Use pd.to_datetime({""year"": year_col, ""month"": month_col, ""day"": day_col}) instead to get a Pandas Series. Use ser = pd.to_datetime({""year"": year_col, ""month"": month_col, ""day"": day_col}) and np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. ",49,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse_date_fields(year_col, month_col, day_col) -> npt.NDArray[np.object_]: warnings.warn( , # noqa: E501 FutureWarning, stacklevel=find_stack_level(), ) year_col = _maybe_cast(year_col) month_col = _maybe_cast(month_col) day_col = _maybe_cast(day_col) return parsing.try_parse_year_month_day(year_col, month_col, day_col) ``` ###Assistant : Parse columns with years, months and days into a single date column. .. deprecated:: 1.2 Use pd.to_datetime({""year"": year_col, ""month"": month_col, ""day"": day_col}) instead to get a Pandas Series. Use ser = pd.to_datetime({""year"": year_col, ""month"": month_col, ""day"": day_col}) and np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. " 1400,"def test_poisson_vs_mse(): rng = np.random.RandomState(42) n_train, n_test, n_features = 500, 500, 10 X = datasets.make_low_rank_matrix( n_samples=n_train + n_test, n_features=n_features, random_state=rng ) # We create a log-linear Poisson model and downscale coef as it will get # exponentiated. coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) y = rng.poisson(lam=np.exp(X @ coef)) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=n_test, random_state=rng ) # We prevent some overfitting by setting min_samples_split=10. forest_poi = RandomForestRegressor( criterion=""poisson"", min_samples_leaf=10, max_features=""sqrt"", random_state=rng ) forest_mse = RandomForestRegressor( criterion=""squared_error"", min_samples_leaf=10, max_features=""sqrt"", random_state=rng, ) forest_poi.fit(X_train, y_train) forest_mse.fit(X_train, y_train) dummy = DummyRegressor(strategy=""mean"").fit(X_train, y_train) for X, y, val in [(X_train, y_train, ""train""), (X_test, y_test, ""test"")]: metric_poi = mean_poisson_deviance(y, forest_poi.predict(X)) # squared_error forest might produce non-positive predictions => clip # If y = 0 for those, the poisson deviance gets too good. # If we drew more samples, we would eventually get y > 0 and the # poisson deviance would explode, i.e. be undefined. Therefore, we do # not clip to a tiny value like 1e-15, but to 1e-6. This acts like a # small penalty to the non-positive predictions. metric_mse = mean_poisson_deviance( y, np.clip(forest_mse.predict(X), 1e-6, None) ) metric_dummy = mean_poisson_deviance(y, dummy.predict(X)) # As squared_error might correctly predict 0 in train set, its train # score can be better than Poisson. This is no longer the case for the # test set. But keep the above comment for clipping in mind. 
if val == ""test"": assert metric_poi < metric_mse assert metric_poi < 0.5 * metric_dummy @pytest.mark.parametrize(""criterion"", (""poisson"", ""squared_error""))","Test that random forest with poisson criterion performs better than mse for a poisson target. There is a similar test for DecisionTreeRegressor. ",22,247,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_poisson_vs_mse(): rng = np.random.RandomState(42) n_train, n_test, n_features = 500, 500, 10 X = datasets.make_low_rank_matrix( n_samples=n_train + n_test, n_features=n_features, random_state=rng ) # We create a log-linear Poisson model and downscale coef as it will get # exponentiated. coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0) y = rng.poisson(lam=np.exp(X @ coef)) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=n_test, random_state=rng ) # We prevent some overfitting by setting min_samples_split=10. forest_poi = RandomForestRegressor( criterion=""poisson"", min_samples_leaf=10, max_features=""sqrt"", random_state=rng ) forest_mse = RandomForestRegressor( criterion=""squared_error"", min_samples_leaf=10, max_features=""sqrt"", random_state=rng, ) forest_poi.fit(X_train, y_train) forest_mse.fit(X_train, y_train) dummy = DummyRegressor(strategy=""mean"").fit(X_train, y_train) for X, y, val in [(X_train, y_train, ""train""), (X_test, y_test, ""test"")]: metric_poi = mean_poisson_deviance(y, forest_poi.predict(X)) # squared_error forest might produce non-positive predictions => clip # If y = 0 for those, the poisson deviance gets too good. # If we drew more samples, we would eventually get y > 0 and the # poisson deviance would explode, i.e. be undefined. Therefore, we do # not clip to a tiny value like 1e-15, but to 1e-6. This acts like a # small penalty to the non-positive predictions. metric_mse = mean_poisson_deviance( y, np.clip(forest_mse.predict(X), 1e-6, None) ) metric_dummy = mean_poisson_deviance(y, dummy.predict(X)) # As squared_error might correctly predict 0 in train set, its train # score can be better than Poisson. This is no longer the case for the # test set. But keep the above comment for clipping in mind. if val == ""test"": assert metric_poi < metric_mse assert metric_poi < 0.5 * metric_dummy @pytest.mark.parametrize(""criterion"", (""poisson"", ""squared_error"")) ``` ###Assistant : Test that random forest with poisson criterion performs better than mse for a poisson target. There is a similar test for DecisionTreeRegressor. " 1401,"def _external_caller_info(): frame = inspect.currentframe() caller = frame levels = 0 while caller.f_code.co_filename == __file__: caller = caller.f_back levels += 1 return { ""lineno"": caller.f_lineno, ""filename"": os.path.basename(caller.f_code.co_filename), } ","Get the info from the caller frame. Used to override the logging function and line number with the correct ones. See the comment on _patched_makeRecord for more info. ",28,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _external_caller_info(): frame = inspect.currentframe() caller = frame levels = 0 while caller.f_code.co_filename == __file__: caller = caller.f_back levels += 1 return { ""lineno"": caller.f_lineno, ""filename"": os.path.basename(caller.f_code.co_filename), } ``` ###Assistant : Get the info from the caller frame. Used to override the logging function and line number with the correct ones. 
See the comment on _patched_makeRecord for more info. " 1402,"def _try_restart_fedora(self) -> None: try: util.run_script(['systemctl', 'restart', 'httpd']) except errors.SubprocessError as err: raise errors.MisconfigurationError(str(err)) # Finish with actual config check to see if systemctl restart helped super().config_test() "," Tries to restart httpd using systemctl to generate the self signed key pair. ",13,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _try_restart_fedora(self) -> None: try: util.run_script(['systemctl', 'restart', 'httpd']) except errors.SubprocessError as err: raise errors.MisconfigurationError(str(err)) # Finish with actual config check to see if systemctl restart helped super().config_test() ``` ###Assistant : Tries to restart httpd using systemctl to generate the self signed key pair. " 1403,"async def sock_accept(self, sock): base_events._check_ssl_socket(sock) if self._debug and sock.gettimeout() != 0: raise ValueError(""the socket must be non-blocking"") fut = self.create_future() self._sock_accept(fut, sock) return await fut ","Accept a connection. The socket must be bound to an address and listening for connections. The return value is a pair (conn, address) where conn is a new socket object usable to send and receive data on the connection, and address is the address bound to the socket on the other end of the connection. ",55,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def sock_accept(self, sock): base_events._check_ssl_socket(sock) if self._debug and sock.gettimeout() != 0: raise ValueError(""the socket must be non-blocking"") fut = self.create_future() self._sock_accept(fut, sock) return await fut ``` ###Assistant : Accept a connection. The socket must be bound to an address and listening for connections. The return value is a pair (conn, address) where conn is a new socket object usable to send and receive data on the connection, and address is the address bound to the socket on the other end of the connection. " 1404,"def test_state(self, api, state): stream = AdsInsights( api=api, start_date=datetime(2010, 1, 1), end_date=datetime(2011, 1, 1), ) assert stream.state == {} stream.state = state actual_state = stream.state actual_state[""slices""] = sorted(actual_state.get(""slices"", [])) state[""slices""] = sorted(state.get(""slices"", [])) assert actual_state == state ",State setter/getter should work with all combinations,7,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_state(self, api, state): stream = AdsInsights( api=api, start_date=datetime(2010, 1, 1), end_date=datetime(2011, 1, 1), ) assert stream.state == {} stream.state = state actual_state = stream.state actual_state[""slices""] = sorted(actual_state.get(""slices"", [])) state[""slices""] = sorted(state.get(""slices"", [])) assert actual_state == state ``` ###Assistant : State setter/getter should work with all combinations " 1405,"def get_snql_query(self) -> None: raise NotImplementedError(""get_snql_query cannot be implemented for MetricsQueryBuilder"") ","Because metrics table queries need to make multiple requests per metric type this function cannot be inmplemented see run_query",19,11,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_snql_query(self) -> None: raise NotImplementedError(""get_snql_query cannot be implemented for MetricsQueryBuilder"") ``` ###Assistant : Because metrics table queries need to make multiple requests per metric type this function cannot be inmplemented see run_query " 1406,"def _validate(self, params): remote_worker_stats = [w.validate.remote(**params) for w in self.remote_workers] return remote_worker_stats ",Runs validation for each worker. Returns results as promises.,9,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _validate(self, params): remote_worker_stats = [w.validate.remote(**params) for w in self.remote_workers] return remote_worker_stats ``` ###Assistant : Runs validation for each worker. Returns results as promises. " 1407,"def set_aspect(self, aspect, adjustable=None, anchor=None, share=False): _api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'), aspect=aspect) super().set_aspect( aspect='auto', adjustable=adjustable, anchor=anchor, share=share) if aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'): if aspect == 'equal': axis_indices = [0, 1, 2] elif aspect == 'equalxy': axis_indices = [0, 1] elif aspect == 'equalxz': axis_indices = [0, 2] elif aspect == 'equalyz': axis_indices = [1, 2] view_intervals = np.array([self.xaxis.get_view_interval(), self.yaxis.get_view_interval(), self.zaxis.get_view_interval()]) mean = np.mean(view_intervals, axis=1) delta = np.max(np.ptp(view_intervals, axis=1)) deltas = delta * self._box_aspect / min(self._box_aspect) for i, set_lim in enumerate((self.set_xlim3d, self.set_ylim3d, self.set_zlim3d)): if i in axis_indices: set_lim(mean[i] - deltas[i]/2., mean[i] + deltas[i]/2.) "," Set the aspect ratios. Parameters ---------- aspect : {'auto', 'equal', 'equalxy', 'equalxz', 'equalyz'} Possible values: ========= ================================================== value description ========= ================================================== 'auto' automatic; fill the position rectangle with data. 'equal' adapt all the axes to have equal aspect ratios. 'equalxy' adapt the x and y axes to have equal aspect ratios. 'equalxz' adapt the x and z axes to have equal aspect ratios. 'equalyz' adapt the y and z axes to have equal aspect ratios. ========= ================================================== adjustable : None Currently ignored by Axes3D If not *None*, this defines which parameter will be adjusted to meet the required aspect. See `.set_adjustable` for further details. anchor : None or str or 2-tuple of float, optional If not *None*, this defines where the Axes will be drawn if there is extra space due to aspect constraints. The most common way to to specify the anchor are abbreviations of cardinal directions: ===== ===================== value description ===== ===================== 'C' centered 'SW' lower left corner 'S' middle of bottom edge 'SE' lower right corner etc. ===== ===================== See `~.Axes.set_anchor` for further details. share : bool, default: False If ``True``, apply the settings to all shared Axes. See Also -------- mpl_toolkits.mplot3d.axes3d.Axes3D.set_box_aspect ",195,94,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def set_aspect(self, aspect, adjustable=None, anchor=None, share=False): _api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'), aspect=aspect) super().set_aspect( aspect='auto', adjustable=adjustable, anchor=anchor, share=share) if aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'): if aspect == 'equal': axis_indices = [0, 1, 2] elif aspect == 'equalxy': axis_indices = [0, 1] elif aspect == 'equalxz': axis_indices = [0, 2] elif aspect == 'equalyz': axis_indices = [1, 2] view_intervals = np.array([self.xaxis.get_view_interval(), self.yaxis.get_view_interval(), self.zaxis.get_view_interval()]) mean = np.mean(view_intervals, axis=1) delta = np.max(np.ptp(view_intervals, axis=1)) deltas = delta * self._box_aspect / min(self._box_aspect) for i, set_lim in enumerate((self.set_xlim3d, self.set_ylim3d, self.set_zlim3d)): if i in axis_indices: set_lim(mean[i] - deltas[i]/2., mean[i] + deltas[i]/2.) ``` ###Assistant : Set the aspect ratios. Parameters ---------- aspect : {'auto', 'equal', 'equalxy', 'equalxz', 'equalyz'} Possible values: ========= ================================================== value description ========= ================================================== 'auto' automatic; fill the position rectangle with data. 'equal' adapt all the axes to have equal aspect ratios. 'equalxy' adapt the x and y axes to have equal aspect ratios. 'equalxz' adapt the x and z axes to have equal aspect ratios. 'equalyz' adapt the y and z axes to have equal aspect ratios. ========= ================================================== adjustable : None Currently ignored by Axes3D If not *None*, this defines which parameter will be adjusted to meet the required aspect. See `.set_adjustable` for further details. anchor : None or str or 2-tuple of float, optional If not *None*, this defines where the Axes will be drawn if there is extra space due to aspect constraints. The most common way to to specify the anchor are abbreviations of cardinal directions: ===== ===================== value description ===== ===================== 'C' centered 'SW' lower left corner 'S' middle of bottom edge 'SE' lower right corner etc. ===== ===================== See `~.Axes.set_anchor` for further details. share : bool, default: False If ``True``, apply the settings to all shared Axes. See Also -------- mpl_toolkits.mplot3d.axes3d.Axes3D.set_box_aspect " 1408,"def __getattr__(cls, name): if _is_dunder(name): raise AttributeError(name) try: return cls._member_map_[name] except KeyError: raise AttributeError(name) from None "," Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves. ",42,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __getattr__(cls, name): if _is_dunder(name): raise AttributeError(name) try: return cls._member_map_[name] except KeyError: raise AttributeError(name) from None ``` ###Assistant : Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves. 
" 1409,"def test_n_clusters(n_clusters): rng = np.random.RandomState(0) X = rng.rand(10, 2) bisect_means = BisectingKMeans(n_clusters=n_clusters, random_state=0) bisect_means.fit(X) assert_array_equal(np.unique(bisect_means.labels_), np.arange(n_clusters)) ","Test if resulting labels are in range [0, n_clusters - 1].",11,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_n_clusters(n_clusters): rng = np.random.RandomState(0) X = rng.rand(10, 2) bisect_means = BisectingKMeans(n_clusters=n_clusters, random_state=0) bisect_means.fit(X) assert_array_equal(np.unique(bisect_means.labels_), np.arange(n_clusters)) ``` ###Assistant : Test if resulting labels are in range [0, n_clusters - 1]. " 1410,"def test_nested_ungrouped_nav(self): nav_cfg = [ {'Home': 'index.md'}, {'Contact': 'about/contact.md'}, {'License Title': 'about/sub/license.md'}, ] expected = dedent( ) cfg = load_config(nav=nav_cfg, site_url='http://example.com/') fs = [ File(list(item.values())[0], cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls']) for item in nav_cfg ] files = Files(fs) site_navigation = get_navigation(files, cfg) self.assertEqual(str(site_navigation).strip(), expected) self.assertEqual(len(site_navigation.items), 3) self.assertEqual(len(site_navigation.pages), 3) "," Page(title='Home', url='/') Page(title='Contact', url='/about/contact/') Page(title='License Title', url='/about/sub/license/') ",7,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_nested_ungrouped_nav(self): nav_cfg = [ {'Home': 'index.md'}, {'Contact': 'about/contact.md'}, {'License Title': 'about/sub/license.md'}, ] expected = dedent( ) cfg = load_config(nav=nav_cfg, site_url='http://example.com/') fs = [ File(list(item.values())[0], cfg['docs_dir'], cfg['site_dir'], cfg['use_directory_urls']) for item in nav_cfg ] files = Files(fs) site_navigation = get_navigation(files, cfg) self.assertEqual(str(site_navigation).strip(), expected) self.assertEqual(len(site_navigation.items), 3) self.assertEqual(len(site_navigation.pages), 3) ``` ###Assistant : Page(title='Home', url='/') Page(title='Contact', url='/about/contact/') Page(title='License Title', url='/about/sub/license/') " 1411,"def _get_log_file_names(self, name, unique=False): if unique: log_stdout = self._make_inc_temp( suffix="".out"", prefix=name, directory_name=self._logs_dir ) log_stderr = self._make_inc_temp( suffix="".err"", prefix=name, directory_name=self._logs_dir ) else: log_stdout = os.path.join(self._logs_dir, f""{name}.out"") log_stderr = os.path.join(self._logs_dir, f""{name}.err"") return log_stdout, log_stderr ","Generate partially randomized filenames for log files. Args: name (str): descriptive string for this log file. unique (bool): if true, a counter will be attached to `name` to ensure the returned filename is not already used. Returns: A tuple of two file names for redirecting (stdout, stderr). ",47,32,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _get_log_file_names(self, name, unique=False): if unique: log_stdout = self._make_inc_temp( suffix="".out"", prefix=name, directory_name=self._logs_dir ) log_stderr = self._make_inc_temp( suffix="".err"", prefix=name, directory_name=self._logs_dir ) else: log_stdout = os.path.join(self._logs_dir, f""{name}.out"") log_stderr = os.path.join(self._logs_dir, f""{name}.err"") return log_stdout, log_stderr ``` ###Assistant : Generate partially randomized filenames for log files. Args: name (str): descriptive string for this log file. unique (bool): if true, a counter will be attached to `name` to ensure the returned filename is not already used. Returns: A tuple of two file names for redirecting (stdout, stderr). " 1412,"def yeardayscalendar(self, year, width=3): months = [ self.monthdayscalendar(year, i) for i in range(January, January+12) ] return [months[i:i+width] for i in range(0, len(months), width) ] "," Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are day numbers. Day numbers outside this month are zero. ",28,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def yeardayscalendar(self, year, width=3): months = [ self.monthdayscalendar(year, i) for i in range(January, January+12) ] return [months[i:i+width] for i in range(0, len(months), width) ] ``` ###Assistant : Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are day numbers. Day numbers outside this month are zero. " 1413,"def test_simplelistfilter_without_parameter(self): modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site) request = self.request_factory.get(""/"", {}) request.user = self.alfred msg = ""The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'."" with self.assertRaisesMessage(ImproperlyConfigured, msg): modeladmin.get_changelist_instance(request) "," Any SimpleListFilter must define a parameter_name. ",6,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_simplelistfilter_without_parameter(self): modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site) request = self.request_factory.get(""/"", {}) request.user = self.alfred msg = ""The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'."" with self.assertRaisesMessage(ImproperlyConfigured, msg): modeladmin.get_changelist_instance(request) ``` ###Assistant : Any SimpleListFilter must define a parameter_name. " 1414,"def from_session_or_email(cls, request, organization, email, instance=None, logger=None): invite_token, invite_member_id = get_invite_details(request) try: if invite_token and invite_member_id: om = OrganizationMember.objects.get(token=invite_token, id=invite_member_id) else: om = OrganizationMember.objects.get( email=email, organization=organization, user=None ) except OrganizationMember.DoesNotExist: # Unable to locate the pending organization member. Cannot setup # the invite helper. return None return cls( request=request, member_id=om.id, token=om.token, instance=instance, logger=logger ) "," Initializes the ApiInviteHelper by locating the pending organization member via the currently set pending invite details in the session, or via the passed email if no cookie is currently set. 
",30,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def from_session_or_email(cls, request, organization, email, instance=None, logger=None): invite_token, invite_member_id = get_invite_details(request) try: if invite_token and invite_member_id: om = OrganizationMember.objects.get(token=invite_token, id=invite_member_id) else: om = OrganizationMember.objects.get( email=email, organization=organization, user=None ) except OrganizationMember.DoesNotExist: # Unable to locate the pending organization member. Cannot setup # the invite helper. return None return cls( request=request, member_id=om.id, token=om.token, instance=instance, logger=logger ) ``` ###Assistant : Initializes the ApiInviteHelper by locating the pending organization member via the currently set pending invite details in the session, or via the passed email if no cookie is currently set. " 1415,"async def test_multiple_bleak_scanner_instances(hass): install_multiple_bleak_catcher() instance = bleak.BleakScanner() assert isinstance(instance, HaBleakScannerWrapper) uninstall_multiple_bleak_catcher() with patch(""bleak.get_platform_scanner_backend_type""): instance = bleak.BleakScanner() assert not isinstance(instance, HaBleakScannerWrapper) ",Test creating multiple BleakScanners without an integration.,7,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_multiple_bleak_scanner_instances(hass): install_multiple_bleak_catcher() instance = bleak.BleakScanner() assert isinstance(instance, HaBleakScannerWrapper) uninstall_multiple_bleak_catcher() with patch(""bleak.get_platform_scanner_backend_type""): instance = bleak.BleakScanner() assert not isinstance(instance, HaBleakScannerWrapper) ``` ###Assistant : Test creating multiple BleakScanners without an integration. " 1416,"def highlight_string(value, highlight, trim_pre=None, trim_post=None, trim_placeholder='...'): # Split value on highlight string try: pre, match, post = re.split(fr'({highlight})', value, maxsplit=1, flags=re.IGNORECASE) except ValueError: # Match not found return escape(value) # Trim pre/post sections to length if trim_pre and len(pre) > trim_pre: pre = trim_placeholder + pre[-trim_pre:] if trim_post and len(post) > trim_post: post = post[:trim_post] + trim_placeholder return f'{escape(pre)}{escape(match)}{escape(post)}' "," Highlight a string within a string and optionally trim the pre/post portions of the original string. ",16,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def highlight_string(value, highlight, trim_pre=None, trim_post=None, trim_placeholder='...'): # Split value on highlight string try: pre, match, post = re.split(fr'({highlight})', value, maxsplit=1, flags=re.IGNORECASE) except ValueError: # Match not found return escape(value) # Trim pre/post sections to length if trim_pre and len(pre) > trim_pre: pre = trim_placeholder + pre[-trim_pre:] if trim_post and len(post) > trim_post: post = post[:trim_post] + trim_placeholder return f'{escape(pre)}{escape(match)}{escape(post)}' ``` ###Assistant : Highlight a string within a string and optionally trim the pre/post portions of the original string. 
" 1417,"def node_table(self): self._check_connected() node_table = self.global_state_accessor.get_node_table() results = [] for node_info_item in node_table: item = gcs_utils.GcsNodeInfo.FromString(node_info_item) node_info = { ""NodeID"": ray._private.utils.binary_to_hex(item.node_id), ""Alive"": item.state == gcs_utils.GcsNodeInfo.GcsNodeState.Value(""ALIVE""), ""NodeManagerAddress"": item.node_manager_address, ""NodeManagerHostname"": item.node_manager_hostname, ""NodeManagerPort"": item.node_manager_port, ""ObjectManagerPort"": item.object_manager_port, ""ObjectStoreSocketName"": item.object_store_socket_name, ""RayletSocketName"": item.raylet_socket_name, ""MetricsExportPort"": item.metrics_export_port, ""NodeName"": item.node_name, } node_info[""alive""] = node_info[""Alive""] node_info[""Resources""] = ( {key: value for key, value in item.resources_total.items()} if node_info[""Alive""] else {} ) results.append(node_info) return results ","Fetch and parse the Gcs node info table. Returns: Information about the node in the cluster. ",16,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def node_table(self): self._check_connected() node_table = self.global_state_accessor.get_node_table() results = [] for node_info_item in node_table: item = gcs_utils.GcsNodeInfo.FromString(node_info_item) node_info = { ""NodeID"": ray._private.utils.binary_to_hex(item.node_id), ""Alive"": item.state == gcs_utils.GcsNodeInfo.GcsNodeState.Value(""ALIVE""), ""NodeManagerAddress"": item.node_manager_address, ""NodeManagerHostname"": item.node_manager_hostname, ""NodeManagerPort"": item.node_manager_port, ""ObjectManagerPort"": item.object_manager_port, ""ObjectStoreSocketName"": item.object_store_socket_name, ""RayletSocketName"": item.raylet_socket_name, ""MetricsExportPort"": item.metrics_export_port, ""NodeName"": item.node_name, } node_info[""alive""] = node_info[""Alive""] node_info[""Resources""] = ( {key: value for key, value in item.resources_total.items()} if node_info[""Alive""] else {} ) results.append(node_info) return results ``` ###Assistant : Fetch and parse the Gcs node info table. Returns: Information about the node in the cluster. " 1418,"def vocabulary_size(self): if tf.executing_eagerly(): return ( int(self.lookup_table.size().numpy()) + self._token_start_index() ) else: return self.lookup_table.size() + self._token_start_index() ","Gets the current size of the layer's vocabulary. Returns: The integer size of the vocabulary, including optional mask and oov indices. ",21,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def vocabulary_size(self): if tf.executing_eagerly(): return ( int(self.lookup_table.size().numpy()) + self._token_start_index() ) else: return self.lookup_table.size() + self._token_start_index() ``` ###Assistant : Gets the current size of the layer's vocabulary. Returns: The integer size of the vocabulary, including optional mask and oov indices. 
" 1419,"def test_version_with_invalid_names(): lslpp_mydog_out = ver_chk = MagicMock(return_value={""retcode"": 1, ""stdout"": lslpp_mydog_out}) with patch.dict(aixpkg.__grains__, {""osarch"": ""PowerPC_POWER8""}), patch.dict( aixpkg.__salt__, {""cmd.run_all"": ver_chk}, ): versions_checked = aixpkg.version( ""mydog"", versions_as_list=True, use_context=False ) assert ver_chk.call_count == 1 ver_chk.assert_called_with(""lslpp -Lq mydog"", python_shell=False) assert versions_checked == """" "," test version of packages lslpp: Fileset mydog not installed. State codes: A -- Applied. B -- Broken. C -- Committed. E -- EFIX Locked. O -- Obsolete. (partially migrated to newer version) ? -- Inconsistent State...Run lppchk -v. Type codes: F -- Installp Fileset P -- Product C -- Component T -- Feature R -- RPM Package E -- Interim Fix ",61,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_version_with_invalid_names(): lslpp_mydog_out = ver_chk = MagicMock(return_value={""retcode"": 1, ""stdout"": lslpp_mydog_out}) with patch.dict(aixpkg.__grains__, {""osarch"": ""PowerPC_POWER8""}), patch.dict( aixpkg.__salt__, {""cmd.run_all"": ver_chk}, ): versions_checked = aixpkg.version( ""mydog"", versions_as_list=True, use_context=False ) assert ver_chk.call_count == 1 ver_chk.assert_called_with(""lslpp -Lq mydog"", python_shell=False) assert versions_checked == """" ``` ###Assistant : test version of packages lslpp: Fileset mydog not installed. State codes: A -- Applied. B -- Broken. C -- Committed. E -- EFIX Locked. O -- Obsolete. (partially migrated to newer version) ? -- Inconsistent State...Run lppchk -v. Type codes: F -- Installp Fileset P -- Product C -- Component T -- Feature R -- RPM Package E -- Interim Fix " 1420,"def test_mapped_literal_length_increase_adds_additional_ti(dag_maker, session): with dag_maker(session=session) as dag: ","Test that when the length of mapped literal increases, additional ti is added",13,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_mapped_literal_length_increase_adds_additional_ti(dag_maker, session): with dag_maker(session=session) as dag: ``` ###Assistant : Test that when the length of mapped literal increases, additional ti is added " 1421,"def test_installed_without_username(self): # Remove username to simulate privacy mode del self.user_data_from_bitbucket[""principal""][""username""] response = self.client.post(self.path, data=self.user_data_from_bitbucket) assert response.status_code == 200 integration = Integration.objects.get(provider=self.provider, external_id=self.client_key) assert integration.name == self.user_display_name assert integration.metadata == self.user_metadata ",Test a user (not team) installation where the user has hidden their username from public view,16,31,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_installed_without_username(self): # Remove username to simulate privacy mode del self.user_data_from_bitbucket[""principal""][""username""] response = self.client.post(self.path, data=self.user_data_from_bitbucket) assert response.status_code == 200 integration = Integration.objects.get(provider=self.provider, external_id=self.client_key) assert integration.name == self.user_display_name assert integration.metadata == self.user_metadata ``` ###Assistant : Test a user (not team) installation where the user has hidden their username from public view " 1422,"def _command_display(self, command): build_tabs = getattr(self, f""_{command}_tabs"") build_tabs() "," Build the relevant command specific tabs based on the incoming Faceswap command. Parameters ---------- command: str The Faceswap command that is being executed ",23,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _command_display(self, command): build_tabs = getattr(self, f""_{command}_tabs"") build_tabs() ``` ###Assistant : Build the relevant command specific tabs based on the incoming Faceswap command. Parameters ---------- command: str The Faceswap command that is being executed " 1423,"def itermerged(self): for key in self: val = self._container[key.lower()] yield val[0], "", "".join(val[1:]) ","Iterate over all headers, merging duplicate ones together.",8,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def itermerged(self): for key in self: val = self._container[key.lower()] yield val[0], "", "".join(val[1:]) ``` ###Assistant : Iterate over all headers, merging duplicate ones together. " 1424,"def get_file_from_path(self, path): return self.src_paths.get(os.path.normpath(path)) ",Return a File instance with File.src_path equal to path.,9,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_file_from_path(self, path): return self.src_paths.get(os.path.normpath(path)) ``` ###Assistant : Return a File instance with File.src_path equal to path. " 1425,"def sleepdeprived(request): module = request.node.get_closest_marker( ""sleepdeprived_patched_module"").args[0] old_sleep, module.sleep = module.sleep, noop try: yield finally: module.sleep = old_sleep # Taken from # http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py @pytest.fixture","Mock sleep method in patched module to do nothing. Example: >>> import time >>> @pytest.mark.sleepdeprived_patched_module(time) >>> def test_foo(self, sleepdeprived): >>> pass ",21,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sleepdeprived(request): module = request.node.get_closest_marker( ""sleepdeprived_patched_module"").args[0] old_sleep, module.sleep = module.sleep, noop try: yield finally: module.sleep = old_sleep # Taken from # http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py @pytest.fixture ``` ###Assistant : Mock sleep method in patched module to do nothing. 
Example: >>> import time >>> @pytest.mark.sleepdeprived_patched_module(time) >>> def test_foo(self, sleepdeprived): >>> pass " 1426,"def _set_mouse_bindings(self) -> None: logger.debug(""Binding mouse events"") if system() == ""Linux"": self._canvas.tag_bind(self._canvas.image_id, """", self._on_bound_zoom) self._canvas.tag_bind(self._canvas.image_id, """", self._on_bound_zoom) else: self._canvas.tag_bind(self._canvas.image_id, """", self._on_bound_zoom) self._canvas.tag_bind(self._canvas.image_id, """", self._on_mouse_click) self._canvas.tag_bind(self._canvas.image_id, """", self._on_mouse_drag) logger.debug(""Bound mouse events"") "," Set the mouse bindings for interacting with the preview image Mousewheel: Zoom in and out Mouse click: Move image ",19,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_mouse_bindings(self) -> None: logger.debug(""Binding mouse events"") if system() == ""Linux"": self._canvas.tag_bind(self._canvas.image_id, """", self._on_bound_zoom) self._canvas.tag_bind(self._canvas.image_id, """", self._on_bound_zoom) else: self._canvas.tag_bind(self._canvas.image_id, """", self._on_bound_zoom) self._canvas.tag_bind(self._canvas.image_id, """", self._on_mouse_click) self._canvas.tag_bind(self._canvas.image_id, """", self._on_mouse_drag) logger.debug(""Bound mouse events"") ``` ###Assistant : Set the mouse bindings for interacting with the preview image Mousewheel: Zoom in and out Mouse click: Move image " 1427,"async def test_create_area_with_id_already_in_use(registry): area1 = registry.async_create(""mock"") updated_area1 = registry.async_update(area1.id, name=""New Name"") assert updated_area1.id == area1.id area2 = registry.async_create(""mock"") assert area2.id == ""mock_2"" ",Make sure that we can't create an area with a name already in use.,14,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_create_area_with_id_already_in_use(registry): area1 = registry.async_create(""mock"") updated_area1 = registry.async_update(area1.id, name=""New Name"") assert updated_area1.id == area1.id area2 = registry.async_create(""mock"") assert area2.id == ""mock_2"" ``` ###Assistant : Make sure that we can't create an area with a name already in use. " 1428,"def forward(self, *args, **kwargs): labels = kwargs.pop('labels', None) pooled, encoded = super(ErnieModelForSequenceClassification, self).forward(*args, **kwargs) hidden = self.dropout(pooled) logits = self.classifier(hidden) if labels is not None: if len(labels.shape) != 1: labels = labels.squeeze() loss = F.cross_entropy(logits, labels) else: loss = None return loss, logits "," Args: labels (optional, `Variable` of shape [batch_size]): ground truth label id for each sentence Returns: loss (`Variable` of shape []): Cross entropy loss mean over batch if labels not set, returns None logits (`Variable` of shape [batch_size, hidden_size]): output logits of classifier ",42,43,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def forward(self, *args, **kwargs): labels = kwargs.pop('labels', None) pooled, encoded = super(ErnieModelForSequenceClassification, self).forward(*args, **kwargs) hidden = self.dropout(pooled) logits = self.classifier(hidden) if labels is not None: if len(labels.shape) != 1: labels = labels.squeeze() loss = F.cross_entropy(logits, labels) else: loss = None return loss, logits ``` ###Assistant : Args: labels (optional, `Variable` of shape [batch_size]): ground truth label id for each sentence Returns: loss (`Variable` of shape []): Cross entropy loss mean over batch if labels not set, returns None logits (`Variable` of shape [batch_size, hidden_size]): output logits of classifier " 1429,"def _get_project(self, name): raise NotImplementedError('Please implement in the subclass') "," For a given project, get a dictionary mapping available versions to Distribution instances. This should be implemented in subclasses. If called from a locate() request, self.matcher will be set to a matcher for the requirement to satisfy, otherwise it will be None. ",42,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_project(self, name): raise NotImplementedError('Please implement in the subclass') ``` ###Assistant : For a given project, get a dictionary mapping available versions to Distribution instances. This should be implemented in subclasses. If called from a locate() request, self.matcher will be set to a matcher for the requirement to satisfy, otherwise it will be None. " 1430,"def test_pickle_empty(self): arr = np.array([]).reshape(999999, 0) pk_dmp = pickle.dumps(arr) pk_load = pickle.loads(pk_dmp) assert pk_load.size == 0 ","Checking if an empty array pickled and un-pickled will not cause a segmentation fault",14,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pickle_empty(self): arr = np.array([]).reshape(999999, 0) pk_dmp = pickle.dumps(arr) pk_load = pickle.loads(pk_dmp) assert pk_load.size == 0 ``` ###Assistant : Checking if an empty array pickled and un-pickled will not cause a segmentation fault " 1431,"def get_semantics(cls, kwargs, semantics=None): # TODO this should be get_variables since we have included x and y if semantics is None: semantics = cls.semantics variables = {} for key, val in kwargs.items(): if key in semantics and val is not None: variables[key] = val return variables ",Subset a dictionary arguments with known semantic variables.,8,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_semantics(cls, kwargs, semantics=None): # TODO this should be get_variables since we have included x and y if semantics is None: semantics = cls.semantics variables = {} for key, val in kwargs.items(): if key in semantics and val is not None: variables[key] = val return variables ``` ###Assistant : Subset a dictionary arguments with known semantic variables. 
" 1432,"def convert_deps_to_pip(deps, project=None, r=True, include_index=True): from pipenv.vendor.requirementslib.models.requirements import Requirement dependencies = [] for dep_name, dep in deps.items(): if project: project.clear_pipfile_cache() indexes = getattr(project, ""pipfile_sources"", []) if project is not None else [] new_dep = Requirement.from_pipfile(dep_name, dep) if new_dep.index: include_index = True req = new_dep.as_line(sources=indexes if include_index else None).strip() dependencies.append(req) if not r: return dependencies # Write requirements.txt to tmp directory. from pipenv.vendor.vistir.path import create_tracked_tempfile f = create_tracked_tempfile(suffix=""-requirements.txt"", delete=False) f.write(""\n"".join(dependencies).encode(""utf-8"")) f.close() return f.name ","""Converts a Pipfile-formatted dependency to a pip-formatted one.",8,72,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert_deps_to_pip(deps, project=None, r=True, include_index=True): from pipenv.vendor.requirementslib.models.requirements import Requirement dependencies = [] for dep_name, dep in deps.items(): if project: project.clear_pipfile_cache() indexes = getattr(project, ""pipfile_sources"", []) if project is not None else [] new_dep = Requirement.from_pipfile(dep_name, dep) if new_dep.index: include_index = True req = new_dep.as_line(sources=indexes if include_index else None).strip() dependencies.append(req) if not r: return dependencies # Write requirements.txt to tmp directory. from pipenv.vendor.vistir.path import create_tracked_tempfile f = create_tracked_tempfile(suffix=""-requirements.txt"", delete=False) f.write(""\n"".join(dependencies).encode(""utf-8"")) f.close() return f.name ``` ###Assistant : ""Converts a Pipfile-formatted dependency to a pip-formatted one. " 1433,"def handle_trial_end(self, data): hyper_params = nni.load(data['hyper_params']) if self.is_created_in_previous_exp(hyper_params['parameter_id']): # The end of the recovered trial is ignored return self._handle_trial_end(hyper_params['parameter_id']) if data['trial_job_id'] in self.job_id_para_id_map: del self.job_id_para_id_map[data['trial_job_id']] "," Parameters ---------- data: dict() it has three keys: trial_job_id, event, hyper_params trial_job_id: the id generated by training service event: the job's state hyper_params: the hyperparameters (a string) generated and returned by tuner ",32,25,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def handle_trial_end(self, data): hyper_params = nni.load(data['hyper_params']) if self.is_created_in_previous_exp(hyper_params['parameter_id']): # The end of the recovered trial is ignored return self._handle_trial_end(hyper_params['parameter_id']) if data['trial_job_id'] in self.job_id_para_id_map: del self.job_id_para_id_map[data['trial_job_id']] ``` ###Assistant : Parameters ---------- data: dict() it has three keys: trial_job_id, event, hyper_params trial_job_id: the id generated by training service event: the job's state hyper_params: the hyperparameters (a string) generated and returned by tuner " 1434,"def _copy_future_state(source, dest): assert source.done() if dest.cancelled(): return assert not dest.done() if source.cancelled(): dest.cancel() else: exception = source.exception() if exception is not None: dest.set_exception(_convert_future_exc(exception)) else: result = source.result() dest.set_result(result) ","Internal helper to copy state from another Future. The other Future may be a concurrent.futures.Future. ",15,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _copy_future_state(source, dest): assert source.done() if dest.cancelled(): return assert not dest.done() if source.cancelled(): dest.cancel() else: exception = source.exception() if exception is not None: dest.set_exception(_convert_future_exc(exception)) else: result = source.result() dest.set_result(result) ``` ###Assistant : Internal helper to copy state from another Future. The other Future may be a concurrent.futures.Future. " 1435,"def test_get_feature_names_invalid_dtypes(names, dtypes): pd = pytest.importorskip(""pandas"") X = pd.DataFrame([[1, 2], [4, 5], [5, 6]], columns=names) msg = re.escape( ""Feature names only support names that are all strings. "" f""Got feature names with dtypes: {dtypes}."" ) with pytest.raises(TypeError, match=msg): names = _get_feature_names(X) ",Get feature names errors when the feature names have mixed dtypes,11,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_feature_names_invalid_dtypes(names, dtypes): pd = pytest.importorskip(""pandas"") X = pd.DataFrame([[1, 2], [4, 5], [5, 6]], columns=names) msg = re.escape( ""Feature names only support names that are all strings. "" f""Got feature names with dtypes: {dtypes}."" ) with pytest.raises(TypeError, match=msg): names = _get_feature_names(X) ``` ###Assistant : Get feature names errors when the feature names have mixed dtypes " 1436,"def lookup(address, port, s): # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1. # Those still appear as ""127.0.0.1"" in the table, so we need to strip the prefix. 
address = re.sub(r""^::ffff:(?=\d+.\d+.\d+.\d+$)"", """", address) s = s.decode() # ALL tcp 192.168.1.13:57474 -> 23.205.82.58:443 ESTABLISHED:ESTABLISHED specv4 = f""{address}:{port}"" # ALL tcp 2a01:e35:8bae:50f0:9d9b:ef0d:2de3:b733[58505] -> 2606:4700:30::681f:4ad0[443] ESTABLISHED:ESTABLISHED specv6 = f""{address}[{port}]"" for i in s.split(""\n""): if ""ESTABLISHED:ESTABLISHED"" in i and specv4 in i: s = i.split() if len(s) > 4: if sys.platform.startswith(""freebsd""): # strip parentheses for FreeBSD pfctl s = s[3][1:-1].split("":"") else: s = s[4].split("":"") if len(s) == 2: return s[0], int(s[1]) elif ""ESTABLISHED:ESTABLISHED"" in i and specv6 in i: s = i.split() if len(s) > 4: s = s[4].split(""["") port = s[1].split(""]"") port = port[0] return s[0], int(port) raise RuntimeError(""Could not resolve original destination."") "," Parse the pfctl state output s, to look up the destination host matching the client (address, port). Returns an (address, port) tuple, or None. ",24,133,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def lookup(address, port, s): # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1. # Those still appear as ""127.0.0.1"" in the table, so we need to strip the prefix. address = re.sub(r""^::ffff:(?=\d+.\d+.\d+.\d+$)"", """", address) s = s.decode() # ALL tcp 192.168.1.13:57474 -> 23.205.82.58:443 ESTABLISHED:ESTABLISHED specv4 = f""{address}:{port}"" # ALL tcp 2a01:e35:8bae:50f0:9d9b:ef0d:2de3:b733[58505] -> 2606:4700:30::681f:4ad0[443] ESTABLISHED:ESTABLISHED specv6 = f""{address}[{port}]"" for i in s.split(""\n""): if ""ESTABLISHED:ESTABLISHED"" in i and specv4 in i: s = i.split() if len(s) > 4: if sys.platform.startswith(""freebsd""): # strip parentheses for FreeBSD pfctl s = s[3][1:-1].split("":"") else: s = s[4].split("":"") if len(s) == 2: return s[0], int(s[1]) elif ""ESTABLISHED:ESTABLISHED"" in i and specv6 in i: s = i.split() if len(s) > 4: s = s[4].split(""["") port = s[1].split(""]"") port = port[0] return s[0], int(port) raise RuntimeError(""Could not resolve original destination."") ``` ###Assistant : Parse the pfctl state output s, to look up the destination host matching the client (address, port). Returns an (address, port) tuple, or None. " 1437,"def _hydrate_rules(cls, project_id, rules, type=OwnerRuleType.OWNERSHIP_RULE.value): owners = [owner for rule in rules for owner in rule.owners] actors = { key: val for key, val in resolve_actors({owner for owner in owners}, project_id).items() if val } result = [ (rule, ActorTuple.resolve_many([actors[owner] for owner in rule.owners]), type) for rule in rules ] return result "," Get the last matching rule to take the most precedence. ",10,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _hydrate_rules(cls, project_id, rules, type=OwnerRuleType.OWNERSHIP_RULE.value): owners = [owner for rule in rules for owner in rule.owners] actors = { key: val for key, val in resolve_actors({owner for owner in owners}, project_id).items() if val } result = [ (rule, ActorTuple.resolve_many([actors[owner] for owner in rule.owners]), type) for rule in rules ] return result ``` ###Assistant : Get the last matching rule to take the most precedence. 
" 1438,"def activate(self) -> str: load_kube_config_from_dict( config_dict=self.config, context=self.context, ) return self.current_context() "," Convenience method for activating the k8s config stored in an instance of this block Returns current_context for sanity check ",19,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def activate(self) -> str: load_kube_config_from_dict( config_dict=self.config, context=self.context, ) return self.current_context() ``` ###Assistant : Convenience method for activating the k8s config stored in an instance of this block Returns current_context for sanity check " 1439,"def as_coeff_add(self, *deps) -> tuple[Expr, tuple[Expr, ...]]: if deps: if not self.has_free(*deps): return self, tuple() return S.Zero, (self,) ","Return the tuple (c, args) where self is written as an Add, ``a``. c should be a Rational added to any terms of the Add that are independent of deps. args should be a tuple of all other terms of ``a``; args is empty if self is a Number or if self is independent of deps (when given). This should be used when you do not know if self is an Add or not but you want to treat self as an Add or if you want to process the individual arguments of the tail of self as an Add. - if you know self is an Add and want only the head, use self.args[0]; - if you do not want to process the arguments of the tail but need the tail then use self.as_two_terms() which gives the head and tail. - if you want to split self into an independent and dependent parts use ``self.as_independent(*deps)`` >>> from sympy import S >>> from sympy.abc import x, y >>> (S(3)).as_coeff_add() (3, ()) >>> (3 + x).as_coeff_add() (3, (x,)) >>> (3 + x + y).as_coeff_add(x) (y + 3, (x,)) >>> (3 + y).as_coeff_add(x) (y + 3, ()) ",195,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def as_coeff_add(self, *deps) -> tuple[Expr, tuple[Expr, ...]]: if deps: if not self.has_free(*deps): return self, tuple() return S.Zero, (self,) ``` ###Assistant : Return the tuple (c, args) where self is written as an Add, ``a``. c should be a Rational added to any terms of the Add that are independent of deps. args should be a tuple of all other terms of ``a``; args is empty if self is a Number or if self is independent of deps (when given). This should be used when you do not know if self is an Add or not but you want to treat self as an Add or if you want to process the individual arguments of the tail of self as an Add. - if you know self is an Add and want only the head, use self.args[0]; - if you do not want to process the arguments of the tail but need the tail then use self.as_two_terms() which gives the head and tail. - if you want to split self into an independent and dependent parts use ``self.as_independent(*deps)`` >>> from sympy import S >>> from sympy.abc import x, y >>> (S(3)).as_coeff_add() (3, ()) >>> (3 + x).as_coeff_add() (3, (x,)) >>> (3 + x + y).as_coeff_add(x) (y + 3, (x,)) >>> (3 + y).as_coeff_add(x) (y + 3, ()) " 1440,"def create_github_url(url): repo_only_url = re.compile( r""https:\/\/github\.com\/[a-z\d](?:[a-z\d]|-(?=[a-z\d])){0,38}\/[a-zA-Z0-9]+$"" ) re_branch = re.compile(""/(tree|blob)/(.+?)/"") # Check if the given url is a url to a GitHub repo. If it is, tell the # user to use 'git clone' to download it if re.match(repo_only_url, url): print( ""✘ The given url is a complete repository. 
Use 'git clone' to download the repository"", ""red"", ) sys.exit() # extract the branch name from the given url (e.g master) branch = re_branch.search(url) if branch: download_dirs = url[branch.end() :] api_url = ( url[: branch.start()].replace(""github.com"", ""api.github.com/repos"", 1) + ""/contents/"" + download_dirs + ""?ref="" + branch.group(2) ) return api_url, download_dirs raise ValueError(""The given url is not a valid GitHub url"") # Modification of https://github.com/sdushantha/gitdir/blob/master/gitdir/gitdir.py"," From the given url, produce a URL that is compatible with Github's REST API. Can handle blob or tree paths. ",20,112,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_github_url(url): repo_only_url = re.compile( r""https:\/\/github\.com\/[a-z\d](?:[a-z\d]|-(?=[a-z\d])){0,38}\/[a-zA-Z0-9]+$"" ) re_branch = re.compile(""/(tree|blob)/(.+?)/"") # Check if the given url is a url to a GitHub repo. If it is, tell the # user to use 'git clone' to download it if re.match(repo_only_url, url): print( ""✘ The given url is a complete repository. Use 'git clone' to download the repository"", ""red"", ) sys.exit() # extract the branch name from the given url (e.g master) branch = re_branch.search(url) if branch: download_dirs = url[branch.end() :] api_url = ( url[: branch.start()].replace(""github.com"", ""api.github.com/repos"", 1) + ""/contents/"" + download_dirs + ""?ref="" + branch.group(2) ) return api_url, download_dirs raise ValueError(""The given url is not a valid GitHub url"") # Modification of https://github.com/sdushantha/gitdir/blob/master/gitdir/gitdir.py ``` ###Assistant : From the given url, produce a URL that is compatible with Github's REST API. Can handle blob or tree paths. " 1441,"def homogeneity_score(labels_true, labels_pred): return homogeneity_completeness_v_measure(labels_true, labels_pred)[0] ","Homogeneity metric of a cluster labeling given a ground truth. A clustering result satisfies homogeneity if all of its clusters contain only data points which are members of a single class. This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score value in any way. This metric is not symmetric: switching ``label_true`` with ``label_pred`` will return the :func:`completeness_score` which will be different in general. Read more in the :ref:`User Guide `. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Cluster labels to evaluate. Returns ------- homogeneity : float Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling. See Also -------- completeness_score : Completeness metric of cluster labeling. v_measure_score : V-Measure (NMI with arithmetic mean option). References ---------- .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. 
V-Measure: A conditional entropy-based external cluster evaluation measure `_ Examples -------- Perfect labelings are homogeneous:: >>> from sklearn.metrics.cluster import homogeneity_score >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Non-perfect labelings that further split classes into more clusters can be perfectly homogeneous:: >>> print(""%.6f"" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2])) 1.000000 >>> print(""%.6f"" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3])) 1.000000 Clusters that include samples from different classes do not make for an homogeneous labeling:: >>> print(""%.6f"" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1])) 0.0... >>> print(""%.6f"" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0])) 0.0... ",263,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def homogeneity_score(labels_true, labels_pred): return homogeneity_completeness_v_measure(labels_true, labels_pred)[0] ``` ###Assistant : Homogeneity metric of a cluster labeling given a ground truth. A clustering result satisfies homogeneity if all of its clusters contain only data points which are members of a single class. This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score value in any way. This metric is not symmetric: switching ``label_true`` with ``label_pred`` will return the :func:`completeness_score` which will be different in general. Read more in the :ref:`User Guide `. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Cluster labels to evaluate. Returns ------- homogeneity : float Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling. See Also -------- completeness_score : Completeness metric of cluster labeling. v_measure_score : V-Measure (NMI with arithmetic mean option). References ---------- .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A conditional entropy-based external cluster evaluation measure `_ Examples -------- Perfect labelings are homogeneous:: >>> from sklearn.metrics.cluster import homogeneity_score >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Non-perfect labelings that further split classes into more clusters can be perfectly homogeneous:: >>> print(""%.6f"" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2])) 1.000000 >>> print(""%.6f"" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3])) 1.000000 Clusters that include samples from different classes do not make for an homogeneous labeling:: >>> print(""%.6f"" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1])) 0.0... >>> print(""%.6f"" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0])) 0.0... " 1442,"def dup_chebyshevt(n, K): if n < 1: return [K.one] m2, m1 = [K.one], [K.one, K.zero] for i in range(2, n+1): m2, m1 = m1, dup_sub(dup_mul_ground(dup_lshift(m1, 1, K), K(2), K), m2, K) return m1 ",Low-level implementation of Chebyshev polynomials of the first kind.,9,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dup_chebyshevt(n, K): if n < 1: return [K.one] m2, m1 = [K.one], [K.one, K.zero] for i in range(2, n+1): m2, m1 = m1, dup_sub(dup_mul_ground(dup_lshift(m1, 1, K), K(2), K), m2, K) return m1 ``` ###Assistant : Low-level implementation of Chebyshev polynomials of the first kind. 
" 1443,"def get_markdown_toc(markdown_source): md = markdown.Markdown(extensions=['toc']) md.convert(markdown_source) return md.toc_tokens ",Return TOC generated by Markdown parser from Markdown source text.,10,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_markdown_toc(markdown_source): md = markdown.Markdown(extensions=['toc']) md.convert(markdown_source) return md.toc_tokens ``` ###Assistant : Return TOC generated by Markdown parser from Markdown source text. " 1444,"def get_value_data_from_instance(self, instance): return { ""id"": instance.pk, ""edit_url"": AdminURLFinder().get_edit_url(instance), } "," Given a model instance, return a value that we can pass to both the server-side template and the client-side rendering code (via telepath) that contains all the information needed for display. Typically this is a dict of id, title etc; it must be JSON-serialisable. ",44,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_value_data_from_instance(self, instance): return { ""id"": instance.pk, ""edit_url"": AdminURLFinder().get_edit_url(instance), } ``` ###Assistant : Given a model instance, return a value that we can pass to both the server-side template and the client-side rendering code (via telepath) that contains all the information needed for display. Typically this is a dict of id, title etc; it must be JSON-serialisable. " 1445,"def set_options(icon=None, button_color=None, element_size=(None, None), button_element_size=(None, None), margins=(None, None), element_padding=(None, None), auto_size_text=None, auto_size_buttons=None, font=None, border_width=None, slider_border_width=None, slider_relief=None, slider_orientation=None, autoclose_time=None, message_box_line_width=None, progress_meter_border_depth=None, progress_meter_style=None, progress_meter_relief=None, progress_meter_color=None, progress_meter_size=None, text_justification=None, background_color=None, element_background_color=None, text_element_background_color=None, input_elements_background_color=None, input_text_color=None, scrollbar_color=None, text_color=None, element_text_color=None, debug_win_size=(None, None), window_location=(None, None), error_button_color=(None, None), tooltip_time=None, tooltip_font=None, use_ttk_buttons=None, ttk_theme=None, suppress_error_popups=None, suppress_raise_key_errors=None, suppress_key_guessing=None,warn_button_key_duplicates=False, enable_treeview_869_patch=None, enable_mac_notitlebar_patch=None, use_custom_titlebar=None, titlebar_background_color=None, titlebar_text_color=None, titlebar_font=None, titlebar_icon=None, user_settings_path=None, pysimplegui_settings_path=None, pysimplegui_settings_filename=None, keep_on_top=None, dpi_awareness=None, scaling=None, disable_modal_windows=None, tooltip_offset=(None, None)): global DEFAULT_ELEMENT_SIZE global DEFAULT_BUTTON_ELEMENT_SIZE global DEFAULT_MARGINS # Margins for each LEFT/RIGHT margin is first term global DEFAULT_ELEMENT_PADDING # Padding between elements (row, col) in pixels global DEFAULT_AUTOSIZE_TEXT global DEFAULT_AUTOSIZE_BUTTONS global DEFAULT_FONT global DEFAULT_BORDER_WIDTH global DEFAULT_AUTOCLOSE_TIME global DEFAULT_BUTTON_COLOR global MESSAGE_BOX_LINE_WIDTH global DEFAULT_PROGRESS_BAR_BORDER_WIDTH global DEFAULT_PROGRESS_BAR_STYLE global DEFAULT_PROGRESS_BAR_RELIEF global DEFAULT_PROGRESS_BAR_COLOR global DEFAULT_PROGRESS_BAR_SIZE global 
DEFAULT_TEXT_JUSTIFICATION global DEFAULT_DEBUG_WINDOW_SIZE global DEFAULT_SLIDER_BORDER_WIDTH global DEFAULT_SLIDER_RELIEF global DEFAULT_SLIDER_ORIENTATION global DEFAULT_BACKGROUND_COLOR global DEFAULT_INPUT_ELEMENTS_COLOR global DEFAULT_ELEMENT_BACKGROUND_COLOR global DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR global DEFAULT_SCROLLBAR_COLOR global DEFAULT_TEXT_COLOR global DEFAULT_WINDOW_LOCATION global DEFAULT_ELEMENT_TEXT_COLOR global DEFAULT_INPUT_TEXT_COLOR global DEFAULT_TOOLTIP_TIME global DEFAULT_ERROR_BUTTON_COLOR global DEFAULT_TTK_THEME global USE_TTK_BUTTONS global TOOLTIP_FONT global SUPPRESS_ERROR_POPUPS global SUPPRESS_RAISE_KEY_ERRORS global SUPPRESS_KEY_GUESSING global WARN_DUPLICATE_BUTTON_KEY_ERRORS global ENABLE_TREEVIEW_869_PATCH global ENABLE_MAC_NOTITLEBAR_PATCH global USE_CUSTOM_TITLEBAR global CUSTOM_TITLEBAR_BACKGROUND_COLOR global CUSTOM_TITLEBAR_TEXT_COLOR global CUSTOM_TITLEBAR_ICON global CUSTOM_TITLEBAR_FONT global DEFAULT_USER_SETTINGS_PATH global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME global DEFAULT_KEEP_ON_TOP global DEFAULT_SCALING global DEFAULT_MODAL_WINDOWS_ENABLED global DEFAULT_TOOLTIP_OFFSET global _pysimplegui_user_settings # global _my_windows if icon: Window._user_defined_icon = icon # _my_windows._user_defined_icon = icon if button_color != None: if button_color == COLOR_SYSTEM_DEFAULT: DEFAULT_BUTTON_COLOR = (COLOR_SYSTEM_DEFAULT, COLOR_SYSTEM_DEFAULT) else: DEFAULT_BUTTON_COLOR = button_color if element_size != (None, None): DEFAULT_ELEMENT_SIZE = element_size if button_element_size != (None, None): DEFAULT_BUTTON_ELEMENT_SIZE = button_element_size if margins != (None, None): DEFAULT_MARGINS = margins if element_padding != (None, None): DEFAULT_ELEMENT_PADDING = element_padding if auto_size_text != None: DEFAULT_AUTOSIZE_TEXT = auto_size_text if auto_size_buttons != None: DEFAULT_AUTOSIZE_BUTTONS = auto_size_buttons if font != None: DEFAULT_FONT = font if border_width != None: DEFAULT_BORDER_WIDTH = border_width if autoclose_time != None: DEFAULT_AUTOCLOSE_TIME = autoclose_time if message_box_line_width != None: MESSAGE_BOX_LINE_WIDTH = message_box_line_width if progress_meter_border_depth != None: DEFAULT_PROGRESS_BAR_BORDER_WIDTH = progress_meter_border_depth if progress_meter_style != None: warnings.warn('You can no longer set a progress bar style. 
All ttk styles must be the same for the window', UserWarning) # DEFAULT_PROGRESS_BAR_STYLE = progress_meter_style if progress_meter_relief != None: DEFAULT_PROGRESS_BAR_RELIEF = progress_meter_relief if progress_meter_color != None: DEFAULT_PROGRESS_BAR_COLOR = progress_meter_color if progress_meter_size != None: DEFAULT_PROGRESS_BAR_SIZE = progress_meter_size if slider_border_width != None: DEFAULT_SLIDER_BORDER_WIDTH = slider_border_width if slider_orientation != None: DEFAULT_SLIDER_ORIENTATION = slider_orientation if slider_relief != None: DEFAULT_SLIDER_RELIEF = slider_relief if text_justification != None: DEFAULT_TEXT_JUSTIFICATION = text_justification if background_color != None: DEFAULT_BACKGROUND_COLOR = background_color if text_element_background_color != None: DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR = text_element_background_color if input_elements_background_color != None: DEFAULT_INPUT_ELEMENTS_COLOR = input_elements_background_color if element_background_color != None: DEFAULT_ELEMENT_BACKGROUND_COLOR = element_background_color if window_location != (None, None): DEFAULT_WINDOW_LOCATION = window_location if debug_win_size != (None, None): DEFAULT_DEBUG_WINDOW_SIZE = debug_win_size if text_color != None: DEFAULT_TEXT_COLOR = text_color if scrollbar_color != None: DEFAULT_SCROLLBAR_COLOR = scrollbar_color if element_text_color != None: DEFAULT_ELEMENT_TEXT_COLOR = element_text_color if input_text_color is not None: DEFAULT_INPUT_TEXT_COLOR = input_text_color if tooltip_time is not None: DEFAULT_TOOLTIP_TIME = tooltip_time if error_button_color != (None, None): DEFAULT_ERROR_BUTTON_COLOR = error_button_color if ttk_theme is not None: DEFAULT_TTK_THEME = ttk_theme if use_ttk_buttons is not None: USE_TTK_BUTTONS = use_ttk_buttons if tooltip_font is not None: TOOLTIP_FONT = tooltip_font if suppress_error_popups is not None: SUPPRESS_ERROR_POPUPS = suppress_error_popups if suppress_raise_key_errors is not None: SUPPRESS_RAISE_KEY_ERRORS = suppress_raise_key_errors if suppress_key_guessing is not None: SUPPRESS_KEY_GUESSING = suppress_key_guessing if warn_button_key_duplicates is not None: WARN_DUPLICATE_BUTTON_KEY_ERRORS = warn_button_key_duplicates if enable_treeview_869_patch is not None: ENABLE_TREEVIEW_869_PATCH = enable_treeview_869_patch if enable_mac_notitlebar_patch is not None: ENABLE_MAC_NOTITLEBAR_PATCH = enable_mac_notitlebar_patch if use_custom_titlebar is not None: USE_CUSTOM_TITLEBAR = use_custom_titlebar if titlebar_background_color is not None: CUSTOM_TITLEBAR_BACKGROUND_COLOR = titlebar_background_color if titlebar_text_color is not None: CUSTOM_TITLEBAR_TEXT_COLOR = titlebar_text_color if titlebar_font is not None: CUSTOM_TITLEBAR_FONT = titlebar_font if titlebar_icon is not None: CUSTOM_TITLEBAR_ICON = titlebar_icon if user_settings_path is not None: DEFAULT_USER_SETTINGS_PATH = user_settings_path if pysimplegui_settings_path is not None: DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH = pysimplegui_settings_path if pysimplegui_settings_filename is not None: DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME = pysimplegui_settings_filename if pysimplegui_settings_filename is not None or pysimplegui_settings_filename is not None: _pysimplegui_user_settings = UserSettings(filename=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME, path=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH) if keep_on_top is not None: DEFAULT_KEEP_ON_TOP = keep_on_top if dpi_awareness is True: if running_windows(): if platform.release() == ""7"": ctypes.windll.user32.SetProcessDPIAware() elif platform.release() == ""8"" or 
platform.release() == ""10"": ctypes.windll.shcore.SetProcessDpiAwareness(1) if scaling is not None: DEFAULT_SCALING = scaling if disable_modal_windows is not None: DEFAULT_MODAL_WINDOWS_ENABLED = not disable_modal_windows if tooltip_offset != (None, None): DEFAULT_TOOLTIP_OFFSET = tooltip_offset return True # ----------------------------------------------------------------- # # .########.##.....##.########.##.....##.########..######. # ....##....##.....##.##.......###...###.##.......##....## # ....##....##.....##.##.......####.####.##.......##...... # ....##....#########.######...##.###.##.######....######. # ....##....##.....##.##.......##.....##.##.............## # ....##....##.....##.##.......##.....##.##.......##....## # ....##....##.....##.########.##.....##.########..######. # ----------------------------------------------------------------- # # The official Theme code #################### ChangeLookAndFeel ####################### # Predefined settings that will change the colors and styles # # of the elements. # ############################################################## LOOK_AND_FEEL_TABLE = { ""SystemDefault"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""SystemDefaultForReal"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": COLOR_SYSTEM_DEFAULT, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""SystemDefault1"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": COLOR_SYSTEM_DEFAULT, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""Material1"": {""BACKGROUND"": ""#E3F2FD"", ""TEXT"": ""#000000"", ""INPUT"": ""#86A8FF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#86A8FF"", ""BUTTON"": (""#FFFFFF"", ""#5079D3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 0, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#FF0266"", ""ACCENT2"": ""#FF5C93"", ""ACCENT3"": ""#C5003C"", }, ""Material2"": {""BACKGROUND"": ""#FAFAFA"", ""TEXT"": ""#000000"", ""INPUT"": ""#004EA1"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#5EA7FF"", ""BUTTON"": (""#FFFFFF"", ""#0079D3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 0, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#FF0266"", ""ACCENT2"": ""#FF5C93"", ""ACCENT3"": ""#C5003C"", }, ""Reddit"": {""BACKGROUND"": ""#ffffff"", ""TEXT"": ""#1a1a1b"", ""INPUT"": ""#dae0e6"", ""TEXT_INPUT"": ""#222222"", ""SCROLL"": ""#a5a4a4"", ""BUTTON"": (""#FFFFFF"", ""#0079d3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#ff5414"", ""ACCENT2"": ""#33a8ff"", ""ACCENT3"": ""#dbf0ff"", }, ""Topanga"": {""BACKGROUND"": ""#282923"", ""TEXT"": ""#E7DB74"", ""INPUT"": ""#393a32"", ""TEXT_INPUT"": ""#E7C855"", ""SCROLL"": ""#E7C855"", ""BUTTON"": (""#E7C855"", ""#284B5A""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#c15226"", 
""ACCENT2"": ""#7a4d5f"", ""ACCENT3"": ""#889743"", }, ""GreenTan"": {""BACKGROUND"": ""#9FB8AD"", ""TEXT"": '#000000', ""INPUT"": ""#F7F3EC"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#F7F3EC"", ""BUTTON"": (""#FFFFFF"", ""#475841""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Dark"": {""BACKGROUND"": ""#404040"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#4D4D4D"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#FFFFFF"", ""#004F00""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightGreen"": {""BACKGROUND"": ""#B7CECE"", ""TEXT"": ""#000000"", ""INPUT"": ""#FDFFF7"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#FDFFF7"", ""BUTTON"": (""#FFFFFF"", ""#658268""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""ACCENT1"": ""#76506d"", ""ACCENT2"": ""#5148f1"", ""ACCENT3"": ""#0a1c84"", ""PROGRESS_DEPTH"": 0, }, ""Dark2"": {""BACKGROUND"": ""#404040"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#FFFFFF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#FFFFFF"", ""#004F00""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Black"": {""BACKGROUND"": ""#000000"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#4D4D4D"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#000000"", ""#FFFFFF""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Tan"": {""BACKGROUND"": ""#fdf6e3"", ""TEXT"": ""#268bd1"", ""INPUT"": ""#eee8d5"", ""TEXT_INPUT"": ""#6c71c3"", ""SCROLL"": ""#eee8d5"", ""BUTTON"": (""#FFFFFF"", ""#063542""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""TanBlue"": {""BACKGROUND"": ""#e5dece"", ""TEXT"": ""#063289"", ""INPUT"": ""#f9f8f4"", ""TEXT_INPUT"": ""#242834"", ""SCROLL"": ""#eee8d5"", ""BUTTON"": (""#FFFFFF"", ""#063289""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkTanBlue"": {""BACKGROUND"": ""#242834"", ""TEXT"": ""#dfe6f8"", ""INPUT"": ""#97755c"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#a9afbb"", ""BUTTON"": (""#FFFFFF"", ""#063289""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkAmber"": {""BACKGROUND"": ""#2c2825"", ""TEXT"": ""#fdcb52"", ""INPUT"": ""#705e52"", ""TEXT_INPUT"": ""#fdcb52"", ""SCROLL"": ""#705e52"", ""BUTTON"": (""#000000"", ""#fdcb52""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlue"": {""BACKGROUND"": ""#1a2835"", ""TEXT"": ""#d1ecff"", ""INPUT"": ""#335267"", ""TEXT_INPUT"": ""#acc2d0"", ""SCROLL"": ""#1b6497"", ""BUTTON"": (""#000000"", ""#fafaf8""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Reds"": {""BACKGROUND"": ""#280001"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#d8d584"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#763e00"", ""BUTTON"": (""#000000"", ""#daad28""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Green"": {""BACKGROUND"": ""#82a459"", ""TEXT"": ""#000000"", ""INPUT"": ""#d8d584"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e3ecf3"", ""BUTTON"": (""#FFFFFF"", ""#517239""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, 
""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""BluePurple"": {""BACKGROUND"": ""#A5CADD"", ""TEXT"": ""#6E266E"", ""INPUT"": ""#E0F5FF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#E0F5FF"", ""BUTTON"": (""#FFFFFF"", ""#303952""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Purple"": {""BACKGROUND"": ""#B0AAC2"", ""TEXT"": ""#000000"", ""INPUT"": ""#F2EFE8"", ""SCROLL"": ""#F2EFE8"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#C2D4D8""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""BlueMono"": {""BACKGROUND"": ""#AAB6D3"", ""TEXT"": ""#000000"", ""INPUT"": ""#F1F4FC"", ""SCROLL"": ""#F1F4FC"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#7186C7""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""GreenMono"": {""BACKGROUND"": ""#A8C1B4"", ""TEXT"": ""#000000"", ""INPUT"": ""#DDE0DE"", ""SCROLL"": ""#E3E3E3"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#6D9F85""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""BrownBlue"": {""BACKGROUND"": ""#64778d"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#f0f3f7"", ""SCROLL"": ""#A6B2BE"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#283b5b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""BrightColors"": {""BACKGROUND"": ""#b4ffb4"", ""TEXT"": ""#000000"", ""INPUT"": ""#ffff64"", ""SCROLL"": ""#ffb482"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#ffa0dc""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""NeutralBlue"": {""BACKGROUND"": ""#92aa9d"", ""TEXT"": ""#000000"", ""INPUT"": ""#fcfff6"", ""SCROLL"": ""#fcfff6"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#d0dbbd""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Kayak"": {""BACKGROUND"": ""#a7ad7f"", ""TEXT"": ""#000000"", ""INPUT"": ""#e6d3a8"", ""SCROLL"": ""#e6d3a8"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#5d907d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""SandyBeach"": {""BACKGROUND"": ""#efeccb"", ""TEXT"": ""#012f2f"", ""INPUT"": ""#e6d3a8"", ""SCROLL"": ""#e6d3a8"", ""TEXT_INPUT"": ""#012f2f"", ""BUTTON"": (""#FFFFFF"", ""#046380""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""TealMono"": {""BACKGROUND"": ""#a8cfdd"", ""TEXT"": ""#000000"", ""INPUT"": ""#dfedf2"", ""SCROLL"": ""#dfedf2"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#183440""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Default"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""Default1"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": COLOR_SYSTEM_DEFAULT, ""PROGRESS"": 
COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""DefaultNoMoreNagging"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""GrayGrayGray"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": COLOR_SYSTEM_DEFAULT, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""LightBlue"": {""BACKGROUND"": ""#E3F2FD"", ""TEXT"": ""#000000"", ""INPUT"": ""#86A8FF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#86A8FF"", ""BUTTON"": (""#FFFFFF"", ""#5079D3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 0, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#FF0266"", ""ACCENT2"": ""#FF5C93"", ""ACCENT3"": ""#C5003C"", }, ""LightGrey"": {""BACKGROUND"": ""#FAFAFA"", ""TEXT"": ""#000000"", ""INPUT"": ""#004EA1"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#5EA7FF"", ""BUTTON"": (""#FFFFFF"", ""#0079D3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 0, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#FF0266"", ""ACCENT2"": ""#FF5C93"", ""ACCENT3"": ""#C5003C"", }, ""LightGrey1"": {""BACKGROUND"": ""#ffffff"", ""TEXT"": ""#1a1a1b"", ""INPUT"": ""#dae0e6"", ""TEXT_INPUT"": ""#222222"", ""SCROLL"": ""#a5a4a4"", ""BUTTON"": (""#FFFFFF"", ""#0079d3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#ff5414"", ""ACCENT2"": ""#33a8ff"", ""ACCENT3"": ""#dbf0ff"", }, ""DarkBrown"": {""BACKGROUND"": ""#282923"", ""TEXT"": ""#E7DB74"", ""INPUT"": ""#393a32"", ""TEXT_INPUT"": ""#E7C855"", ""SCROLL"": ""#E7C855"", ""BUTTON"": (""#E7C855"", ""#284B5A""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#c15226"", ""ACCENT2"": ""#7a4d5f"", ""ACCENT3"": ""#889743"", }, ""LightGreen1"": {""BACKGROUND"": ""#9FB8AD"", ""TEXT"": ""#000000"", ""INPUT"": ""#F7F3EC"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#F7F3EC"", ""BUTTON"": (""#FFFFFF"", ""#475841""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey"": {""BACKGROUND"": ""#404040"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#4D4D4D"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#FFFFFF"", ""#004F00""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightGreen2"": {""BACKGROUND"": ""#B7CECE"", ""TEXT"": ""#000000"", ""INPUT"": ""#FDFFF7"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#FDFFF7"", ""BUTTON"": (""#FFFFFF"", ""#658268""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""ACCENT1"": ""#76506d"", ""ACCENT2"": ""#5148f1"", ""ACCENT3"": ""#0a1c84"", ""PROGRESS_DEPTH"": 0, }, ""DarkGrey1"": {""BACKGROUND"": ""#404040"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#FFFFFF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#FFFFFF"", ""#004F00""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlack"": {""BACKGROUND"": ""#000000"", ""TEXT"": ""#FFFFFF"", 
""INPUT"": ""#4D4D4D"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#000000"", ""#FFFFFF""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBrown"": {""BACKGROUND"": ""#fdf6e3"", ""TEXT"": ""#268bd1"", ""INPUT"": ""#eee8d5"", ""TEXT_INPUT"": ""#6c71c3"", ""SCROLL"": ""#eee8d5"", ""BUTTON"": (""#FFFFFF"", ""#063542""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBrown1"": {""BACKGROUND"": ""#e5dece"", ""TEXT"": ""#063289"", ""INPUT"": ""#f9f8f4"", ""TEXT_INPUT"": ""#242834"", ""SCROLL"": ""#eee8d5"", ""BUTTON"": (""#FFFFFF"", ""#063289""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlue1"": {""BACKGROUND"": ""#242834"", ""TEXT"": ""#dfe6f8"", ""INPUT"": ""#97755c"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#a9afbb"", ""BUTTON"": (""#FFFFFF"", ""#063289""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBrown1"": {""BACKGROUND"": ""#2c2825"", ""TEXT"": ""#fdcb52"", ""INPUT"": ""#705e52"", ""TEXT_INPUT"": ""#fdcb52"", ""SCROLL"": ""#705e52"", ""BUTTON"": (""#000000"", ""#fdcb52""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlue2"": {""BACKGROUND"": ""#1a2835"", ""TEXT"": ""#d1ecff"", ""INPUT"": ""#335267"", ""TEXT_INPUT"": ""#acc2d0"", ""SCROLL"": ""#1b6497"", ""BUTTON"": (""#000000"", ""#fafaf8""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBrown2"": {""BACKGROUND"": ""#280001"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#d8d584"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#763e00"", ""BUTTON"": (""#000000"", ""#daad28""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGreen"": {""BACKGROUND"": ""#82a459"", ""TEXT"": ""#000000"", ""INPUT"": ""#d8d584"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e3ecf3"", ""BUTTON"": (""#FFFFFF"", ""#517239""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBlue1"": {""BACKGROUND"": ""#A5CADD"", ""TEXT"": ""#6E266E"", ""INPUT"": ""#E0F5FF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#E0F5FF"", ""BUTTON"": (""#FFFFFF"", ""#303952""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightPurple"": {""BACKGROUND"": ""#B0AAC2"", ""TEXT"": ""#000000"", ""INPUT"": ""#F2EFE8"", ""SCROLL"": ""#F2EFE8"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#C2D4D8""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBlue2"": {""BACKGROUND"": ""#AAB6D3"", ""TEXT"": ""#000000"", ""INPUT"": ""#F1F4FC"", ""SCROLL"": ""#F1F4FC"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#7186C7""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightGreen3"": {""BACKGROUND"": ""#A8C1B4"", ""TEXT"": ""#000000"", ""INPUT"": ""#DDE0DE"", ""SCROLL"": ""#E3E3E3"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#6D9F85""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlue3"": {""BACKGROUND"": ""#64778d"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#f0f3f7"", ""SCROLL"": 
""#A6B2BE"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#283b5b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightGreen4"": {""BACKGROUND"": ""#b4ffb4"", ""TEXT"": ""#000000"", ""INPUT"": ""#ffff64"", ""SCROLL"": ""#ffb482"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#ffa0dc""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightGreen5"": {""BACKGROUND"": ""#92aa9d"", ""TEXT"": ""#000000"", ""INPUT"": ""#fcfff6"", ""SCROLL"": ""#fcfff6"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#d0dbbd""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBrown2"": {""BACKGROUND"": ""#a7ad7f"", ""TEXT"": ""#000000"", ""INPUT"": ""#e6d3a8"", ""SCROLL"": ""#e6d3a8"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#5d907d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBrown3"": {""BACKGROUND"": ""#efeccb"", ""TEXT"": ""#012f2f"", ""INPUT"": ""#e6d3a8"", ""SCROLL"": ""#e6d3a8"", ""TEXT_INPUT"": ""#012f2f"", ""BUTTON"": (""#FFFFFF"", ""#046380""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBlue3"": {""BACKGROUND"": ""#a8cfdd"", ""TEXT"": ""#000000"", ""INPUT"": ""#dfedf2"", ""SCROLL"": ""#dfedf2"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#183440""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBrown4"": {""BACKGROUND"": ""#d7c79e"", ""TEXT"": ""#a35638"", ""INPUT"": ""#9dab86"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#a35638"", ""BUTTON"": (""#FFFFFF"", ""#a35638""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#a35638"", ""#9dab86"", ""#e08f62"", ""#d7c79e""], }, ""DarkTeal"": {""BACKGROUND"": ""#003f5c"", ""TEXT"": ""#fb5b5a"", ""INPUT"": ""#bc4873"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#bc4873"", ""BUTTON"": (""#FFFFFF"", ""#fb5b5a""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#003f5c"", ""#472b62"", ""#bc4873"", ""#fb5b5a""], }, ""DarkPurple"": {""BACKGROUND"": ""#472b62"", ""TEXT"": ""#fb5b5a"", ""INPUT"": ""#bc4873"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#bc4873"", ""BUTTON"": (""#FFFFFF"", ""#472b62""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#003f5c"", ""#472b62"", ""#bc4873"", ""#fb5b5a""], }, ""LightGreen6"": {""BACKGROUND"": ""#eafbea"", ""TEXT"": ""#1f6650"", ""INPUT"": ""#6f9a8d"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#1f6650"", ""BUTTON"": (""#FFFFFF"", ""#1f6650""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#1f6650"", ""#6f9a8d"", ""#ea5e5e"", ""#eafbea""], }, ""DarkGrey2"": {""BACKGROUND"": ""#2b2b28"", ""TEXT"": ""#f8f8f8"", ""INPUT"": ""#f1d6ab"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f1d6ab"", ""BUTTON"": (""#2b2b28"", ""#e3b04b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#2b2b28"", ""#e3b04b"", ""#f1d6ab"", ""#f8f8f8""], }, ""LightBrown6"": {""BACKGROUND"": ""#f9b282"", ""TEXT"": ""#8f4426"", ""INPUT"": 
""#de6b35"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#8f4426"", ""BUTTON"": (""#FFFFFF"", ""#8f4426""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#8f4426"", ""#de6b35"", ""#64ccda"", ""#f9b282""], }, ""DarkTeal1"": {""BACKGROUND"": ""#396362"", ""TEXT"": ""#ffe7d1"", ""INPUT"": ""#f6c89f"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f6c89f"", ""BUTTON"": (""#ffe7d1"", ""#4b8e8d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#396362"", ""#4b8e8d"", ""#f6c89f"", ""#ffe7d1""], }, ""LightBrown7"": {""BACKGROUND"": ""#f6c89f"", ""TEXT"": ""#396362"", ""INPUT"": ""#4b8e8d"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#396362"", ""BUTTON"": (""#FFFFFF"", ""#396362""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#396362"", ""#4b8e8d"", ""#f6c89f"", ""#ffe7d1""], }, ""DarkPurple1"": {""BACKGROUND"": ""#0c093c"", ""TEXT"": ""#fad6d6"", ""INPUT"": ""#eea5f6"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#eea5f6"", ""BUTTON"": (""#FFFFFF"", ""#df42d1""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#0c093c"", ""#df42d1"", ""#eea5f6"", ""#fad6d6""], }, ""DarkGrey3"": {""BACKGROUND"": ""#211717"", ""TEXT"": ""#dfddc7"", ""INPUT"": ""#f58b54"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f58b54"", ""BUTTON"": (""#dfddc7"", ""#a34a28""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#211717"", ""#a34a28"", ""#f58b54"", ""#dfddc7""], }, ""LightBrown8"": {""BACKGROUND"": ""#dfddc7"", ""TEXT"": ""#211717"", ""INPUT"": ""#a34a28"", ""TEXT_INPUT"": ""#dfddc7"", ""SCROLL"": ""#211717"", ""BUTTON"": (""#dfddc7"", ""#a34a28""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#211717"", ""#a34a28"", ""#f58b54"", ""#dfddc7""], }, ""DarkBlue4"": {""BACKGROUND"": ""#494ca2"", ""TEXT"": ""#e3e7f1"", ""INPUT"": ""#c6cbef"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#c6cbef"", ""BUTTON"": (""#FFFFFF"", ""#8186d5""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#494ca2"", ""#8186d5"", ""#c6cbef"", ""#e3e7f1""], }, ""LightBlue4"": {""BACKGROUND"": ""#5c94bd"", ""TEXT"": ""#470938"", ""INPUT"": ""#1a3e59"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#470938"", ""BUTTON"": (""#FFFFFF"", ""#470938""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#470938"", ""#1a3e59"", ""#5c94bd"", ""#f2d6eb""], }, ""DarkTeal2"": {""BACKGROUND"": ""#394a6d"", ""TEXT"": ""#c0ffb3"", ""INPUT"": ""#52de97"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#52de97"", ""BUTTON"": (""#c0ffb3"", ""#394a6d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#394a6d"", ""#3c9d9b"", ""#52de97"", ""#c0ffb3""], }, ""DarkTeal3"": {""BACKGROUND"": ""#3c9d9b"", ""TEXT"": ""#c0ffb3"", ""INPUT"": ""#52de97"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#52de97"", ""BUTTON"": (""#c0ffb3"", ""#394a6d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#394a6d"", ""#3c9d9b"", ""#52de97"", ""#c0ffb3""], }, 
""DarkPurple5"": {""BACKGROUND"": ""#730068"", ""TEXT"": ""#f6f078"", ""INPUT"": ""#01d28e"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#01d28e"", ""BUTTON"": (""#f6f078"", ""#730068""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#730068"", ""#434982"", ""#01d28e"", ""#f6f078""], }, ""DarkPurple2"": {""BACKGROUND"": ""#202060"", ""TEXT"": ""#b030b0"", ""INPUT"": ""#602080"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#602080"", ""BUTTON"": (""#FFFFFF"", ""#202040""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#202040"", ""#202060"", ""#602080"", ""#b030b0""], }, ""DarkBlue5"": {""BACKGROUND"": ""#000272"", ""TEXT"": ""#ff6363"", ""INPUT"": ""#a32f80"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#a32f80"", ""BUTTON"": (""#FFFFFF"", ""#341677""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#000272"", ""#341677"", ""#a32f80"", ""#ff6363""], }, ""LightGrey2"": {""BACKGROUND"": ""#f6f6f6"", ""TEXT"": ""#420000"", ""INPUT"": ""#d4d7dd"", ""TEXT_INPUT"": ""#420000"", ""SCROLL"": ""#420000"", ""BUTTON"": (""#420000"", ""#d4d7dd""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#420000"", ""#d4d7dd"", ""#eae9e9"", ""#f6f6f6""], }, ""LightGrey3"": {""BACKGROUND"": ""#eae9e9"", ""TEXT"": ""#420000"", ""INPUT"": ""#d4d7dd"", ""TEXT_INPUT"": ""#420000"", ""SCROLL"": ""#420000"", ""BUTTON"": (""#420000"", ""#d4d7dd""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#420000"", ""#d4d7dd"", ""#eae9e9"", ""#f6f6f6""], }, ""DarkBlue6"": {""BACKGROUND"": ""#01024e"", ""TEXT"": ""#ff6464"", ""INPUT"": ""#8b4367"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#8b4367"", ""BUTTON"": (""#FFFFFF"", ""#543864""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#01024e"", ""#543864"", ""#8b4367"", ""#ff6464""], }, ""DarkBlue7"": {""BACKGROUND"": ""#241663"", ""TEXT"": ""#eae7af"", ""INPUT"": ""#a72693"", ""TEXT_INPUT"": ""#eae7af"", ""SCROLL"": ""#a72693"", ""BUTTON"": (""#eae7af"", ""#160f30""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#160f30"", ""#241663"", ""#a72693"", ""#eae7af""], }, ""LightBrown9"": {""BACKGROUND"": ""#f6d365"", ""TEXT"": ""#3a1f5d"", ""INPUT"": ""#c83660"", ""TEXT_INPUT"": ""#f6d365"", ""SCROLL"": ""#3a1f5d"", ""BUTTON"": (""#f6d365"", ""#c83660""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3a1f5d"", ""#c83660"", ""#e15249"", ""#f6d365""], }, ""DarkPurple3"": {""BACKGROUND"": ""#6e2142"", ""TEXT"": ""#ffd692"", ""INPUT"": ""#e16363"", ""TEXT_INPUT"": ""#ffd692"", ""SCROLL"": ""#e16363"", ""BUTTON"": (""#ffd692"", ""#943855""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#6e2142"", ""#943855"", ""#e16363"", ""#ffd692""], }, ""LightBrown10"": {""BACKGROUND"": ""#ffd692"", ""TEXT"": ""#6e2142"", ""INPUT"": ""#943855"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#6e2142"", ""BUTTON"": (""#FFFFFF"", ""#6e2142""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, 
""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#6e2142"", ""#943855"", ""#e16363"", ""#ffd692""], }, ""DarkPurple4"": {""BACKGROUND"": ""#200f21"", ""TEXT"": ""#f638dc"", ""INPUT"": ""#5a3d5c"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#5a3d5c"", ""BUTTON"": (""#FFFFFF"", ""#382039""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#200f21"", ""#382039"", ""#5a3d5c"", ""#f638dc""], }, ""LightBlue5"": {""BACKGROUND"": ""#b2fcff"", ""TEXT"": ""#3e64ff"", ""INPUT"": ""#5edfff"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#3e64ff"", ""BUTTON"": (""#FFFFFF"", ""#3e64ff""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3e64ff"", ""#5edfff"", ""#b2fcff"", ""#ecfcff""], }, ""DarkTeal4"": {""BACKGROUND"": ""#464159"", ""TEXT"": ""#c7f0db"", ""INPUT"": ""#8bbabb"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#8bbabb"", ""BUTTON"": (""#FFFFFF"", ""#6c7b95""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#464159"", ""#6c7b95"", ""#8bbabb"", ""#c7f0db""], }, ""LightTeal"": {""BACKGROUND"": ""#c7f0db"", ""TEXT"": ""#464159"", ""INPUT"": ""#6c7b95"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#464159"", ""BUTTON"": (""#FFFFFF"", ""#464159""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#464159"", ""#6c7b95"", ""#8bbabb"", ""#c7f0db""], }, ""DarkTeal5"": {""BACKGROUND"": ""#8bbabb"", ""TEXT"": ""#464159"", ""INPUT"": ""#6c7b95"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#464159"", ""BUTTON"": (""#c7f0db"", ""#6c7b95""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#464159"", ""#6c7b95"", ""#8bbabb"", ""#c7f0db""], }, ""LightGrey4"": {""BACKGROUND"": ""#faf5ef"", ""TEXT"": ""#672f2f"", ""INPUT"": ""#99b19c"", ""TEXT_INPUT"": ""#672f2f"", ""SCROLL"": ""#672f2f"", ""BUTTON"": (""#672f2f"", ""#99b19c""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#672f2f"", ""#99b19c"", ""#d7d1c9"", ""#faf5ef""], }, ""LightGreen7"": {""BACKGROUND"": ""#99b19c"", ""TEXT"": ""#faf5ef"", ""INPUT"": ""#d7d1c9"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#d7d1c9"", ""BUTTON"": (""#FFFFFF"", ""#99b19c""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#672f2f"", ""#99b19c"", ""#d7d1c9"", ""#faf5ef""], }, ""LightGrey5"": {""BACKGROUND"": ""#d7d1c9"", ""TEXT"": ""#672f2f"", ""INPUT"": ""#99b19c"", ""TEXT_INPUT"": ""#672f2f"", ""SCROLL"": ""#672f2f"", ""BUTTON"": (""#FFFFFF"", ""#672f2f""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#672f2f"", ""#99b19c"", ""#d7d1c9"", ""#faf5ef""], }, ""DarkBrown3"": {""BACKGROUND"": ""#a0855b"", ""TEXT"": ""#f9f6f2"", ""INPUT"": ""#f1d6ab"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f1d6ab"", ""BUTTON"": (""#FFFFFF"", ""#38470b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#38470b"", ""#a0855b"", ""#f1d6ab"", ""#f9f6f2""], }, ""LightBrown11"": {""BACKGROUND"": ""#f1d6ab"", ""TEXT"": ""#38470b"", ""INPUT"": ""#a0855b"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#38470b"", ""BUTTON"": (""#f9f6f2"", ""#a0855b""), 
""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#38470b"", ""#a0855b"", ""#f1d6ab"", ""#f9f6f2""], }, ""DarkRed"": {""BACKGROUND"": ""#83142c"", ""TEXT"": ""#f9d276"", ""INPUT"": ""#ad1d45"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#ad1d45"", ""BUTTON"": (""#f9d276"", ""#ad1d45""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#44000d"", ""#83142c"", ""#ad1d45"", ""#f9d276""], }, ""DarkTeal6"": {""BACKGROUND"": ""#204969"", ""TEXT"": ""#fff7f7"", ""INPUT"": ""#dadada"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#dadada"", ""BUTTON"": (""#000000"", ""#fff7f7""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#204969"", ""#08ffc8"", ""#dadada"", ""#fff7f7""], }, ""DarkBrown4"": {""BACKGROUND"": ""#252525"", ""TEXT"": ""#ff0000"", ""INPUT"": ""#af0404"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#af0404"", ""BUTTON"": (""#FFFFFF"", ""#252525""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#252525"", ""#414141"", ""#af0404"", ""#ff0000""], }, ""LightYellow"": {""BACKGROUND"": ""#f4ff61"", ""TEXT"": ""#27aa80"", ""INPUT"": ""#32ff6a"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#27aa80"", ""BUTTON"": (""#f4ff61"", ""#27aa80""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#27aa80"", ""#32ff6a"", ""#a8ff3e"", ""#f4ff61""], }, ""DarkGreen1"": {""BACKGROUND"": ""#2b580c"", ""TEXT"": ""#fdef96"", ""INPUT"": ""#f7b71d"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f7b71d"", ""BUTTON"": (""#fdef96"", ""#2b580c""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#2b580c"", ""#afa939"", ""#f7b71d"", ""#fdef96""], }, ""LightGreen8"": {""BACKGROUND"": ""#c8dad3"", ""TEXT"": ""#63707e"", ""INPUT"": ""#93b5b3"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#63707e"", ""BUTTON"": (""#FFFFFF"", ""#63707e""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#63707e"", ""#93b5b3"", ""#c8dad3"", ""#f2f6f5""], }, ""DarkTeal7"": {""BACKGROUND"": ""#248ea9"", ""TEXT"": ""#fafdcb"", ""INPUT"": ""#aee7e8"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#aee7e8"", ""BUTTON"": (""#000000"", ""#fafdcb""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#248ea9"", ""#28c3d4"", ""#aee7e8"", ""#fafdcb""], }, ""DarkBlue8"": {""BACKGROUND"": ""#454d66"", ""TEXT"": ""#d9d872"", ""INPUT"": ""#58b368"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#58b368"", ""BUTTON"": (""#000000"", ""#009975""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#009975"", ""#454d66"", ""#58b368"", ""#d9d872""], }, ""DarkBlue9"": {""BACKGROUND"": ""#263859"", ""TEXT"": ""#ff6768"", ""INPUT"": ""#6b778d"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#6b778d"", ""BUTTON"": (""#ff6768"", ""#263859""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#17223b"", ""#263859"", ""#6b778d"", ""#ff6768""], }, ""DarkBlue10"": {""BACKGROUND"": ""#0028ff"", ""TEXT"": ""#f1f4df"", ""INPUT"": ""#10eaf0"", ""TEXT_INPUT"": 
""#000000"", ""SCROLL"": ""#10eaf0"", ""BUTTON"": (""#f1f4df"", ""#24009c""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#24009c"", ""#0028ff"", ""#10eaf0"", ""#f1f4df""], }, ""DarkBlue11"": {""BACKGROUND"": ""#6384b3"", ""TEXT"": ""#e6f0b6"", ""INPUT"": ""#b8e9c0"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#b8e9c0"", ""BUTTON"": (""#e6f0b6"", ""#684949""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#684949"", ""#6384b3"", ""#b8e9c0"", ""#e6f0b6""], }, ""DarkTeal8"": {""BACKGROUND"": ""#71a0a5"", ""TEXT"": ""#212121"", ""INPUT"": ""#665c84"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#212121"", ""BUTTON"": (""#fab95b"", ""#665c84""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#212121"", ""#665c84"", ""#71a0a5"", ""#fab95b""], }, ""DarkRed1"": {""BACKGROUND"": ""#c10000"", ""TEXT"": ""#eeeeee"", ""INPUT"": ""#dedede"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#dedede"", ""BUTTON"": (""#c10000"", ""#eeeeee""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#c10000"", ""#ff4949"", ""#dedede"", ""#eeeeee""], }, ""LightBrown5"": {""BACKGROUND"": ""#fff591"", ""TEXT"": ""#e41749"", ""INPUT"": ""#f5587b"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e41749"", ""BUTTON"": (""#fff591"", ""#e41749""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#e41749"", ""#f5587b"", ""#ff8a5c"", ""#fff591""], }, ""LightGreen9"": {""BACKGROUND"": ""#f1edb3"", ""TEXT"": ""#3b503d"", ""INPUT"": ""#4a746e"", ""TEXT_INPUT"": ""#f1edb3"", ""SCROLL"": ""#3b503d"", ""BUTTON"": (""#f1edb3"", ""#3b503d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3b503d"", ""#4a746e"", ""#c8cf94"", ""#f1edb3""], ""DESCRIPTION"": [""Green"", ""Turquoise"", ""Yellow""], }, ""DarkGreen2"": {""BACKGROUND"": ""#3b503d"", ""TEXT"": ""#f1edb3"", ""INPUT"": ""#c8cf94"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#c8cf94"", ""BUTTON"": (""#f1edb3"", ""#3b503d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3b503d"", ""#4a746e"", ""#c8cf94"", ""#f1edb3""], ""DESCRIPTION"": [""Green"", ""Turquoise"", ""Yellow""], }, ""LightGray1"": {""BACKGROUND"": ""#f2f2f2"", ""TEXT"": ""#222831"", ""INPUT"": ""#393e46"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#222831"", ""BUTTON"": (""#f2f2f2"", ""#222831""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#222831"", ""#393e46"", ""#f96d00"", ""#f2f2f2""], ""DESCRIPTION"": [""#000000"", ""Grey"", ""Orange"", ""Grey"", ""Autumn""], }, ""DarkGrey4"": {""BACKGROUND"": ""#52524e"", ""TEXT"": ""#e9e9e5"", ""INPUT"": ""#d4d6c8"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#d4d6c8"", ""BUTTON"": (""#FFFFFF"", ""#9a9b94""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#52524e"", ""#9a9b94"", ""#d4d6c8"", ""#e9e9e5""], ""DESCRIPTION"": [""Grey"", ""Pastel"", ""Winter""], }, ""DarkBlue12"": {""BACKGROUND"": ""#324e7b"", ""TEXT"": ""#f8f8f8"", ""INPUT"": ""#86a6df"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#86a6df"", 
""BUTTON"": (""#FFFFFF"", ""#5068a9""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#324e7b"", ""#5068a9"", ""#86a6df"", ""#f8f8f8""], ""DESCRIPTION"": [""Blue"", ""Grey"", ""Cold"", ""Winter""], }, ""DarkPurple6"": {""BACKGROUND"": ""#070739"", ""TEXT"": ""#e1e099"", ""INPUT"": ""#c327ab"", ""TEXT_INPUT"": ""#e1e099"", ""SCROLL"": ""#c327ab"", ""BUTTON"": (""#e1e099"", ""#521477""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#070739"", ""#521477"", ""#c327ab"", ""#e1e099""], ""DESCRIPTION"": [""#000000"", ""Purple"", ""Yellow"", ""Dark""], }, ""DarkPurple7"": {""BACKGROUND"": ""#191930"", ""TEXT"": ""#B1B7C5"", ""INPUT"": ""#232B5C"", ""TEXT_INPUT"": ""#D0E3E7"", ""SCROLL"": ""#B1B7C5"", ""BUTTON"": (""#272D38"", ""#B1B7C5""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlue13"": {""BACKGROUND"": ""#203562"", ""TEXT"": ""#e3e8f8"", ""INPUT"": ""#c0c5cd"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#c0c5cd"", ""BUTTON"": (""#FFFFFF"", ""#3e588f""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#203562"", ""#3e588f"", ""#c0c5cd"", ""#e3e8f8""], ""DESCRIPTION"": [""Blue"", ""Grey"", ""Wedding"", ""Cold""], }, ""DarkBrown5"": {""BACKGROUND"": ""#3c1b1f"", ""TEXT"": ""#f6e1b5"", ""INPUT"": ""#e2bf81"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e2bf81"", ""BUTTON"": (""#3c1b1f"", ""#f6e1b5""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3c1b1f"", ""#b21e4b"", ""#e2bf81"", ""#f6e1b5""], ""DESCRIPTION"": [""Brown"", ""Red"", ""Yellow"", ""Warm""], }, ""DarkGreen3"": {""BACKGROUND"": ""#062121"", ""TEXT"": ""#eeeeee"", ""INPUT"": ""#e4dcad"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e4dcad"", ""BUTTON"": (""#eeeeee"", ""#181810""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#062121"", ""#181810"", ""#e4dcad"", ""#eeeeee""], ""DESCRIPTION"": [""#000000"", ""#000000"", ""Brown"", ""Grey""], }, ""DarkBlack1"": {""BACKGROUND"": ""#181810"", ""TEXT"": ""#eeeeee"", ""INPUT"": ""#e4dcad"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e4dcad"", ""BUTTON"": (""#FFFFFF"", ""#062121""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#062121"", ""#181810"", ""#e4dcad"", ""#eeeeee""], ""DESCRIPTION"": [""#000000"", ""#000000"", ""Brown"", ""Grey""], }, ""DarkGrey5"": {""BACKGROUND"": ""#343434"", ""TEXT"": ""#f3f3f3"", ""INPUT"": ""#e9dcbe"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e9dcbe"", ""BUTTON"": (""#FFFFFF"", ""#8e8b82""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#343434"", ""#8e8b82"", ""#e9dcbe"", ""#f3f3f3""], ""DESCRIPTION"": [""Grey"", ""Brown""], }, ""LightBrown12"": {""BACKGROUND"": ""#8e8b82"", ""TEXT"": ""#f3f3f3"", ""INPUT"": ""#e9dcbe"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e9dcbe"", ""BUTTON"": (""#f3f3f3"", ""#8e8b82""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#343434"", ""#8e8b82"", ""#e9dcbe"", ""#f3f3f3""], ""DESCRIPTION"": [""Grey"", ""Brown""], }, ""DarkTeal9"": {""BACKGROUND"": 
""#13445a"", ""TEXT"": ""#fef4e8"", ""INPUT"": ""#446878"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#446878"", ""BUTTON"": (""#fef4e8"", ""#446878""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#13445a"", ""#970747"", ""#446878"", ""#fef4e8""], ""DESCRIPTION"": [""Red"", ""Grey"", ""Blue"", ""Wedding"", ""Retro""], }, ""DarkBlue14"": {""BACKGROUND"": ""#21273d"", ""TEXT"": ""#f1f6f8"", ""INPUT"": ""#b9d4f1"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#b9d4f1"", ""BUTTON"": (""#FFFFFF"", ""#6a759b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#21273d"", ""#6a759b"", ""#b9d4f1"", ""#f1f6f8""], ""DESCRIPTION"": [""Blue"", ""#000000"", ""Grey"", ""Cold"", ""Winter""], }, ""LightBlue6"": {""BACKGROUND"": ""#f1f6f8"", ""TEXT"": ""#21273d"", ""INPUT"": ""#6a759b"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#21273d"", ""BUTTON"": (""#f1f6f8"", ""#6a759b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#21273d"", ""#6a759b"", ""#b9d4f1"", ""#f1f6f8""], ""DESCRIPTION"": [""Blue"", ""#000000"", ""Grey"", ""Cold"", ""Winter""], }, ""DarkGreen4"": {""BACKGROUND"": ""#044343"", ""TEXT"": ""#e4e4e4"", ""INPUT"": ""#045757"", ""TEXT_INPUT"": ""#e4e4e4"", ""SCROLL"": ""#045757"", ""BUTTON"": (""#e4e4e4"", ""#045757""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#222222"", ""#044343"", ""#045757"", ""#e4e4e4""], ""DESCRIPTION"": [""#000000"", ""Turquoise"", ""Grey"", ""Dark""], }, ""DarkGreen5"": {""BACKGROUND"": ""#1b4b36"", ""TEXT"": ""#e0e7f1"", ""INPUT"": ""#aebd77"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#aebd77"", ""BUTTON"": (""#FFFFFF"", ""#538f6a""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#1b4b36"", ""#538f6a"", ""#aebd77"", ""#e0e7f1""], ""DESCRIPTION"": [""Green"", ""Grey""], }, ""DarkTeal10"": {""BACKGROUND"": ""#0d3446"", ""TEXT"": ""#d8dfe2"", ""INPUT"": ""#71adb5"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#71adb5"", ""BUTTON"": (""#FFFFFF"", ""#176d81""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#0d3446"", ""#176d81"", ""#71adb5"", ""#d8dfe2""], ""DESCRIPTION"": [""Grey"", ""Turquoise"", ""Winter"", ""Cold""], }, ""DarkGrey6"": {""BACKGROUND"": ""#3e3e3e"", ""TEXT"": ""#ededed"", ""INPUT"": ""#68868c"", ""TEXT_INPUT"": ""#ededed"", ""SCROLL"": ""#68868c"", ""BUTTON"": (""#FFFFFF"", ""#405559""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3e3e3e"", ""#405559"", ""#68868c"", ""#ededed""], ""DESCRIPTION"": [""Grey"", ""Turquoise"", ""Winter""], }, ""DarkTeal11"": {""BACKGROUND"": ""#405559"", ""TEXT"": ""#ededed"", ""INPUT"": ""#68868c"", ""TEXT_INPUT"": ""#ededed"", ""SCROLL"": ""#68868c"", ""BUTTON"": (""#ededed"", ""#68868c""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3e3e3e"", ""#405559"", ""#68868c"", ""#ededed""], ""DESCRIPTION"": [""Grey"", ""Turquoise"", ""Winter""], }, ""LightBlue7"": {""BACKGROUND"": ""#9ed0e0"", ""TEXT"": ""#19483f"", ""INPUT"": ""#5c868e"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#19483f"", ""BUTTON"": (""#FFFFFF"", 
""#19483f""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#19483f"", ""#5c868e"", ""#ff6a38"", ""#9ed0e0""], ""DESCRIPTION"": [""Orange"", ""Blue"", ""Turquoise""], }, ""LightGreen10"": {""BACKGROUND"": ""#d8ebb5"", ""TEXT"": ""#205d67"", ""INPUT"": ""#639a67"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#205d67"", ""BUTTON"": (""#d8ebb5"", ""#205d67""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#205d67"", ""#639a67"", ""#d9bf77"", ""#d8ebb5""], ""DESCRIPTION"": [""Blue"", ""Green"", ""Brown"", ""Vintage""], }, ""DarkBlue15"": {""BACKGROUND"": ""#151680"", ""TEXT"": ""#f1fea4"", ""INPUT"": ""#375fc0"", ""TEXT_INPUT"": ""#f1fea4"", ""SCROLL"": ""#375fc0"", ""BUTTON"": (""#f1fea4"", ""#1c44ac""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#151680"", ""#1c44ac"", ""#375fc0"", ""#f1fea4""], ""DESCRIPTION"": [""Blue"", ""Yellow"", ""Cold""], }, ""DarkBlue16"": {""BACKGROUND"": ""#1c44ac"", ""TEXT"": ""#f1fea4"", ""INPUT"": ""#375fc0"", ""TEXT_INPUT"": ""#f1fea4"", ""SCROLL"": ""#375fc0"", ""BUTTON"": (""#f1fea4"", ""#151680""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#151680"", ""#1c44ac"", ""#375fc0"", ""#f1fea4""], ""DESCRIPTION"": [""Blue"", ""Yellow"", ""Cold""], }, ""DarkTeal12"": {""BACKGROUND"": ""#004a7c"", ""TEXT"": ""#fafafa"", ""INPUT"": ""#e8f1f5"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e8f1f5"", ""BUTTON"": (""#fafafa"", ""#005691""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#004a7c"", ""#005691"", ""#e8f1f5"", ""#fafafa""], ""DESCRIPTION"": [""Grey"", ""Blue"", ""Cold"", ""Winter""], }, ""LightBrown13"": {""BACKGROUND"": ""#ebf5ee"", ""TEXT"": ""#921224"", ""INPUT"": ""#bdc6b8"", ""TEXT_INPUT"": ""#921224"", ""SCROLL"": ""#921224"", ""BUTTON"": (""#FFFFFF"", ""#921224""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#921224"", ""#bdc6b8"", ""#bce0da"", ""#ebf5ee""], ""DESCRIPTION"": [""Red"", ""Blue"", ""Grey"", ""Vintage"", ""Wedding""], }, ""DarkBlue17"": {""BACKGROUND"": ""#21294c"", ""TEXT"": ""#f9f2d7"", ""INPUT"": ""#f2dea8"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f2dea8"", ""BUTTON"": (""#f9f2d7"", ""#141829""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#141829"", ""#21294c"", ""#f2dea8"", ""#f9f2d7""], ""DESCRIPTION"": [""#000000"", ""Blue"", ""Yellow""], }, ""DarkBrown6"": {""BACKGROUND"": ""#785e4d"", ""TEXT"": ""#f2eee3"", ""INPUT"": ""#baaf92"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#baaf92"", ""BUTTON"": (""#FFFFFF"", ""#785e4d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#785e4d"", ""#ff8426"", ""#baaf92"", ""#f2eee3""], ""DESCRIPTION"": [""Grey"", ""Brown"", ""Orange"", ""Autumn""], }, ""DarkGreen6"": {""BACKGROUND"": ""#5c715e"", ""TEXT"": ""#f2f9f1"", ""INPUT"": ""#ddeedf"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#ddeedf"", ""BUTTON"": (""#f2f9f1"", ""#5c715e""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#5c715e"", ""#b6cdbd"", 
""#ddeedf"", ""#f2f9f1""], ""DESCRIPTION"": [""Grey"", ""Green"", ""Vintage""], }, ""DarkGreen7"": {""BACKGROUND"": ""#0C231E"", ""TEXT"": ""#efbe1c"", ""INPUT"": ""#153C33"", ""TEXT_INPUT"": ""#efbe1c"", ""SCROLL"": ""#153C33"", ""BUTTON"": (""#efbe1c"", ""#153C33""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey7"": {""BACKGROUND"": ""#4b586e"", ""TEXT"": ""#dddddd"", ""INPUT"": ""#574e6d"", ""TEXT_INPUT"": ""#dddddd"", ""SCROLL"": ""#574e6d"", ""BUTTON"": (""#dddddd"", ""#43405d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#43405d"", ""#4b586e"", ""#574e6d"", ""#dddddd""], ""DESCRIPTION"": [""Grey"", ""Winter"", ""Cold""], }, ""DarkRed2"": {""BACKGROUND"": ""#ab1212"", ""TEXT"": ""#f6e4b5"", ""INPUT"": ""#cd3131"", ""TEXT_INPUT"": ""#f6e4b5"", ""SCROLL"": ""#cd3131"", ""BUTTON"": (""#f6e4b5"", ""#ab1212""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#ab1212"", ""#1fad9f"", ""#cd3131"", ""#f6e4b5""], ""DESCRIPTION"": [""Turquoise"", ""Red"", ""Yellow""], }, ""LightGrey6"": {""BACKGROUND"": ""#e3e3e3"", ""TEXT"": ""#233142"", ""INPUT"": ""#455d7a"", ""TEXT_INPUT"": ""#e3e3e3"", ""SCROLL"": ""#233142"", ""BUTTON"": (""#e3e3e3"", ""#455d7a""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#233142"", ""#455d7a"", ""#f95959"", ""#e3e3e3""], ""DESCRIPTION"": [""#000000"", ""Blue"", ""Red"", ""Grey""], }, ""HotDogStand"": {""BACKGROUND"": ""red"", ""TEXT"": ""yellow"", ""INPUT"": ""yellow"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""yellow"", ""BUTTON"": (""red"", ""yellow""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey8"": {""BACKGROUND"": ""#19232D"", ""TEXT"": ""#ffffff"", ""INPUT"": ""#32414B"", ""TEXT_INPUT"": ""#ffffff"", ""SCROLL"": ""#505F69"", ""BUTTON"": (""#ffffff"", ""#32414B""), ""PROGRESS"": (""#505F69"", ""#32414B""), ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey9"": {""BACKGROUND"": ""#36393F"", ""TEXT"": ""#DCDDDE"", ""INPUT"": ""#40444B"", ""TEXT_INPUT"": ""#ffffff"", ""SCROLL"": ""#202225"", ""BUTTON"": (""#202225"", ""#B9BBBE""), ""PROGRESS"": (""#202225"", ""#40444B""), ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey10"": {""BACKGROUND"": ""#1c1e23"", ""TEXT"": ""#cccdcf"", ""INPUT"": ""#272a31"", ""TEXT_INPUT"": ""#8b9fde"", ""SCROLL"": ""#313641"", ""BUTTON"": (""#f5f5f6"", ""#2e3d5a""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey11"": {""BACKGROUND"": ""#1c1e23"", ""TEXT"": ""#cccdcf"", ""INPUT"": ""#313641"", ""TEXT_INPUT"": ""#cccdcf"", ""SCROLL"": ""#313641"", ""BUTTON"": (""#f5f5f6"", ""#313641""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey12"": {""BACKGROUND"": ""#1c1e23"", ""TEXT"": ""#8b9fde"", ""INPUT"": ""#313641"", ""TEXT_INPUT"": ""#8b9fde"", ""SCROLL"": ""#313641"", ""BUTTON"": (""#cccdcf"", ""#2e3d5a""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey13"": {""BACKGROUND"": ""#1c1e23"", ""TEXT"": ""#cccdcf"", ""INPUT"": ""#272a31"", ""TEXT_INPUT"": ""#cccdcf"", ""SCROLL"": ""#313641"", ""BUTTON"": (""#8b9fde"", 
""#313641""), ""PROGRESS"": (""#cccdcf"", ""#272a31""), ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey14"": {""BACKGROUND"": ""#24292e"", ""TEXT"": ""#fafbfc"", ""INPUT"": ""#1d2125"", ""TEXT_INPUT"": ""#fafbfc"", ""SCROLL"": ""#1d2125"", ""BUTTON"": (""#fafbfc"", ""#155398""), ""PROGRESS"": (""#155398"", ""#1d2125""), ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBrown7"": {""BACKGROUND"": ""#2c2417"", ""TEXT"": ""#baa379"", ""INPUT"": ""#baa379"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#392e1c"", ""BUTTON"": (""#000000"", ""#baa379""), ""PROGRESS"": (""#baa379"", ""#453923""), ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""Python"": {""BACKGROUND"": ""#3d7aab"", ""TEXT"": ""#ffde56"", ""INPUT"": ""#295273"", ""TEXT_INPUT"": ""#ffde56"", ""SCROLL"": ""#295273"", ""BUTTON"": (""#ffde56"", ""#295273""), ""PROGRESS"": (""#ffde56"", ""#295273""), ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, } "," :param icon: Can be either a filename or Base64 value. For Windows if filename, it MUST be ICO format. For Linux, must NOT be ICO. Most portable is to use a Base64 of a PNG file. This works universally across all OS's :type icon: bytes | str :param button_color: Color of the button (text, background) :type button_color: (str, str) or str :param element_size: element size (width, height) in characters :type element_size: (int, int) :param button_element_size: Size of button :type button_element_size: (int, int) :param margins: (left/right, top/bottom) tkinter margins around outsize. Amount of pixels to leave inside the window's frame around the edges before your elements are shown. :type margins: (int, int) :param element_padding: Default amount of padding to put around elements in window (left/right, top/bottom) or ((left, right), (top, bottom)) :type element_padding: (int, int) or ((int, int),(int,int)) :param auto_size_text: True if the Widget should be shrunk to exactly fit the number of chars to show :type auto_size_text: bool :param auto_size_buttons: True if Buttons in this Window should be sized to exactly fit the text on this. :type auto_size_buttons: (bool) :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike :type font: (str or (str, int[, str]) or None) :param border_width: width of border around element :type border_width: (int) :param slider_border_width: Width of the border around sliders :type slider_border_width: (int) :param slider_relief: Type of relief to use for sliders :type slider_relief: (str) :param slider_orientation: ??? :type slider_orientation: ??? :param autoclose_time: ??? :type autoclose_time: ??? :param message_box_line_width: ??? :type message_box_line_width: ??? :param progress_meter_border_depth: ??? :type progress_meter_border_depth: ??? :param progress_meter_style: You can no longer set a progress bar style. All ttk styles must be the same for the window :type progress_meter_style: ??? :param progress_meter_relief: :type progress_meter_relief: ??? :param progress_meter_color: ??? :type progress_meter_color: ??? :param progress_meter_size: ??? :type progress_meter_size: ??? 
:param text_justification: Default text justification for all Text Elements in window :type text_justification: 'left' | 'right' | 'center' :param background_color: color of background :type background_color: (str) :param element_background_color: element background color :type element_background_color: (str) :param text_element_background_color: text element background color :type text_element_background_color: (str) :param input_elements_background_color: Default color to use for the background of input elements :type input_elements_background_color: (str) :param input_text_color: Default color to use for the text for Input elements :type input_text_color: (str) :param scrollbar_color: Default color to use for the slider trough :type scrollbar_color: (str) :param text_color: color of the text :type text_color: (str) :param element_text_color: Default color to use for Text elements :type element_text_color: (str) :param debug_win_size: window size :type debug_win_size: (int, int) :param window_location: Default location to place windows. Not setting will center windows on the display :type window_location: (int, int) | None :param error_button_color: (Default = (None)) :type error_button_color: ??? :param tooltip_time: time in milliseconds to wait before showing a tooltip. Default is 400ms :type tooltip_time: (int) :param tooltip_font: font to use for all tooltips :type tooltip_font: str or Tuple[str, int] or Tuple[str, int, str] :param use_ttk_buttons: if True will cause all buttons to be ttk buttons :type use_ttk_buttons: (bool) :param ttk_theme: Theme to use with ttk widgets. Choices (on Windows) include - 'default', 'winnative', 'clam', 'alt', 'classic', 'vista', 'xpnative' :type ttk_theme: (str) :param suppress_error_popups: If True then error popups will not be shown if generated internally to PySimpleGUI :type suppress_error_popups: (bool) :param suppress_raise_key_errors: If True then key errors won't be raised (you'll still get popup error) :type suppress_raise_key_errors: (bool) :param suppress_key_guessing: If True then key errors won't try and find closest matches for you :type suppress_key_guessing: (bool) :param warn_button_key_duplicates: If True then duplicate Button Keys generate warnings (not recommended as they're expected) :type warn_button_key_duplicates: (bool) :param enable_treeview_869_patch: If True, then will use the treeview color patch for tk 8.6.9 :type enable_treeview_869_patch: (bool) :param enable_mac_notitlebar_patch: If True then Windows with no titlebar use an alternative technique when tkinter version < 8.6.10 :type enable_mac_notitlebar_patch: (bool) :param use_custom_titlebar: If True then a custom titlebar is used instead of the normal system titlebar :type use_custom_titlebar: (bool) :param titlebar_background_color: If custom titlebar indicated by use_custom_titlebar, then use this as background color :type titlebar_background_color: str | None :param titlebar_text_color: If custom titlebar indicated by use_custom_titlebar, then use this as text color :type titlebar_text_color: str | None :param titlebar_font: If custom titlebar indicated by use_custom_titlebar, then use this as title font :type titlebar_font: (str or (str, int[, str]) or None) | None :param titlebar_icon: If custom titlebar indicated by use_custom_titlebar, then use this as the icon (file or base64 bytes) :type titlebar_icon: bytes | str :param user_settings_path: default path for user_settings API calls. 
Expanded with os.path.expanduser so can contain ~ to represent user :type user_settings_path: (str) :param pysimplegui_settings_path: default path for the global PySimpleGUI user_settings :type pysimplegui_settings_path: (str) :param pysimplegui_settings_filename: default filename for the global PySimpleGUI user_settings :type pysimplegui_settings_filename: (str) :param keep_on_top: If True then all windows will automatically be set to keep_on_top=True :type keep_on_top: (bool) :param dpi_awareness: If True then will turn on DPI awareness (Windows only at the moment) :type dpi_awareness: (bool) :param scaling: Sets the default scaling for all windows including popups, etc. :type scaling: (float) :param disable_modal_windows: If True then all windows, including popups, will not be modal windows :type disable_modal_windows: (bool) :param tooltip_offset: Offset to use for tooltips as a tuple. These values will be added to the mouse location when the widget was entered. :type tooltip_offset: ((None, None) | (int, int)) :return: None :rtype: None ",889,4824,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_options(icon=None, button_color=None, element_size=(None, None), button_element_size=(None, None), margins=(None, None), element_padding=(None, None), auto_size_text=None, auto_size_buttons=None, font=None, border_width=None, slider_border_width=None, slider_relief=None, slider_orientation=None, autoclose_time=None, message_box_line_width=None, progress_meter_border_depth=None, progress_meter_style=None, progress_meter_relief=None, progress_meter_color=None, progress_meter_size=None, text_justification=None, background_color=None, element_background_color=None, text_element_background_color=None, input_elements_background_color=None, input_text_color=None, scrollbar_color=None, text_color=None, element_text_color=None, debug_win_size=(None, None), window_location=(None, None), error_button_color=(None, None), tooltip_time=None, tooltip_font=None, use_ttk_buttons=None, ttk_theme=None, suppress_error_popups=None, suppress_raise_key_errors=None, suppress_key_guessing=None,warn_button_key_duplicates=False, enable_treeview_869_patch=None, enable_mac_notitlebar_patch=None, use_custom_titlebar=None, titlebar_background_color=None, titlebar_text_color=None, titlebar_font=None, titlebar_icon=None, user_settings_path=None, pysimplegui_settings_path=None, pysimplegui_settings_filename=None, keep_on_top=None, dpi_awareness=None, scaling=None, disable_modal_windows=None, tooltip_offset=(None, None)): global DEFAULT_ELEMENT_SIZE global DEFAULT_BUTTON_ELEMENT_SIZE global DEFAULT_MARGINS # Margins for each LEFT/RIGHT margin is first term global DEFAULT_ELEMENT_PADDING # Padding between elements (row, col) in pixels global DEFAULT_AUTOSIZE_TEXT global DEFAULT_AUTOSIZE_BUTTONS global DEFAULT_FONT global DEFAULT_BORDER_WIDTH global DEFAULT_AUTOCLOSE_TIME global DEFAULT_BUTTON_COLOR global MESSAGE_BOX_LINE_WIDTH global DEFAULT_PROGRESS_BAR_BORDER_WIDTH global DEFAULT_PROGRESS_BAR_STYLE global DEFAULT_PROGRESS_BAR_RELIEF global DEFAULT_PROGRESS_BAR_COLOR global DEFAULT_PROGRESS_BAR_SIZE global DEFAULT_TEXT_JUSTIFICATION global DEFAULT_DEBUG_WINDOW_SIZE global DEFAULT_SLIDER_BORDER_WIDTH global DEFAULT_SLIDER_RELIEF global DEFAULT_SLIDER_ORIENTATION global DEFAULT_BACKGROUND_COLOR global DEFAULT_INPUT_ELEMENTS_COLOR global DEFAULT_ELEMENT_BACKGROUND_COLOR global DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR global DEFAULT_SCROLLBAR_COLOR global 
DEFAULT_TEXT_COLOR global DEFAULT_WINDOW_LOCATION global DEFAULT_ELEMENT_TEXT_COLOR global DEFAULT_INPUT_TEXT_COLOR global DEFAULT_TOOLTIP_TIME global DEFAULT_ERROR_BUTTON_COLOR global DEFAULT_TTK_THEME global USE_TTK_BUTTONS global TOOLTIP_FONT global SUPPRESS_ERROR_POPUPS global SUPPRESS_RAISE_KEY_ERRORS global SUPPRESS_KEY_GUESSING global WARN_DUPLICATE_BUTTON_KEY_ERRORS global ENABLE_TREEVIEW_869_PATCH global ENABLE_MAC_NOTITLEBAR_PATCH global USE_CUSTOM_TITLEBAR global CUSTOM_TITLEBAR_BACKGROUND_COLOR global CUSTOM_TITLEBAR_TEXT_COLOR global CUSTOM_TITLEBAR_ICON global CUSTOM_TITLEBAR_FONT global DEFAULT_USER_SETTINGS_PATH global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH global DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME global DEFAULT_KEEP_ON_TOP global DEFAULT_SCALING global DEFAULT_MODAL_WINDOWS_ENABLED global DEFAULT_TOOLTIP_OFFSET global _pysimplegui_user_settings # global _my_windows if icon: Window._user_defined_icon = icon # _my_windows._user_defined_icon = icon if button_color != None: if button_color == COLOR_SYSTEM_DEFAULT: DEFAULT_BUTTON_COLOR = (COLOR_SYSTEM_DEFAULT, COLOR_SYSTEM_DEFAULT) else: DEFAULT_BUTTON_COLOR = button_color if element_size != (None, None): DEFAULT_ELEMENT_SIZE = element_size if button_element_size != (None, None): DEFAULT_BUTTON_ELEMENT_SIZE = button_element_size if margins != (None, None): DEFAULT_MARGINS = margins if element_padding != (None, None): DEFAULT_ELEMENT_PADDING = element_padding if auto_size_text != None: DEFAULT_AUTOSIZE_TEXT = auto_size_text if auto_size_buttons != None: DEFAULT_AUTOSIZE_BUTTONS = auto_size_buttons if font != None: DEFAULT_FONT = font if border_width != None: DEFAULT_BORDER_WIDTH = border_width if autoclose_time != None: DEFAULT_AUTOCLOSE_TIME = autoclose_time if message_box_line_width != None: MESSAGE_BOX_LINE_WIDTH = message_box_line_width if progress_meter_border_depth != None: DEFAULT_PROGRESS_BAR_BORDER_WIDTH = progress_meter_border_depth if progress_meter_style != None: warnings.warn('You can no longer set a progress bar style. 
All ttk styles must be the same for the window', UserWarning) # DEFAULT_PROGRESS_BAR_STYLE = progress_meter_style if progress_meter_relief != None: DEFAULT_PROGRESS_BAR_RELIEF = progress_meter_relief if progress_meter_color != None: DEFAULT_PROGRESS_BAR_COLOR = progress_meter_color if progress_meter_size != None: DEFAULT_PROGRESS_BAR_SIZE = progress_meter_size if slider_border_width != None: DEFAULT_SLIDER_BORDER_WIDTH = slider_border_width if slider_orientation != None: DEFAULT_SLIDER_ORIENTATION = slider_orientation if slider_relief != None: DEFAULT_SLIDER_RELIEF = slider_relief if text_justification != None: DEFAULT_TEXT_JUSTIFICATION = text_justification if background_color != None: DEFAULT_BACKGROUND_COLOR = background_color if text_element_background_color != None: DEFAULT_TEXT_ELEMENT_BACKGROUND_COLOR = text_element_background_color if input_elements_background_color != None: DEFAULT_INPUT_ELEMENTS_COLOR = input_elements_background_color if element_background_color != None: DEFAULT_ELEMENT_BACKGROUND_COLOR = element_background_color if window_location != (None, None): DEFAULT_WINDOW_LOCATION = window_location if debug_win_size != (None, None): DEFAULT_DEBUG_WINDOW_SIZE = debug_win_size if text_color != None: DEFAULT_TEXT_COLOR = text_color if scrollbar_color != None: DEFAULT_SCROLLBAR_COLOR = scrollbar_color if element_text_color != None: DEFAULT_ELEMENT_TEXT_COLOR = element_text_color if input_text_color is not None: DEFAULT_INPUT_TEXT_COLOR = input_text_color if tooltip_time is not None: DEFAULT_TOOLTIP_TIME = tooltip_time if error_button_color != (None, None): DEFAULT_ERROR_BUTTON_COLOR = error_button_color if ttk_theme is not None: DEFAULT_TTK_THEME = ttk_theme if use_ttk_buttons is not None: USE_TTK_BUTTONS = use_ttk_buttons if tooltip_font is not None: TOOLTIP_FONT = tooltip_font if suppress_error_popups is not None: SUPPRESS_ERROR_POPUPS = suppress_error_popups if suppress_raise_key_errors is not None: SUPPRESS_RAISE_KEY_ERRORS = suppress_raise_key_errors if suppress_key_guessing is not None: SUPPRESS_KEY_GUESSING = suppress_key_guessing if warn_button_key_duplicates is not None: WARN_DUPLICATE_BUTTON_KEY_ERRORS = warn_button_key_duplicates if enable_treeview_869_patch is not None: ENABLE_TREEVIEW_869_PATCH = enable_treeview_869_patch if enable_mac_notitlebar_patch is not None: ENABLE_MAC_NOTITLEBAR_PATCH = enable_mac_notitlebar_patch if use_custom_titlebar is not None: USE_CUSTOM_TITLEBAR = use_custom_titlebar if titlebar_background_color is not None: CUSTOM_TITLEBAR_BACKGROUND_COLOR = titlebar_background_color if titlebar_text_color is not None: CUSTOM_TITLEBAR_TEXT_COLOR = titlebar_text_color if titlebar_font is not None: CUSTOM_TITLEBAR_FONT = titlebar_font if titlebar_icon is not None: CUSTOM_TITLEBAR_ICON = titlebar_icon if user_settings_path is not None: DEFAULT_USER_SETTINGS_PATH = user_settings_path if pysimplegui_settings_path is not None: DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH = pysimplegui_settings_path if pysimplegui_settings_filename is not None: DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME = pysimplegui_settings_filename if pysimplegui_settings_filename is not None or pysimplegui_settings_path is not None: _pysimplegui_user_settings = UserSettings(filename=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_FILENAME, path=DEFAULT_USER_SETTINGS_PYSIMPLEGUI_PATH) if keep_on_top is not None: DEFAULT_KEEP_ON_TOP = keep_on_top if dpi_awareness is True: if running_windows(): if platform.release() == ""7"": ctypes.windll.user32.SetProcessDPIAware() elif platform.release() == ""8"" or 
platform.release() == ""10"": ctypes.windll.shcore.SetProcessDpiAwareness(1) if scaling is not None: DEFAULT_SCALING = scaling if disable_modal_windows is not None: DEFAULT_MODAL_WINDOWS_ENABLED = not disable_modal_windows if tooltip_offset != (None, None): DEFAULT_TOOLTIP_OFFSET = tooltip_offset return True # ----------------------------------------------------------------- # # .########.##.....##.########.##.....##.########..######. # ....##....##.....##.##.......###...###.##.......##....## # ....##....##.....##.##.......####.####.##.......##...... # ....##....#########.######...##.###.##.######....######. # ....##....##.....##.##.......##.....##.##.............## # ....##....##.....##.##.......##.....##.##.......##....## # ....##....##.....##.########.##.....##.########..######. # ----------------------------------------------------------------- # # The official Theme code #################### ChangeLookAndFeel ####################### # Predefined settings that will change the colors and styles # # of the elements. # ############################################################## LOOK_AND_FEEL_TABLE = { ""SystemDefault"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""SystemDefaultForReal"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": COLOR_SYSTEM_DEFAULT, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""SystemDefault1"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": COLOR_SYSTEM_DEFAULT, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""Material1"": {""BACKGROUND"": ""#E3F2FD"", ""TEXT"": ""#000000"", ""INPUT"": ""#86A8FF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#86A8FF"", ""BUTTON"": (""#FFFFFF"", ""#5079D3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 0, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#FF0266"", ""ACCENT2"": ""#FF5C93"", ""ACCENT3"": ""#C5003C"", }, ""Material2"": {""BACKGROUND"": ""#FAFAFA"", ""TEXT"": ""#000000"", ""INPUT"": ""#004EA1"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#5EA7FF"", ""BUTTON"": (""#FFFFFF"", ""#0079D3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 0, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#FF0266"", ""ACCENT2"": ""#FF5C93"", ""ACCENT3"": ""#C5003C"", }, ""Reddit"": {""BACKGROUND"": ""#ffffff"", ""TEXT"": ""#1a1a1b"", ""INPUT"": ""#dae0e6"", ""TEXT_INPUT"": ""#222222"", ""SCROLL"": ""#a5a4a4"", ""BUTTON"": (""#FFFFFF"", ""#0079d3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#ff5414"", ""ACCENT2"": ""#33a8ff"", ""ACCENT3"": ""#dbf0ff"", }, ""Topanga"": {""BACKGROUND"": ""#282923"", ""TEXT"": ""#E7DB74"", ""INPUT"": ""#393a32"", ""TEXT_INPUT"": ""#E7C855"", ""SCROLL"": ""#E7C855"", ""BUTTON"": (""#E7C855"", ""#284B5A""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#c15226"", 
""ACCENT2"": ""#7a4d5f"", ""ACCENT3"": ""#889743"", }, ""GreenTan"": {""BACKGROUND"": ""#9FB8AD"", ""TEXT"": '#000000', ""INPUT"": ""#F7F3EC"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#F7F3EC"", ""BUTTON"": (""#FFFFFF"", ""#475841""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Dark"": {""BACKGROUND"": ""#404040"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#4D4D4D"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#FFFFFF"", ""#004F00""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightGreen"": {""BACKGROUND"": ""#B7CECE"", ""TEXT"": ""#000000"", ""INPUT"": ""#FDFFF7"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#FDFFF7"", ""BUTTON"": (""#FFFFFF"", ""#658268""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""ACCENT1"": ""#76506d"", ""ACCENT2"": ""#5148f1"", ""ACCENT3"": ""#0a1c84"", ""PROGRESS_DEPTH"": 0, }, ""Dark2"": {""BACKGROUND"": ""#404040"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#FFFFFF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#FFFFFF"", ""#004F00""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Black"": {""BACKGROUND"": ""#000000"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#4D4D4D"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#000000"", ""#FFFFFF""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Tan"": {""BACKGROUND"": ""#fdf6e3"", ""TEXT"": ""#268bd1"", ""INPUT"": ""#eee8d5"", ""TEXT_INPUT"": ""#6c71c3"", ""SCROLL"": ""#eee8d5"", ""BUTTON"": (""#FFFFFF"", ""#063542""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""TanBlue"": {""BACKGROUND"": ""#e5dece"", ""TEXT"": ""#063289"", ""INPUT"": ""#f9f8f4"", ""TEXT_INPUT"": ""#242834"", ""SCROLL"": ""#eee8d5"", ""BUTTON"": (""#FFFFFF"", ""#063289""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkTanBlue"": {""BACKGROUND"": ""#242834"", ""TEXT"": ""#dfe6f8"", ""INPUT"": ""#97755c"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#a9afbb"", ""BUTTON"": (""#FFFFFF"", ""#063289""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkAmber"": {""BACKGROUND"": ""#2c2825"", ""TEXT"": ""#fdcb52"", ""INPUT"": ""#705e52"", ""TEXT_INPUT"": ""#fdcb52"", ""SCROLL"": ""#705e52"", ""BUTTON"": (""#000000"", ""#fdcb52""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlue"": {""BACKGROUND"": ""#1a2835"", ""TEXT"": ""#d1ecff"", ""INPUT"": ""#335267"", ""TEXT_INPUT"": ""#acc2d0"", ""SCROLL"": ""#1b6497"", ""BUTTON"": (""#000000"", ""#fafaf8""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Reds"": {""BACKGROUND"": ""#280001"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#d8d584"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#763e00"", ""BUTTON"": (""#000000"", ""#daad28""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Green"": {""BACKGROUND"": ""#82a459"", ""TEXT"": ""#000000"", ""INPUT"": ""#d8d584"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e3ecf3"", ""BUTTON"": (""#FFFFFF"", ""#517239""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, 
""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""BluePurple"": {""BACKGROUND"": ""#A5CADD"", ""TEXT"": ""#6E266E"", ""INPUT"": ""#E0F5FF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#E0F5FF"", ""BUTTON"": (""#FFFFFF"", ""#303952""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Purple"": {""BACKGROUND"": ""#B0AAC2"", ""TEXT"": ""#000000"", ""INPUT"": ""#F2EFE8"", ""SCROLL"": ""#F2EFE8"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#C2D4D8""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""BlueMono"": {""BACKGROUND"": ""#AAB6D3"", ""TEXT"": ""#000000"", ""INPUT"": ""#F1F4FC"", ""SCROLL"": ""#F1F4FC"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#7186C7""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""GreenMono"": {""BACKGROUND"": ""#A8C1B4"", ""TEXT"": ""#000000"", ""INPUT"": ""#DDE0DE"", ""SCROLL"": ""#E3E3E3"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#6D9F85""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""BrownBlue"": {""BACKGROUND"": ""#64778d"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#f0f3f7"", ""SCROLL"": ""#A6B2BE"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#283b5b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""BrightColors"": {""BACKGROUND"": ""#b4ffb4"", ""TEXT"": ""#000000"", ""INPUT"": ""#ffff64"", ""SCROLL"": ""#ffb482"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#ffa0dc""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""NeutralBlue"": {""BACKGROUND"": ""#92aa9d"", ""TEXT"": ""#000000"", ""INPUT"": ""#fcfff6"", ""SCROLL"": ""#fcfff6"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#d0dbbd""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Kayak"": {""BACKGROUND"": ""#a7ad7f"", ""TEXT"": ""#000000"", ""INPUT"": ""#e6d3a8"", ""SCROLL"": ""#e6d3a8"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#5d907d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""SandyBeach"": {""BACKGROUND"": ""#efeccb"", ""TEXT"": ""#012f2f"", ""INPUT"": ""#e6d3a8"", ""SCROLL"": ""#e6d3a8"", ""TEXT_INPUT"": ""#012f2f"", ""BUTTON"": (""#FFFFFF"", ""#046380""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""TealMono"": {""BACKGROUND"": ""#a8cfdd"", ""TEXT"": ""#000000"", ""INPUT"": ""#dfedf2"", ""SCROLL"": ""#dfedf2"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#183440""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""Default"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""Default1"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": COLOR_SYSTEM_DEFAULT, ""PROGRESS"": 
COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""DefaultNoMoreNagging"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": OFFICIAL_PYSIMPLEGUI_BUTTON_COLOR, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""GrayGrayGray"": {""BACKGROUND"": COLOR_SYSTEM_DEFAULT, ""TEXT"": COLOR_SYSTEM_DEFAULT, ""INPUT"": COLOR_SYSTEM_DEFAULT, ""TEXT_INPUT"": COLOR_SYSTEM_DEFAULT, ""SCROLL"": COLOR_SYSTEM_DEFAULT, ""BUTTON"": COLOR_SYSTEM_DEFAULT, ""PROGRESS"": COLOR_SYSTEM_DEFAULT, ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""LightBlue"": {""BACKGROUND"": ""#E3F2FD"", ""TEXT"": ""#000000"", ""INPUT"": ""#86A8FF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#86A8FF"", ""BUTTON"": (""#FFFFFF"", ""#5079D3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 0, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#FF0266"", ""ACCENT2"": ""#FF5C93"", ""ACCENT3"": ""#C5003C"", }, ""LightGrey"": {""BACKGROUND"": ""#FAFAFA"", ""TEXT"": ""#000000"", ""INPUT"": ""#004EA1"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#5EA7FF"", ""BUTTON"": (""#FFFFFF"", ""#0079D3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 0, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#FF0266"", ""ACCENT2"": ""#FF5C93"", ""ACCENT3"": ""#C5003C"", }, ""LightGrey1"": {""BACKGROUND"": ""#ffffff"", ""TEXT"": ""#1a1a1b"", ""INPUT"": ""#dae0e6"", ""TEXT_INPUT"": ""#222222"", ""SCROLL"": ""#a5a4a4"", ""BUTTON"": (""#FFFFFF"", ""#0079d3""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#ff5414"", ""ACCENT2"": ""#33a8ff"", ""ACCENT3"": ""#dbf0ff"", }, ""DarkBrown"": {""BACKGROUND"": ""#282923"", ""TEXT"": ""#E7DB74"", ""INPUT"": ""#393a32"", ""TEXT_INPUT"": ""#E7C855"", ""SCROLL"": ""#E7C855"", ""BUTTON"": (""#E7C855"", ""#284B5A""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""ACCENT1"": ""#c15226"", ""ACCENT2"": ""#7a4d5f"", ""ACCENT3"": ""#889743"", }, ""LightGreen1"": {""BACKGROUND"": ""#9FB8AD"", ""TEXT"": ""#000000"", ""INPUT"": ""#F7F3EC"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#F7F3EC"", ""BUTTON"": (""#FFFFFF"", ""#475841""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey"": {""BACKGROUND"": ""#404040"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#4D4D4D"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#FFFFFF"", ""#004F00""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightGreen2"": {""BACKGROUND"": ""#B7CECE"", ""TEXT"": ""#000000"", ""INPUT"": ""#FDFFF7"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#FDFFF7"", ""BUTTON"": (""#FFFFFF"", ""#658268""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""ACCENT1"": ""#76506d"", ""ACCENT2"": ""#5148f1"", ""ACCENT3"": ""#0a1c84"", ""PROGRESS_DEPTH"": 0, }, ""DarkGrey1"": {""BACKGROUND"": ""#404040"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#FFFFFF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#FFFFFF"", ""#004F00""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlack"": {""BACKGROUND"": ""#000000"", ""TEXT"": ""#FFFFFF"", 
""INPUT"": ""#4D4D4D"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#707070"", ""BUTTON"": (""#000000"", ""#FFFFFF""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBrown"": {""BACKGROUND"": ""#fdf6e3"", ""TEXT"": ""#268bd1"", ""INPUT"": ""#eee8d5"", ""TEXT_INPUT"": ""#6c71c3"", ""SCROLL"": ""#eee8d5"", ""BUTTON"": (""#FFFFFF"", ""#063542""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBrown1"": {""BACKGROUND"": ""#e5dece"", ""TEXT"": ""#063289"", ""INPUT"": ""#f9f8f4"", ""TEXT_INPUT"": ""#242834"", ""SCROLL"": ""#eee8d5"", ""BUTTON"": (""#FFFFFF"", ""#063289""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlue1"": {""BACKGROUND"": ""#242834"", ""TEXT"": ""#dfe6f8"", ""INPUT"": ""#97755c"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#a9afbb"", ""BUTTON"": (""#FFFFFF"", ""#063289""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBrown1"": {""BACKGROUND"": ""#2c2825"", ""TEXT"": ""#fdcb52"", ""INPUT"": ""#705e52"", ""TEXT_INPUT"": ""#fdcb52"", ""SCROLL"": ""#705e52"", ""BUTTON"": (""#000000"", ""#fdcb52""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlue2"": {""BACKGROUND"": ""#1a2835"", ""TEXT"": ""#d1ecff"", ""INPUT"": ""#335267"", ""TEXT_INPUT"": ""#acc2d0"", ""SCROLL"": ""#1b6497"", ""BUTTON"": (""#000000"", ""#fafaf8""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBrown2"": {""BACKGROUND"": ""#280001"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#d8d584"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#763e00"", ""BUTTON"": (""#000000"", ""#daad28""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGreen"": {""BACKGROUND"": ""#82a459"", ""TEXT"": ""#000000"", ""INPUT"": ""#d8d584"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e3ecf3"", ""BUTTON"": (""#FFFFFF"", ""#517239""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBlue1"": {""BACKGROUND"": ""#A5CADD"", ""TEXT"": ""#6E266E"", ""INPUT"": ""#E0F5FF"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#E0F5FF"", ""BUTTON"": (""#FFFFFF"", ""#303952""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightPurple"": {""BACKGROUND"": ""#B0AAC2"", ""TEXT"": ""#000000"", ""INPUT"": ""#F2EFE8"", ""SCROLL"": ""#F2EFE8"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#C2D4D8""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBlue2"": {""BACKGROUND"": ""#AAB6D3"", ""TEXT"": ""#000000"", ""INPUT"": ""#F1F4FC"", ""SCROLL"": ""#F1F4FC"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#7186C7""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightGreen3"": {""BACKGROUND"": ""#A8C1B4"", ""TEXT"": ""#000000"", ""INPUT"": ""#DDE0DE"", ""SCROLL"": ""#E3E3E3"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#6D9F85""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlue3"": {""BACKGROUND"": ""#64778d"", ""TEXT"": ""#FFFFFF"", ""INPUT"": ""#f0f3f7"", ""SCROLL"": 
""#A6B2BE"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#283b5b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightGreen4"": {""BACKGROUND"": ""#b4ffb4"", ""TEXT"": ""#000000"", ""INPUT"": ""#ffff64"", ""SCROLL"": ""#ffb482"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#ffa0dc""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightGreen5"": {""BACKGROUND"": ""#92aa9d"", ""TEXT"": ""#000000"", ""INPUT"": ""#fcfff6"", ""SCROLL"": ""#fcfff6"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#000000"", ""#d0dbbd""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBrown2"": {""BACKGROUND"": ""#a7ad7f"", ""TEXT"": ""#000000"", ""INPUT"": ""#e6d3a8"", ""SCROLL"": ""#e6d3a8"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#5d907d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBrown3"": {""BACKGROUND"": ""#efeccb"", ""TEXT"": ""#012f2f"", ""INPUT"": ""#e6d3a8"", ""SCROLL"": ""#e6d3a8"", ""TEXT_INPUT"": ""#012f2f"", ""BUTTON"": (""#FFFFFF"", ""#046380""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBlue3"": {""BACKGROUND"": ""#a8cfdd"", ""TEXT"": ""#000000"", ""INPUT"": ""#dfedf2"", ""SCROLL"": ""#dfedf2"", ""TEXT_INPUT"": ""#000000"", ""BUTTON"": (""#FFFFFF"", ""#183440""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""LightBrown4"": {""BACKGROUND"": ""#d7c79e"", ""TEXT"": ""#a35638"", ""INPUT"": ""#9dab86"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#a35638"", ""BUTTON"": (""#FFFFFF"", ""#a35638""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#a35638"", ""#9dab86"", ""#e08f62"", ""#d7c79e""], }, ""DarkTeal"": {""BACKGROUND"": ""#003f5c"", ""TEXT"": ""#fb5b5a"", ""INPUT"": ""#bc4873"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#bc4873"", ""BUTTON"": (""#FFFFFF"", ""#fb5b5a""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#003f5c"", ""#472b62"", ""#bc4873"", ""#fb5b5a""], }, ""DarkPurple"": {""BACKGROUND"": ""#472b62"", ""TEXT"": ""#fb5b5a"", ""INPUT"": ""#bc4873"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#bc4873"", ""BUTTON"": (""#FFFFFF"", ""#472b62""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#003f5c"", ""#472b62"", ""#bc4873"", ""#fb5b5a""], }, ""LightGreen6"": {""BACKGROUND"": ""#eafbea"", ""TEXT"": ""#1f6650"", ""INPUT"": ""#6f9a8d"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#1f6650"", ""BUTTON"": (""#FFFFFF"", ""#1f6650""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#1f6650"", ""#6f9a8d"", ""#ea5e5e"", ""#eafbea""], }, ""DarkGrey2"": {""BACKGROUND"": ""#2b2b28"", ""TEXT"": ""#f8f8f8"", ""INPUT"": ""#f1d6ab"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f1d6ab"", ""BUTTON"": (""#2b2b28"", ""#e3b04b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#2b2b28"", ""#e3b04b"", ""#f1d6ab"", ""#f8f8f8""], }, ""LightBrown6"": {""BACKGROUND"": ""#f9b282"", ""TEXT"": ""#8f4426"", ""INPUT"": 
""#de6b35"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#8f4426"", ""BUTTON"": (""#FFFFFF"", ""#8f4426""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#8f4426"", ""#de6b35"", ""#64ccda"", ""#f9b282""], }, ""DarkTeal1"": {""BACKGROUND"": ""#396362"", ""TEXT"": ""#ffe7d1"", ""INPUT"": ""#f6c89f"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f6c89f"", ""BUTTON"": (""#ffe7d1"", ""#4b8e8d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#396362"", ""#4b8e8d"", ""#f6c89f"", ""#ffe7d1""], }, ""LightBrown7"": {""BACKGROUND"": ""#f6c89f"", ""TEXT"": ""#396362"", ""INPUT"": ""#4b8e8d"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#396362"", ""BUTTON"": (""#FFFFFF"", ""#396362""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#396362"", ""#4b8e8d"", ""#f6c89f"", ""#ffe7d1""], }, ""DarkPurple1"": {""BACKGROUND"": ""#0c093c"", ""TEXT"": ""#fad6d6"", ""INPUT"": ""#eea5f6"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#eea5f6"", ""BUTTON"": (""#FFFFFF"", ""#df42d1""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#0c093c"", ""#df42d1"", ""#eea5f6"", ""#fad6d6""], }, ""DarkGrey3"": {""BACKGROUND"": ""#211717"", ""TEXT"": ""#dfddc7"", ""INPUT"": ""#f58b54"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f58b54"", ""BUTTON"": (""#dfddc7"", ""#a34a28""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#211717"", ""#a34a28"", ""#f58b54"", ""#dfddc7""], }, ""LightBrown8"": {""BACKGROUND"": ""#dfddc7"", ""TEXT"": ""#211717"", ""INPUT"": ""#a34a28"", ""TEXT_INPUT"": ""#dfddc7"", ""SCROLL"": ""#211717"", ""BUTTON"": (""#dfddc7"", ""#a34a28""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#211717"", ""#a34a28"", ""#f58b54"", ""#dfddc7""], }, ""DarkBlue4"": {""BACKGROUND"": ""#494ca2"", ""TEXT"": ""#e3e7f1"", ""INPUT"": ""#c6cbef"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#c6cbef"", ""BUTTON"": (""#FFFFFF"", ""#8186d5""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#494ca2"", ""#8186d5"", ""#c6cbef"", ""#e3e7f1""], }, ""LightBlue4"": {""BACKGROUND"": ""#5c94bd"", ""TEXT"": ""#470938"", ""INPUT"": ""#1a3e59"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#470938"", ""BUTTON"": (""#FFFFFF"", ""#470938""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#470938"", ""#1a3e59"", ""#5c94bd"", ""#f2d6eb""], }, ""DarkTeal2"": {""BACKGROUND"": ""#394a6d"", ""TEXT"": ""#c0ffb3"", ""INPUT"": ""#52de97"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#52de97"", ""BUTTON"": (""#c0ffb3"", ""#394a6d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#394a6d"", ""#3c9d9b"", ""#52de97"", ""#c0ffb3""], }, ""DarkTeal3"": {""BACKGROUND"": ""#3c9d9b"", ""TEXT"": ""#c0ffb3"", ""INPUT"": ""#52de97"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#52de97"", ""BUTTON"": (""#c0ffb3"", ""#394a6d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#394a6d"", ""#3c9d9b"", ""#52de97"", ""#c0ffb3""], }, 
""DarkPurple5"": {""BACKGROUND"": ""#730068"", ""TEXT"": ""#f6f078"", ""INPUT"": ""#01d28e"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#01d28e"", ""BUTTON"": (""#f6f078"", ""#730068""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#730068"", ""#434982"", ""#01d28e"", ""#f6f078""], }, ""DarkPurple2"": {""BACKGROUND"": ""#202060"", ""TEXT"": ""#b030b0"", ""INPUT"": ""#602080"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#602080"", ""BUTTON"": (""#FFFFFF"", ""#202040""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#202040"", ""#202060"", ""#602080"", ""#b030b0""], }, ""DarkBlue5"": {""BACKGROUND"": ""#000272"", ""TEXT"": ""#ff6363"", ""INPUT"": ""#a32f80"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#a32f80"", ""BUTTON"": (""#FFFFFF"", ""#341677""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#000272"", ""#341677"", ""#a32f80"", ""#ff6363""], }, ""LightGrey2"": {""BACKGROUND"": ""#f6f6f6"", ""TEXT"": ""#420000"", ""INPUT"": ""#d4d7dd"", ""TEXT_INPUT"": ""#420000"", ""SCROLL"": ""#420000"", ""BUTTON"": (""#420000"", ""#d4d7dd""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#420000"", ""#d4d7dd"", ""#eae9e9"", ""#f6f6f6""], }, ""LightGrey3"": {""BACKGROUND"": ""#eae9e9"", ""TEXT"": ""#420000"", ""INPUT"": ""#d4d7dd"", ""TEXT_INPUT"": ""#420000"", ""SCROLL"": ""#420000"", ""BUTTON"": (""#420000"", ""#d4d7dd""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#420000"", ""#d4d7dd"", ""#eae9e9"", ""#f6f6f6""], }, ""DarkBlue6"": {""BACKGROUND"": ""#01024e"", ""TEXT"": ""#ff6464"", ""INPUT"": ""#8b4367"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#8b4367"", ""BUTTON"": (""#FFFFFF"", ""#543864""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#01024e"", ""#543864"", ""#8b4367"", ""#ff6464""], }, ""DarkBlue7"": {""BACKGROUND"": ""#241663"", ""TEXT"": ""#eae7af"", ""INPUT"": ""#a72693"", ""TEXT_INPUT"": ""#eae7af"", ""SCROLL"": ""#a72693"", ""BUTTON"": (""#eae7af"", ""#160f30""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#160f30"", ""#241663"", ""#a72693"", ""#eae7af""], }, ""LightBrown9"": {""BACKGROUND"": ""#f6d365"", ""TEXT"": ""#3a1f5d"", ""INPUT"": ""#c83660"", ""TEXT_INPUT"": ""#f6d365"", ""SCROLL"": ""#3a1f5d"", ""BUTTON"": (""#f6d365"", ""#c83660""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3a1f5d"", ""#c83660"", ""#e15249"", ""#f6d365""], }, ""DarkPurple3"": {""BACKGROUND"": ""#6e2142"", ""TEXT"": ""#ffd692"", ""INPUT"": ""#e16363"", ""TEXT_INPUT"": ""#ffd692"", ""SCROLL"": ""#e16363"", ""BUTTON"": (""#ffd692"", ""#943855""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#6e2142"", ""#943855"", ""#e16363"", ""#ffd692""], }, ""LightBrown10"": {""BACKGROUND"": ""#ffd692"", ""TEXT"": ""#6e2142"", ""INPUT"": ""#943855"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#6e2142"", ""BUTTON"": (""#FFFFFF"", ""#6e2142""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, 
""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#6e2142"", ""#943855"", ""#e16363"", ""#ffd692""], }, ""DarkPurple4"": {""BACKGROUND"": ""#200f21"", ""TEXT"": ""#f638dc"", ""INPUT"": ""#5a3d5c"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#5a3d5c"", ""BUTTON"": (""#FFFFFF"", ""#382039""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#200f21"", ""#382039"", ""#5a3d5c"", ""#f638dc""], }, ""LightBlue5"": {""BACKGROUND"": ""#b2fcff"", ""TEXT"": ""#3e64ff"", ""INPUT"": ""#5edfff"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#3e64ff"", ""BUTTON"": (""#FFFFFF"", ""#3e64ff""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3e64ff"", ""#5edfff"", ""#b2fcff"", ""#ecfcff""], }, ""DarkTeal4"": {""BACKGROUND"": ""#464159"", ""TEXT"": ""#c7f0db"", ""INPUT"": ""#8bbabb"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#8bbabb"", ""BUTTON"": (""#FFFFFF"", ""#6c7b95""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#464159"", ""#6c7b95"", ""#8bbabb"", ""#c7f0db""], }, ""LightTeal"": {""BACKGROUND"": ""#c7f0db"", ""TEXT"": ""#464159"", ""INPUT"": ""#6c7b95"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#464159"", ""BUTTON"": (""#FFFFFF"", ""#464159""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#464159"", ""#6c7b95"", ""#8bbabb"", ""#c7f0db""], }, ""DarkTeal5"": {""BACKGROUND"": ""#8bbabb"", ""TEXT"": ""#464159"", ""INPUT"": ""#6c7b95"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#464159"", ""BUTTON"": (""#c7f0db"", ""#6c7b95""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#464159"", ""#6c7b95"", ""#8bbabb"", ""#c7f0db""], }, ""LightGrey4"": {""BACKGROUND"": ""#faf5ef"", ""TEXT"": ""#672f2f"", ""INPUT"": ""#99b19c"", ""TEXT_INPUT"": ""#672f2f"", ""SCROLL"": ""#672f2f"", ""BUTTON"": (""#672f2f"", ""#99b19c""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#672f2f"", ""#99b19c"", ""#d7d1c9"", ""#faf5ef""], }, ""LightGreen7"": {""BACKGROUND"": ""#99b19c"", ""TEXT"": ""#faf5ef"", ""INPUT"": ""#d7d1c9"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#d7d1c9"", ""BUTTON"": (""#FFFFFF"", ""#99b19c""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#672f2f"", ""#99b19c"", ""#d7d1c9"", ""#faf5ef""], }, ""LightGrey5"": {""BACKGROUND"": ""#d7d1c9"", ""TEXT"": ""#672f2f"", ""INPUT"": ""#99b19c"", ""TEXT_INPUT"": ""#672f2f"", ""SCROLL"": ""#672f2f"", ""BUTTON"": (""#FFFFFF"", ""#672f2f""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#672f2f"", ""#99b19c"", ""#d7d1c9"", ""#faf5ef""], }, ""DarkBrown3"": {""BACKGROUND"": ""#a0855b"", ""TEXT"": ""#f9f6f2"", ""INPUT"": ""#f1d6ab"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f1d6ab"", ""BUTTON"": (""#FFFFFF"", ""#38470b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#38470b"", ""#a0855b"", ""#f1d6ab"", ""#f9f6f2""], }, ""LightBrown11"": {""BACKGROUND"": ""#f1d6ab"", ""TEXT"": ""#38470b"", ""INPUT"": ""#a0855b"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#38470b"", ""BUTTON"": (""#f9f6f2"", ""#a0855b""), 
""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#38470b"", ""#a0855b"", ""#f1d6ab"", ""#f9f6f2""], }, ""DarkRed"": {""BACKGROUND"": ""#83142c"", ""TEXT"": ""#f9d276"", ""INPUT"": ""#ad1d45"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#ad1d45"", ""BUTTON"": (""#f9d276"", ""#ad1d45""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#44000d"", ""#83142c"", ""#ad1d45"", ""#f9d276""], }, ""DarkTeal6"": {""BACKGROUND"": ""#204969"", ""TEXT"": ""#fff7f7"", ""INPUT"": ""#dadada"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#dadada"", ""BUTTON"": (""#000000"", ""#fff7f7""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#204969"", ""#08ffc8"", ""#dadada"", ""#fff7f7""], }, ""DarkBrown4"": {""BACKGROUND"": ""#252525"", ""TEXT"": ""#ff0000"", ""INPUT"": ""#af0404"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#af0404"", ""BUTTON"": (""#FFFFFF"", ""#252525""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#252525"", ""#414141"", ""#af0404"", ""#ff0000""], }, ""LightYellow"": {""BACKGROUND"": ""#f4ff61"", ""TEXT"": ""#27aa80"", ""INPUT"": ""#32ff6a"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#27aa80"", ""BUTTON"": (""#f4ff61"", ""#27aa80""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#27aa80"", ""#32ff6a"", ""#a8ff3e"", ""#f4ff61""], }, ""DarkGreen1"": {""BACKGROUND"": ""#2b580c"", ""TEXT"": ""#fdef96"", ""INPUT"": ""#f7b71d"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f7b71d"", ""BUTTON"": (""#fdef96"", ""#2b580c""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#2b580c"", ""#afa939"", ""#f7b71d"", ""#fdef96""], }, ""LightGreen8"": {""BACKGROUND"": ""#c8dad3"", ""TEXT"": ""#63707e"", ""INPUT"": ""#93b5b3"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#63707e"", ""BUTTON"": (""#FFFFFF"", ""#63707e""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#63707e"", ""#93b5b3"", ""#c8dad3"", ""#f2f6f5""], }, ""DarkTeal7"": {""BACKGROUND"": ""#248ea9"", ""TEXT"": ""#fafdcb"", ""INPUT"": ""#aee7e8"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#aee7e8"", ""BUTTON"": (""#000000"", ""#fafdcb""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#248ea9"", ""#28c3d4"", ""#aee7e8"", ""#fafdcb""], }, ""DarkBlue8"": {""BACKGROUND"": ""#454d66"", ""TEXT"": ""#d9d872"", ""INPUT"": ""#58b368"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#58b368"", ""BUTTON"": (""#000000"", ""#009975""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#009975"", ""#454d66"", ""#58b368"", ""#d9d872""], }, ""DarkBlue9"": {""BACKGROUND"": ""#263859"", ""TEXT"": ""#ff6768"", ""INPUT"": ""#6b778d"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#6b778d"", ""BUTTON"": (""#ff6768"", ""#263859""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#17223b"", ""#263859"", ""#6b778d"", ""#ff6768""], }, ""DarkBlue10"": {""BACKGROUND"": ""#0028ff"", ""TEXT"": ""#f1f4df"", ""INPUT"": ""#10eaf0"", ""TEXT_INPUT"": 
""#000000"", ""SCROLL"": ""#10eaf0"", ""BUTTON"": (""#f1f4df"", ""#24009c""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#24009c"", ""#0028ff"", ""#10eaf0"", ""#f1f4df""], }, ""DarkBlue11"": {""BACKGROUND"": ""#6384b3"", ""TEXT"": ""#e6f0b6"", ""INPUT"": ""#b8e9c0"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#b8e9c0"", ""BUTTON"": (""#e6f0b6"", ""#684949""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#684949"", ""#6384b3"", ""#b8e9c0"", ""#e6f0b6""], }, ""DarkTeal8"": {""BACKGROUND"": ""#71a0a5"", ""TEXT"": ""#212121"", ""INPUT"": ""#665c84"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#212121"", ""BUTTON"": (""#fab95b"", ""#665c84""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#212121"", ""#665c84"", ""#71a0a5"", ""#fab95b""], }, ""DarkRed1"": {""BACKGROUND"": ""#c10000"", ""TEXT"": ""#eeeeee"", ""INPUT"": ""#dedede"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#dedede"", ""BUTTON"": (""#c10000"", ""#eeeeee""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#c10000"", ""#ff4949"", ""#dedede"", ""#eeeeee""], }, ""LightBrown5"": {""BACKGROUND"": ""#fff591"", ""TEXT"": ""#e41749"", ""INPUT"": ""#f5587b"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e41749"", ""BUTTON"": (""#fff591"", ""#e41749""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#e41749"", ""#f5587b"", ""#ff8a5c"", ""#fff591""], }, ""LightGreen9"": {""BACKGROUND"": ""#f1edb3"", ""TEXT"": ""#3b503d"", ""INPUT"": ""#4a746e"", ""TEXT_INPUT"": ""#f1edb3"", ""SCROLL"": ""#3b503d"", ""BUTTON"": (""#f1edb3"", ""#3b503d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3b503d"", ""#4a746e"", ""#c8cf94"", ""#f1edb3""], ""DESCRIPTION"": [""Green"", ""Turquoise"", ""Yellow""], }, ""DarkGreen2"": {""BACKGROUND"": ""#3b503d"", ""TEXT"": ""#f1edb3"", ""INPUT"": ""#c8cf94"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#c8cf94"", ""BUTTON"": (""#f1edb3"", ""#3b503d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3b503d"", ""#4a746e"", ""#c8cf94"", ""#f1edb3""], ""DESCRIPTION"": [""Green"", ""Turquoise"", ""Yellow""], }, ""LightGray1"": {""BACKGROUND"": ""#f2f2f2"", ""TEXT"": ""#222831"", ""INPUT"": ""#393e46"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#222831"", ""BUTTON"": (""#f2f2f2"", ""#222831""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#222831"", ""#393e46"", ""#f96d00"", ""#f2f2f2""], ""DESCRIPTION"": [""#000000"", ""Grey"", ""Orange"", ""Grey"", ""Autumn""], }, ""DarkGrey4"": {""BACKGROUND"": ""#52524e"", ""TEXT"": ""#e9e9e5"", ""INPUT"": ""#d4d6c8"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#d4d6c8"", ""BUTTON"": (""#FFFFFF"", ""#9a9b94""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#52524e"", ""#9a9b94"", ""#d4d6c8"", ""#e9e9e5""], ""DESCRIPTION"": [""Grey"", ""Pastel"", ""Winter""], }, ""DarkBlue12"": {""BACKGROUND"": ""#324e7b"", ""TEXT"": ""#f8f8f8"", ""INPUT"": ""#86a6df"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#86a6df"", 
""BUTTON"": (""#FFFFFF"", ""#5068a9""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#324e7b"", ""#5068a9"", ""#86a6df"", ""#f8f8f8""], ""DESCRIPTION"": [""Blue"", ""Grey"", ""Cold"", ""Winter""], }, ""DarkPurple6"": {""BACKGROUND"": ""#070739"", ""TEXT"": ""#e1e099"", ""INPUT"": ""#c327ab"", ""TEXT_INPUT"": ""#e1e099"", ""SCROLL"": ""#c327ab"", ""BUTTON"": (""#e1e099"", ""#521477""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#070739"", ""#521477"", ""#c327ab"", ""#e1e099""], ""DESCRIPTION"": [""#000000"", ""Purple"", ""Yellow"", ""Dark""], }, ""DarkPurple7"": {""BACKGROUND"": ""#191930"", ""TEXT"": ""#B1B7C5"", ""INPUT"": ""#232B5C"", ""TEXT_INPUT"": ""#D0E3E7"", ""SCROLL"": ""#B1B7C5"", ""BUTTON"": (""#272D38"", ""#B1B7C5""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBlue13"": {""BACKGROUND"": ""#203562"", ""TEXT"": ""#e3e8f8"", ""INPUT"": ""#c0c5cd"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#c0c5cd"", ""BUTTON"": (""#FFFFFF"", ""#3e588f""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#203562"", ""#3e588f"", ""#c0c5cd"", ""#e3e8f8""], ""DESCRIPTION"": [""Blue"", ""Grey"", ""Wedding"", ""Cold""], }, ""DarkBrown5"": {""BACKGROUND"": ""#3c1b1f"", ""TEXT"": ""#f6e1b5"", ""INPUT"": ""#e2bf81"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e2bf81"", ""BUTTON"": (""#3c1b1f"", ""#f6e1b5""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3c1b1f"", ""#b21e4b"", ""#e2bf81"", ""#f6e1b5""], ""DESCRIPTION"": [""Brown"", ""Red"", ""Yellow"", ""Warm""], }, ""DarkGreen3"": {""BACKGROUND"": ""#062121"", ""TEXT"": ""#eeeeee"", ""INPUT"": ""#e4dcad"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e4dcad"", ""BUTTON"": (""#eeeeee"", ""#181810""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#062121"", ""#181810"", ""#e4dcad"", ""#eeeeee""], ""DESCRIPTION"": [""#000000"", ""#000000"", ""Brown"", ""Grey""], }, ""DarkBlack1"": {""BACKGROUND"": ""#181810"", ""TEXT"": ""#eeeeee"", ""INPUT"": ""#e4dcad"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e4dcad"", ""BUTTON"": (""#FFFFFF"", ""#062121""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#062121"", ""#181810"", ""#e4dcad"", ""#eeeeee""], ""DESCRIPTION"": [""#000000"", ""#000000"", ""Brown"", ""Grey""], }, ""DarkGrey5"": {""BACKGROUND"": ""#343434"", ""TEXT"": ""#f3f3f3"", ""INPUT"": ""#e9dcbe"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e9dcbe"", ""BUTTON"": (""#FFFFFF"", ""#8e8b82""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#343434"", ""#8e8b82"", ""#e9dcbe"", ""#f3f3f3""], ""DESCRIPTION"": [""Grey"", ""Brown""], }, ""LightBrown12"": {""BACKGROUND"": ""#8e8b82"", ""TEXT"": ""#f3f3f3"", ""INPUT"": ""#e9dcbe"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e9dcbe"", ""BUTTON"": (""#f3f3f3"", ""#8e8b82""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#343434"", ""#8e8b82"", ""#e9dcbe"", ""#f3f3f3""], ""DESCRIPTION"": [""Grey"", ""Brown""], }, ""DarkTeal9"": {""BACKGROUND"": 
""#13445a"", ""TEXT"": ""#fef4e8"", ""INPUT"": ""#446878"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#446878"", ""BUTTON"": (""#fef4e8"", ""#446878""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#13445a"", ""#970747"", ""#446878"", ""#fef4e8""], ""DESCRIPTION"": [""Red"", ""Grey"", ""Blue"", ""Wedding"", ""Retro""], }, ""DarkBlue14"": {""BACKGROUND"": ""#21273d"", ""TEXT"": ""#f1f6f8"", ""INPUT"": ""#b9d4f1"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#b9d4f1"", ""BUTTON"": (""#FFFFFF"", ""#6a759b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#21273d"", ""#6a759b"", ""#b9d4f1"", ""#f1f6f8""], ""DESCRIPTION"": [""Blue"", ""#000000"", ""Grey"", ""Cold"", ""Winter""], }, ""LightBlue6"": {""BACKGROUND"": ""#f1f6f8"", ""TEXT"": ""#21273d"", ""INPUT"": ""#6a759b"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#21273d"", ""BUTTON"": (""#f1f6f8"", ""#6a759b""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#21273d"", ""#6a759b"", ""#b9d4f1"", ""#f1f6f8""], ""DESCRIPTION"": [""Blue"", ""#000000"", ""Grey"", ""Cold"", ""Winter""], }, ""DarkGreen4"": {""BACKGROUND"": ""#044343"", ""TEXT"": ""#e4e4e4"", ""INPUT"": ""#045757"", ""TEXT_INPUT"": ""#e4e4e4"", ""SCROLL"": ""#045757"", ""BUTTON"": (""#e4e4e4"", ""#045757""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#222222"", ""#044343"", ""#045757"", ""#e4e4e4""], ""DESCRIPTION"": [""#000000"", ""Turquoise"", ""Grey"", ""Dark""], }, ""DarkGreen5"": {""BACKGROUND"": ""#1b4b36"", ""TEXT"": ""#e0e7f1"", ""INPUT"": ""#aebd77"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#aebd77"", ""BUTTON"": (""#FFFFFF"", ""#538f6a""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#1b4b36"", ""#538f6a"", ""#aebd77"", ""#e0e7f1""], ""DESCRIPTION"": [""Green"", ""Grey""], }, ""DarkTeal10"": {""BACKGROUND"": ""#0d3446"", ""TEXT"": ""#d8dfe2"", ""INPUT"": ""#71adb5"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#71adb5"", ""BUTTON"": (""#FFFFFF"", ""#176d81""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#0d3446"", ""#176d81"", ""#71adb5"", ""#d8dfe2""], ""DESCRIPTION"": [""Grey"", ""Turquoise"", ""Winter"", ""Cold""], }, ""DarkGrey6"": {""BACKGROUND"": ""#3e3e3e"", ""TEXT"": ""#ededed"", ""INPUT"": ""#68868c"", ""TEXT_INPUT"": ""#ededed"", ""SCROLL"": ""#68868c"", ""BUTTON"": (""#FFFFFF"", ""#405559""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3e3e3e"", ""#405559"", ""#68868c"", ""#ededed""], ""DESCRIPTION"": [""Grey"", ""Turquoise"", ""Winter""], }, ""DarkTeal11"": {""BACKGROUND"": ""#405559"", ""TEXT"": ""#ededed"", ""INPUT"": ""#68868c"", ""TEXT_INPUT"": ""#ededed"", ""SCROLL"": ""#68868c"", ""BUTTON"": (""#ededed"", ""#68868c""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#3e3e3e"", ""#405559"", ""#68868c"", ""#ededed""], ""DESCRIPTION"": [""Grey"", ""Turquoise"", ""Winter""], }, ""LightBlue7"": {""BACKGROUND"": ""#9ed0e0"", ""TEXT"": ""#19483f"", ""INPUT"": ""#5c868e"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#19483f"", ""BUTTON"": (""#FFFFFF"", 
""#19483f""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#19483f"", ""#5c868e"", ""#ff6a38"", ""#9ed0e0""], ""DESCRIPTION"": [""Orange"", ""Blue"", ""Turquoise""], }, ""LightGreen10"": {""BACKGROUND"": ""#d8ebb5"", ""TEXT"": ""#205d67"", ""INPUT"": ""#639a67"", ""TEXT_INPUT"": ""#FFFFFF"", ""SCROLL"": ""#205d67"", ""BUTTON"": (""#d8ebb5"", ""#205d67""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#205d67"", ""#639a67"", ""#d9bf77"", ""#d8ebb5""], ""DESCRIPTION"": [""Blue"", ""Green"", ""Brown"", ""Vintage""], }, ""DarkBlue15"": {""BACKGROUND"": ""#151680"", ""TEXT"": ""#f1fea4"", ""INPUT"": ""#375fc0"", ""TEXT_INPUT"": ""#f1fea4"", ""SCROLL"": ""#375fc0"", ""BUTTON"": (""#f1fea4"", ""#1c44ac""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#151680"", ""#1c44ac"", ""#375fc0"", ""#f1fea4""], ""DESCRIPTION"": [""Blue"", ""Yellow"", ""Cold""], }, ""DarkBlue16"": {""BACKGROUND"": ""#1c44ac"", ""TEXT"": ""#f1fea4"", ""INPUT"": ""#375fc0"", ""TEXT_INPUT"": ""#f1fea4"", ""SCROLL"": ""#375fc0"", ""BUTTON"": (""#f1fea4"", ""#151680""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#151680"", ""#1c44ac"", ""#375fc0"", ""#f1fea4""], ""DESCRIPTION"": [""Blue"", ""Yellow"", ""Cold""], }, ""DarkTeal12"": {""BACKGROUND"": ""#004a7c"", ""TEXT"": ""#fafafa"", ""INPUT"": ""#e8f1f5"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#e8f1f5"", ""BUTTON"": (""#fafafa"", ""#005691""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#004a7c"", ""#005691"", ""#e8f1f5"", ""#fafafa""], ""DESCRIPTION"": [""Grey"", ""Blue"", ""Cold"", ""Winter""], }, ""LightBrown13"": {""BACKGROUND"": ""#ebf5ee"", ""TEXT"": ""#921224"", ""INPUT"": ""#bdc6b8"", ""TEXT_INPUT"": ""#921224"", ""SCROLL"": ""#921224"", ""BUTTON"": (""#FFFFFF"", ""#921224""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#921224"", ""#bdc6b8"", ""#bce0da"", ""#ebf5ee""], ""DESCRIPTION"": [""Red"", ""Blue"", ""Grey"", ""Vintage"", ""Wedding""], }, ""DarkBlue17"": {""BACKGROUND"": ""#21294c"", ""TEXT"": ""#f9f2d7"", ""INPUT"": ""#f2dea8"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#f2dea8"", ""BUTTON"": (""#f9f2d7"", ""#141829""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#141829"", ""#21294c"", ""#f2dea8"", ""#f9f2d7""], ""DESCRIPTION"": [""#000000"", ""Blue"", ""Yellow""], }, ""DarkBrown6"": {""BACKGROUND"": ""#785e4d"", ""TEXT"": ""#f2eee3"", ""INPUT"": ""#baaf92"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#baaf92"", ""BUTTON"": (""#FFFFFF"", ""#785e4d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#785e4d"", ""#ff8426"", ""#baaf92"", ""#f2eee3""], ""DESCRIPTION"": [""Grey"", ""Brown"", ""Orange"", ""Autumn""], }, ""DarkGreen6"": {""BACKGROUND"": ""#5c715e"", ""TEXT"": ""#f2f9f1"", ""INPUT"": ""#ddeedf"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#ddeedf"", ""BUTTON"": (""#f2f9f1"", ""#5c715e""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#5c715e"", ""#b6cdbd"", 
""#ddeedf"", ""#f2f9f1""], ""DESCRIPTION"": [""Grey"", ""Green"", ""Vintage""], }, ""DarkGreen7"": {""BACKGROUND"": ""#0C231E"", ""TEXT"": ""#efbe1c"", ""INPUT"": ""#153C33"", ""TEXT_INPUT"": ""#efbe1c"", ""SCROLL"": ""#153C33"", ""BUTTON"": (""#efbe1c"", ""#153C33""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey7"": {""BACKGROUND"": ""#4b586e"", ""TEXT"": ""#dddddd"", ""INPUT"": ""#574e6d"", ""TEXT_INPUT"": ""#dddddd"", ""SCROLL"": ""#574e6d"", ""BUTTON"": (""#dddddd"", ""#43405d""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#43405d"", ""#4b586e"", ""#574e6d"", ""#dddddd""], ""DESCRIPTION"": [""Grey"", ""Winter"", ""Cold""], }, ""DarkRed2"": {""BACKGROUND"": ""#ab1212"", ""TEXT"": ""#f6e4b5"", ""INPUT"": ""#cd3131"", ""TEXT_INPUT"": ""#f6e4b5"", ""SCROLL"": ""#cd3131"", ""BUTTON"": (""#f6e4b5"", ""#ab1212""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#ab1212"", ""#1fad9f"", ""#cd3131"", ""#f6e4b5""], ""DESCRIPTION"": [""Turquoise"", ""Red"", ""Yellow""], }, ""LightGrey6"": {""BACKGROUND"": ""#e3e3e3"", ""TEXT"": ""#233142"", ""INPUT"": ""#455d7a"", ""TEXT_INPUT"": ""#e3e3e3"", ""SCROLL"": ""#233142"", ""BUTTON"": (""#e3e3e3"", ""#455d7a""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, ""COLOR_LIST"": [""#233142"", ""#455d7a"", ""#f95959"", ""#e3e3e3""], ""DESCRIPTION"": [""#000000"", ""Blue"", ""Red"", ""Grey""], }, ""HotDogStand"": {""BACKGROUND"": ""red"", ""TEXT"": ""yellow"", ""INPUT"": ""yellow"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""yellow"", ""BUTTON"": (""red"", ""yellow""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey8"": {""BACKGROUND"": ""#19232D"", ""TEXT"": ""#ffffff"", ""INPUT"": ""#32414B"", ""TEXT_INPUT"": ""#ffffff"", ""SCROLL"": ""#505F69"", ""BUTTON"": (""#ffffff"", ""#32414B""), ""PROGRESS"": (""#505F69"", ""#32414B""), ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey9"": {""BACKGROUND"": ""#36393F"", ""TEXT"": ""#DCDDDE"", ""INPUT"": ""#40444B"", ""TEXT_INPUT"": ""#ffffff"", ""SCROLL"": ""#202225"", ""BUTTON"": (""#202225"", ""#B9BBBE""), ""PROGRESS"": (""#202225"", ""#40444B""), ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey10"": {""BACKGROUND"": ""#1c1e23"", ""TEXT"": ""#cccdcf"", ""INPUT"": ""#272a31"", ""TEXT_INPUT"": ""#8b9fde"", ""SCROLL"": ""#313641"", ""BUTTON"": (""#f5f5f6"", ""#2e3d5a""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey11"": {""BACKGROUND"": ""#1c1e23"", ""TEXT"": ""#cccdcf"", ""INPUT"": ""#313641"", ""TEXT_INPUT"": ""#cccdcf"", ""SCROLL"": ""#313641"", ""BUTTON"": (""#f5f5f6"", ""#313641""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey12"": {""BACKGROUND"": ""#1c1e23"", ""TEXT"": ""#8b9fde"", ""INPUT"": ""#313641"", ""TEXT_INPUT"": ""#8b9fde"", ""SCROLL"": ""#313641"", ""BUTTON"": (""#cccdcf"", ""#2e3d5a""), ""PROGRESS"": DEFAULT_PROGRESS_BAR_COMPUTE, ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey13"": {""BACKGROUND"": ""#1c1e23"", ""TEXT"": ""#cccdcf"", ""INPUT"": ""#272a31"", ""TEXT_INPUT"": ""#cccdcf"", ""SCROLL"": ""#313641"", ""BUTTON"": (""#8b9fde"", 
""#313641""), ""PROGRESS"": (""#cccdcf"", ""#272a31""), ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkGrey14"": {""BACKGROUND"": ""#24292e"", ""TEXT"": ""#fafbfc"", ""INPUT"": ""#1d2125"", ""TEXT_INPUT"": ""#fafbfc"", ""SCROLL"": ""#1d2125"", ""BUTTON"": (""#fafbfc"", ""#155398""), ""PROGRESS"": (""#155398"", ""#1d2125""), ""BORDER"": 1, ""SLIDER_DEPTH"": 0, ""PROGRESS_DEPTH"": 0, }, ""DarkBrown7"": {""BACKGROUND"": ""#2c2417"", ""TEXT"": ""#baa379"", ""INPUT"": ""#baa379"", ""TEXT_INPUT"": ""#000000"", ""SCROLL"": ""#392e1c"", ""BUTTON"": (""#000000"", ""#baa379""), ""PROGRESS"": (""#baa379"", ""#453923""), ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, ""Python"": {""BACKGROUND"": ""#3d7aab"", ""TEXT"": ""#ffde56"", ""INPUT"": ""#295273"", ""TEXT_INPUT"": ""#ffde56"", ""SCROLL"": ""#295273"", ""BUTTON"": (""#ffde56"", ""#295273""), ""PROGRESS"": (""#ffde56"", ""#295273""), ""BORDER"": 1, ""SLIDER_DEPTH"": 1, ""PROGRESS_DEPTH"": 0, }, } ``` ###Assistant : :param icon: Can be either a filename or Base64 value. For Windows if filename, it MUST be ICO format. For Linux, must NOT be ICO. Most portable is to use a Base64 of a PNG file. This works universally across all OS's :type icon: bytes | str :param button_color: Color of the button (text, background) :type button_color: (str, str) or str :param element_size: element size (width, height) in characters :type element_size: (int, int) :param button_element_size: Size of button :type button_element_size: (int, int) :param margins: (left/right, top/bottom) tkinter margins around outsize. Amount of pixels to leave inside the window's frame around the edges before your elements are shown. :type margins: (int, int) :param element_padding: Default amount of padding to put around elements in window (left/right, top/bottom) or ((left, right), (top, bottom)) :type element_padding: (int, int) or ((int, int),(int,int)) :param auto_size_text: True if the Widget should be shrunk to exactly fit the number of chars to show :type auto_size_text: bool :param auto_size_buttons: True if Buttons in this Window should be sized to exactly fit the text on this. :type auto_size_buttons: (bool) :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike :type font: (str or (str, int[, str]) or None) :param border_width: width of border around element :type border_width: (int) :param slider_border_width: Width of the border around sliders :type slider_border_width: (int) :param slider_relief: Type of relief to use for sliders :type slider_relief: (str) :param slider_orientation: ??? :type slider_orientation: ??? :param autoclose_time: ??? :type autoclose_time: ??? :param message_box_line_width: ??? :type message_box_line_width: ??? :param progress_meter_border_depth: ??? :type progress_meter_border_depth: ??? :param progress_meter_style: You can no longer set a progress bar style. All ttk styles must be the same for the window :type progress_meter_style: ??? :param progress_meter_relief: :type progress_meter_relief: ??? :param progress_meter_color: ??? :type progress_meter_color: ??? :param progress_meter_size: ??? :type progress_meter_size: ??? 
:param text_justification: Default text justification for all Text Elements in window :type text_justification: 'left' | 'right' | 'center' :param background_color: color of background :type background_color: (str) :param element_background_color: element background color :type element_background_color: (str) :param text_element_background_color: text element background color :type text_element_background_color: (str) :param input_elements_background_color: Default color to use for the background of input elements :type input_elements_background_color: (str) :param input_text_color: Default color to use for the text for Input elements :type input_text_color: (str) :param scrollbar_color: Default color to use for the slider trough :type scrollbar_color: (str) :param text_color: color of the text :type text_color: (str) :param element_text_color: Default color to use for Text elements :type element_text_color: (str) :param debug_win_size: window size :type debug_win_size: (int, int) :param window_location: Default location to place windows. Not setting will center windows on the display :type window_location: (int, int) | None :param error_button_color: (Default = (None)) :type error_button_color: ??? :param tooltip_time: time in milliseconds to wait before showing a tooltip. Default is 400ms :type tooltip_time: (int) :param tooltip_font: font to use for all tooltips :type tooltip_font: str or Tuple[str, int] or Tuple[str, int, str] :param use_ttk_buttons: if True will cause all buttons to be ttk buttons :type use_ttk_buttons: (bool) :param ttk_theme: Theme to use with ttk widgets. Choices (on Windows) include - 'default', 'winnative', 'clam', 'alt', 'classic', 'vista', 'xpnative' :type ttk_theme: (str) :param suppress_error_popups: If True then error popups will not be shown if generated internally to PySimpleGUI :type suppress_error_popups: (bool) :param suppress_raise_key_errors: If True then key errors won't be raised (you'll still get popup error) :type suppress_raise_key_errors: (bool) :param suppress_key_guessing: If True then key errors won't try and find closest matches for you :type suppress_key_guessing: (bool) :param warn_button_key_duplicates: If True then duplicate Button Keys generate warnings (not recommended as they're expected) :type warn_button_key_duplicates: (bool) :param enable_treeview_869_patch: If True, then will use the treeview color patch for tk 8.6.9 :type enable_treeview_869_patch: (bool) :param enable_mac_notitlebar_patch: If True then Windows with no titlebar use an alternative technique when tkinter version < 8.6.10 :type enable_mac_notitlebar_patch: (bool) :param use_custom_titlebar: If True then a custom titlebar is used instead of the normal system titlebar :type use_custom_titlebar: (bool) :param titlebar_background_color: If custom titlebar indicated by use_custom_titlebar, then use this as background color :type titlebar_background_color: str | None :param titlebar_text_color: If custom titlebar indicated by use_custom_titlebar, then use this as text color :type titlebar_text_color: str | None :param titlebar_font: If custom titlebar indicated by use_custom_titlebar, then use this as title font :type titlebar_font: (str or (str, int[, str]) or None) | None :param titlebar_icon: If custom titlebar indicated by use_custom_titlebar, then use this as the icon (file or base64 bytes) :type titlebar_icon: bytes | str :param user_settings_path: default path for user_settings API calls. 
Expanded with os.path.expanduser so can contain ~ to represent user :type user_settings_path: (str) :param pysimplegui_settings_path: default path for the global PySimpleGUI user_settings :type pysimplegui_settings_path: (str) :param pysimplegui_settings_filename: default filename for the global PySimpleGUI user_settings :type pysimplegui_settings_filename: (str) :param keep_on_top: If True then all windows will automatically be set to keep_on_top=True :type keep_on_top: (bool) :param dpi_awareness: If True then will turn on DPI awareness (Windows only at the moment) :type dpi_awareness: (bool) :param scaling: Sets the default scaling for all windows including popups, etc. :type scaling: (float) :param disable_modal_windows: If True then all windows, including popups, will not be modal windows :type disable_modal_windows: (bool) :param tooltip_offset: Offset to use for tooltips as a tuple. These values will be added to the mouse location when the widget was entered. :type tooltip_offset: ((None, None) | (int, int)) :return: None :rtype: None " 1446,"def find_module(module, path=None, imp=None): if imp is None: imp = import_module with cwd_in_path(): try: return imp(module) except ImportError: # Raise a more specific error if the problem is that one of the # dot-separated segments of the module name is not a package. if '.' in module: parts = module.split('.') for i, part in enumerate(parts[:-1]): package = '.'.join(parts[:i + 1]) try: mpart = imp(package) except ImportError: # Break out and re-raise the original ImportError # instead. break try: mpart.__path__ except AttributeError: raise NotAPackage(package) raise ",Version of :func:`imp.find_module` supporting dots.,5,84,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_module(module, path=None, imp=None): if imp is None: imp = import_module with cwd_in_path(): try: return imp(module) except ImportError: # Raise a more specific error if the problem is that one of the # dot-separated segments of the module name is not a package. if '.' in module: parts = module.split('.') for i, part in enumerate(parts[:-1]): package = '.'.join(parts[:i + 1]) try: mpart = imp(package) except ImportError: # Break out and re-raise the original ImportError # instead. break try: mpart.__path__ except AttributeError: raise NotAPackage(package) raise ``` ###Assistant : Version of :func:`imp.find_module` supporting dots. " 1447,"def get_ffmpeg_path() -> Optional[Path]: # Check if ffmpeg is installed global_ffmpeg = shutil.which(""ffmpeg"") if global_ffmpeg: return Path(global_ffmpeg) # Get local ffmpeg path return get_local_ffmpeg() "," Get path to global ffmpeg binary or a local ffmpeg binary. Or None if not found. ",16,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_ffmpeg_path() -> Optional[Path]: # Check if ffmpeg is installed global_ffmpeg = shutil.which(""ffmpeg"") if global_ffmpeg: return Path(global_ffmpeg) # Get local ffmpeg path return get_local_ffmpeg() ``` ###Assistant : Get path to global ffmpeg binary or a local ffmpeg binary. Or None if not found. " 1448,"def __call__(self, inputs, state, scope=None): return self._call_wrapped_cell( inputs, state, cell_call_fn=self.cell.__call__, scope=scope ) ","Runs the RNN cell step computation. We assume that the wrapped RNNCell is being built within its `__call__` method. 
We directly use the wrapped cell's `__call__` in the overridden wrapper `__call__` method. This allows to use the wrapped cell and the non-wrapped cell equivalently when using `__call__`. Args: inputs: A tensor with wrapped cell's input. state: A tensor or tuple of tensors with wrapped cell's state. scope: VariableScope for the subgraph created in the wrapped cells' `__call__`. Returns: A pair containing: - Output: A tensor with cell's output. - New state: A tensor or tuple of tensors with new wrapped cell's state. ",102,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self, inputs, state, scope=None): return self._call_wrapped_cell( inputs, state, cell_call_fn=self.cell.__call__, scope=scope ) ``` ###Assistant : Runs the RNN cell step computation. We assume that the wrapped RNNCell is being built within its `__call__` method. We directly use the wrapped cell's `__call__` in the overridden wrapper `__call__` method. This allows to use the wrapped cell and the non-wrapped cell equivalently when using `__call__`. Args: inputs: A tensor with wrapped cell's input. state: A tensor or tuple of tensors with wrapped cell's state. scope: VariableScope for the subgraph created in the wrapped cells' `__call__`. Returns: A pair containing: - Output: A tensor with cell's output. - New state: A tensor or tuple of tensors with new wrapped cell's state. " 1449,"def get_kurtosis(self) -> pd.DataFrame: vals = list() for period in portfolio_helper.PERIODS: vals.append( [ round( scipy.stats.kurtosis( portfolio_helper.filter_df_by_period(self.returns, period) ), 3, ), round( scipy.stats.skew( portfolio_helper.filter_df_by_period( self.benchmark_returns, period ) ), 3, ), ] ) return pd.DataFrame( vals, index=portfolio_helper.PERIODS, columns=[""Portfolio"", ""Benchmark""] ) ","Class method that retrieves kurtosis for portfolio and benchmark selected Returns ------- pd.DataFrame DataFrame with kurtosis for portfolio and benchmark for different periods ",23,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_kurtosis(self) -> pd.DataFrame: vals = list() for period in portfolio_helper.PERIODS: vals.append( [ round( scipy.stats.kurtosis( portfolio_helper.filter_df_by_period(self.returns, period) ), 3, ), round( scipy.stats.skew( portfolio_helper.filter_df_by_period( self.benchmark_returns, period ) ), 3, ), ] ) return pd.DataFrame( vals, index=portfolio_helper.PERIODS, columns=[""Portfolio"", ""Benchmark""] ) ``` ###Assistant : Class method that retrieves kurtosis for portfolio and benchmark selected Returns ------- pd.DataFrame DataFrame with kurtosis for portfolio and benchmark for different periods " 1450,"def _save_model(self, epoch, batch, logs): logs = logs or {} if ( isinstance(self.save_freq, int) or self.epochs_since_last_save >= self.period ): # Block only when saving interval is reached. 
logs = tf_utils.sync_to_numpy_or_python_type(logs) self.epochs_since_last_save = 0 filepath = self._get_file_path(epoch, batch, logs) try: if self.save_best_only: current = logs.get(self.monitor) if current is None: logging.warning( ""Can save best model only with %s available, "" ""skipping."", self.monitor, ) else: if self.monitor_op(current, self.best): if self.verbose > 0: io_utils.print_msg( f""\nEpoch {epoch + 1}: {self.monitor} improved "" f""from {self.best:.5f} to {current:.5f}, "" f""saving model to {filepath}"" ) self.best = current if self.save_weights_only: self.model.save_weights( filepath, overwrite=True, options=self._options, ) else: self.model.save( filepath, overwrite=True, options=self._options, ) else: if self.verbose > 0: io_utils.print_msg( f""\nEpoch {epoch + 1}: "" f""{self.monitor} did not improve from {self.best:.5f}"" ) else: if self.verbose > 0: io_utils.print_msg( f""\nEpoch {epoch + 1}: saving model to {filepath}"" ) if self.save_weights_only: self.model.save_weights( filepath, overwrite=True, options=self._options ) else: self.model.save( filepath, overwrite=True, options=self._options ) self._maybe_remove_file() except IsADirectoryError as e: # h5py 3.x raise IOError( ""Please specify a non-directory filepath for "" ""ModelCheckpoint. Filepath used is an existing "" f""directory: {filepath}"" ) except IOError as e: # h5py 2.x # `e.errno` appears to be `None` so checking the content of `e.args[0]`. if ""is a directory"" in str(e.args[0]).lower(): raise IOError( ""Please specify a non-directory filepath for "" ""ModelCheckpoint. Filepath used is an existing "" f""directory: f{filepath}"" ) # Re-throw the error for any other causes. raise e ","Saves the model. Args: epoch: the epoch this iteration is in. batch: the batch this iteration is in. `None` if the `save_freq` is set to `epoch`. logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`. ",36,230,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _save_model(self, epoch, batch, logs): logs = logs or {} if ( isinstance(self.save_freq, int) or self.epochs_since_last_save >= self.period ): # Block only when saving interval is reached. logs = tf_utils.sync_to_numpy_or_python_type(logs) self.epochs_since_last_save = 0 filepath = self._get_file_path(epoch, batch, logs) try: if self.save_best_only: current = logs.get(self.monitor) if current is None: logging.warning( ""Can save best model only with %s available, "" ""skipping."", self.monitor, ) else: if self.monitor_op(current, self.best): if self.verbose > 0: io_utils.print_msg( f""\nEpoch {epoch + 1}: {self.monitor} improved "" f""from {self.best:.5f} to {current:.5f}, "" f""saving model to {filepath}"" ) self.best = current if self.save_weights_only: self.model.save_weights( filepath, overwrite=True, options=self._options, ) else: self.model.save( filepath, overwrite=True, options=self._options, ) else: if self.verbose > 0: io_utils.print_msg( f""\nEpoch {epoch + 1}: "" f""{self.monitor} did not improve from {self.best:.5f}"" ) else: if self.verbose > 0: io_utils.print_msg( f""\nEpoch {epoch + 1}: saving model to {filepath}"" ) if self.save_weights_only: self.model.save_weights( filepath, overwrite=True, options=self._options ) else: self.model.save( filepath, overwrite=True, options=self._options ) self._maybe_remove_file() except IsADirectoryError as e: # h5py 3.x raise IOError( ""Please specify a non-directory filepath for "" ""ModelCheckpoint. 
Filepath used is an existing "" f""directory: {filepath}"" ) except IOError as e: # h5py 2.x # `e.errno` appears to be `None` so checking the content of `e.args[0]`. if ""is a directory"" in str(e.args[0]).lower(): raise IOError( ""Please specify a non-directory filepath for "" ""ModelCheckpoint. Filepath used is an existing "" f""directory: f{filepath}"" ) # Re-throw the error for any other causes. raise e ``` ###Assistant : Saves the model. Args: epoch: the epoch this iteration is in. batch: the batch this iteration is in. `None` if the `save_freq` is set to `epoch`. logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`. " 1451,"def steiner_tree(G, terminal_nodes, weight=""weight"", method=None): r if method is None: import warnings msg = ( ""steiner_tree will change default method from 'kou' to 'mehlhorn'"" ""in version 3.2.\nSet the `method` kwarg to remove this warning."" ) warnings.warn(msg, FutureWarning, stacklevel=4) method = ""kou"" try: algo = ALGORITHMS[method] except KeyError as e: msg = f""{method} is not a valid choice for an algorithm."" raise ValueError(msg) from e edges = algo(G, terminal_nodes, weight) # For multigraph we should add the minimal weight edge keys if G.is_multigraph(): edges = ( (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges ) T = G.edge_subgraph(edges) return T ","Return an approximation to the minimum Steiner tree of a graph. The minimum Steiner tree of `G` w.r.t a set of `terminal_nodes` (also *S*) is a tree within `G` that spans those nodes and has minimum size (sum of edge weights) among all such trees. The approximation algorithm is specified with the `method` keyword argument. All three available algorithms produce a tree whose weight is within a (2 - (2 / l)) factor of the weight of the optimal Steiner tree, where *l* is the minimum number of leaf nodes across all possible Steiner trees. * `kou` [2]_ (runtime $O(|S| |V|^2)$) computes the minimum spanning tree of the subgraph of the metric closure of *G* induced by the terminal nodes, where the metric closure of *G* is the complete graph in which each edge is weighted by the shortest path distance between the nodes in *G*. * `mehlhorn` [3]_ (runtime $O(|E|+|V|\log|V|)$) modifies Kou et al.'s algorithm, beginning by finding the closest terminal node for each non-terminal. This data is used to create a complete graph containing only the terminal nodes, in which edge is weighted with the shortest path distance between them. The algorithm then proceeds in the same way as Kou et al.. Parameters ---------- G : NetworkX graph terminal_nodes : list A list of terminal nodes for which minimum steiner tree is to be found. weight : string (default = 'weight') Use the edge attribute specified by this string as the edge weight. Any edge attribute not present defaults to 1. method : string, optional (default = 'kou') The algorithm to use to approximate the Steiner tree. Supported options: 'kou', 'mehlhorn'. Other inputs produce a ValueError. Returns ------- NetworkX graph Approximation to the minimum steiner tree of `G` induced by `terminal_nodes` . Notes ----- For multigraphs, the edge between two nodes with minimum weight is the edge put into the Steiner tree. References ---------- .. [1] Steiner_tree_problem on Wikipedia. https://en.wikipedia.org/wiki/Steiner_tree_problem .. [2] Kou, L., G. Markowsky, and L. Berman. 1981. ‘A Fast Algorithm for Steiner Trees’. Acta Informatica 15 (2): 141–45. https://doi.org/10.1007/BF00288961. .. [3] Mehlhorn, Kurt. 1988. 
‘A Faster Approximation Algorithm for the Steiner Problem in Graphs’. Information Processing Letters 27 (3): 125–28. https://doi.org/10.1016/0020-0190(88)90066-X. ",366,102,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def steiner_tree(G, terminal_nodes, weight=""weight"", method=None): r if method is None: import warnings msg = ( ""steiner_tree will change default method from 'kou' to 'mehlhorn'"" ""in version 3.2.\nSet the `method` kwarg to remove this warning."" ) warnings.warn(msg, FutureWarning, stacklevel=4) method = ""kou"" try: algo = ALGORITHMS[method] except KeyError as e: msg = f""{method} is not a valid choice for an algorithm."" raise ValueError(msg) from e edges = algo(G, terminal_nodes, weight) # For multigraph we should add the minimal weight edge keys if G.is_multigraph(): edges = ( (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges ) T = G.edge_subgraph(edges) return T ``` ###Assistant : Return an approximation to the minimum Steiner tree of a graph. The minimum Steiner tree of `G` w.r.t a set of `terminal_nodes` (also *S*) is a tree within `G` that spans those nodes and has minimum size (sum of edge weights) among all such trees. The approximation algorithm is specified with the `method` keyword argument. All three available algorithms produce a tree whose weight is within a (2 - (2 / l)) factor of the weight of the optimal Steiner tree, where *l* is the minimum number of leaf nodes across all possible Steiner trees. * `kou` [2]_ (runtime $O(|S| |V|^2)$) computes the minimum spanning tree of the subgraph of the metric closure of *G* induced by the terminal nodes, where the metric closure of *G* is the complete graph in which each edge is weighted by the shortest path distance between the nodes in *G*. * `mehlhorn` [3]_ (runtime $O(|E|+|V|\log|V|)$) modifies Kou et al.'s algorithm, beginning by finding the closest terminal node for each non-terminal. This data is used to create a complete graph containing only the terminal nodes, in which edge is weighted with the shortest path distance between them. The algorithm then proceeds in the same way as Kou et al.. Parameters ---------- G : NetworkX graph terminal_nodes : list A list of terminal nodes for which minimum steiner tree is to be found. weight : string (default = 'weight') Use the edge attribute specified by this string as the edge weight. Any edge attribute not present defaults to 1. method : string, optional (default = 'kou') The algorithm to use to approximate the Steiner tree. Supported options: 'kou', 'mehlhorn'. Other inputs produce a ValueError. Returns ------- NetworkX graph Approximation to the minimum steiner tree of `G` induced by `terminal_nodes` . Notes ----- For multigraphs, the edge between two nodes with minimum weight is the edge put into the Steiner tree. References ---------- .. [1] Steiner_tree_problem on Wikipedia. https://en.wikipedia.org/wiki/Steiner_tree_problem .. [2] Kou, L., G. Markowsky, and L. Berman. 1981. ‘A Fast Algorithm for Steiner Trees’. Acta Informatica 15 (2): 141–45. https://doi.org/10.1007/BF00288961. .. [3] Mehlhorn, Kurt. 1988. ‘A Faster Approximation Algorithm for the Steiner Problem in Graphs’. Information Processing Letters 27 (3): 125–28. https://doi.org/10.1016/0020-0190(88)90066-X. " 1452,"def find_object(self, queryset, request): if ""id"" in request.GET: return queryset.get(id=request.GET[""id""]) "," Override this to implement more find methods. 
",7,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_object(self, queryset, request): if ""id"" in request.GET: return queryset.get(id=request.GET[""id""]) ``` ###Assistant : Override this to implement more find methods. " 1453,"def _maybe_infer_dtype_type(element): tipo = None if hasattr(element, ""dtype""): tipo = element.dtype elif is_list_like(element): element = np.asarray(element) tipo = element.dtype return tipo "," Try to infer an object's dtype, for use in arithmetic ops. Uses `element.dtype` if that's available. Objects implementing the iterator protocol are cast to a NumPy array, and from there the array's type is used. Parameters ---------- element : object Possibly has a `.dtype` attribute, and possibly the iterator protocol. Returns ------- tipo : type Examples -------- >>> from collections import namedtuple >>> Foo = namedtuple(""Foo"", ""dtype"") >>> _maybe_infer_dtype_type(Foo(np.dtype(""i8""))) dtype('int64') ",70,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _maybe_infer_dtype_type(element): tipo = None if hasattr(element, ""dtype""): tipo = element.dtype elif is_list_like(element): element = np.asarray(element) tipo = element.dtype return tipo ``` ###Assistant : Try to infer an object's dtype, for use in arithmetic ops. Uses `element.dtype` if that's available. Objects implementing the iterator protocol are cast to a NumPy array, and from there the array's type is used. Parameters ---------- element : object Possibly has a `.dtype` attribute, and possibly the iterator protocol. Returns ------- tipo : type Examples -------- >>> from collections import namedtuple >>> Foo = namedtuple(""Foo"", ""dtype"") >>> _maybe_infer_dtype_type(Foo(np.dtype(""i8""))) dtype('int64') " 1454,"def test_create_api_message_special(): request = get_new_request(""Alexa.PowerController"", ""TurnOn"") directive_header = request[""directive""][""header""] directive_header.pop(""correlationToken"") directive = messages.AlexaDirective(request) msg = directive.response(""testName"", ""testNameSpace"")._response assert ""event"" in msg msg = msg[""event""] assert msg[""header""][""messageId""] is not None assert msg[""header""][""messageId""] != directive_header[""messageId""] assert ""correlationToken"" not in msg[""header""] assert msg[""header""][""name""] == ""testName"" assert msg[""header""][""namespace""] == ""testNameSpace"" assert msg[""header""][""payloadVersion""] == ""3"" assert msg[""payload""] == {} assert ""endpoint"" not in msg ",Create an API message response of a request with non defaults.,11,59,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_create_api_message_special(): request = get_new_request(""Alexa.PowerController"", ""TurnOn"") directive_header = request[""directive""][""header""] directive_header.pop(""correlationToken"") directive = messages.AlexaDirective(request) msg = directive.response(""testName"", ""testNameSpace"")._response assert ""event"" in msg msg = msg[""event""] assert msg[""header""][""messageId""] is not None assert msg[""header""][""messageId""] != directive_header[""messageId""] assert ""correlationToken"" not in msg[""header""] assert msg[""header""][""name""] == ""testName"" assert msg[""header""][""namespace""] == ""testNameSpace"" assert msg[""header""][""payloadVersion""] == ""3"" assert msg[""payload""] == {} assert ""endpoint"" not in msg ``` ###Assistant : Create an API message response of a request with non defaults. " 1455,"def tearDown(self): r del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() "," TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 ",27,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tearDown(self): r del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() ``` ###Assistant : TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 " 1456,"def test_pagination_from_sync_and_messages(self): channel = self._send_relation(RelationTypes.ANNOTATION, ""m.reaction"", ""A"") self.assertEquals(200, channel.code, channel.json_body) annotation_id = channel.json_body[""event_id""] # Send an event after the relation events. self.helper.send(self.room, body=""Latest event"", tok=self.user_token) # Request /sync, limiting it such that only the latest event is returned # (and not the relation). filter = urllib.parse.quote_plus( '{""room"": {""timeline"": {""limit"": 1}}}'.encode() ) channel = self.make_request( ""GET"", f""/sync?filter={filter}"", access_token=self.user_token ) self.assertEquals(200, channel.code, channel.json_body) room_timeline = channel.json_body[""rooms""][""join""][self.room][""timeline""] sync_prev_batch = room_timeline[""prev_batch""] self.assertIsNotNone(sync_prev_batch) # Ensure the relation event is not in the batch returned from /sync. self.assertNotIn( annotation_id, [ev[""event_id""] for ev in room_timeline[""events""]] ) # Request /messages, limiting it such that only the latest event is # returned (and not the relation). channel = self.make_request( ""GET"", f""/rooms/{self.room}/messages?dir=b&limit=1"", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) messages_end = channel.json_body[""end""] self.assertIsNotNone(messages_end) # Ensure the relation event is not in the chunk returned from /messages. self.assertNotIn( annotation_id, [ev[""event_id""] for ev in channel.json_body[""chunk""]] ) # Request /relations with the pagination tokens received from both the # /sync and /messages responses above, in turn. # # This is a tiny bit silly since the client wouldn't know the parent ID # from the requests above; consider the parent ID to be known from a # previous /sync. 
for from_token in (sync_prev_batch, messages_end): channel = self.make_request( ""GET"", f""/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}"", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) # The relation should be in the returned chunk. self.assertIn( annotation_id, [ev[""event_id""] for ev in channel.json_body[""chunk""]] ) ",Pagination tokens from /sync and /messages can be used to paginate /relations.,12,226,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pagination_from_sync_and_messages(self): channel = self._send_relation(RelationTypes.ANNOTATION, ""m.reaction"", ""A"") self.assertEquals(200, channel.code, channel.json_body) annotation_id = channel.json_body[""event_id""] # Send an event after the relation events. self.helper.send(self.room, body=""Latest event"", tok=self.user_token) # Request /sync, limiting it such that only the latest event is returned # (and not the relation). filter = urllib.parse.quote_plus( '{""room"": {""timeline"": {""limit"": 1}}}'.encode() ) channel = self.make_request( ""GET"", f""/sync?filter={filter}"", access_token=self.user_token ) self.assertEquals(200, channel.code, channel.json_body) room_timeline = channel.json_body[""rooms""][""join""][self.room][""timeline""] sync_prev_batch = room_timeline[""prev_batch""] self.assertIsNotNone(sync_prev_batch) # Ensure the relation event is not in the batch returned from /sync. self.assertNotIn( annotation_id, [ev[""event_id""] for ev in room_timeline[""events""]] ) # Request /messages, limiting it such that only the latest event is # returned (and not the relation). channel = self.make_request( ""GET"", f""/rooms/{self.room}/messages?dir=b&limit=1"", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) messages_end = channel.json_body[""end""] self.assertIsNotNone(messages_end) # Ensure the relation event is not in the chunk returned from /messages. self.assertNotIn( annotation_id, [ev[""event_id""] for ev in channel.json_body[""chunk""]] ) # Request /relations with the pagination tokens received from both the # /sync and /messages responses above, in turn. # # This is a tiny bit silly since the client wouldn't know the parent ID # from the requests above; consider the parent ID to be known from a # previous /sync. for from_token in (sync_prev_batch, messages_end): channel = self.make_request( ""GET"", f""/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}"", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) # The relation should be in the returned chunk. self.assertIn( annotation_id, [ev[""event_id""] for ev in channel.json_body[""chunk""]] ) ``` ###Assistant : Pagination tokens from /sync and /messages can be used to paginate /relations. " 1457,"def download(self, url_or_urls): url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True) return url_or_urls ","Download given url(s). Args: url_or_urls: url or `list`/`dict` of urls to download and extract. Each url is a `str`. Returns: downloaded_path(s): `str`, The downloaded paths matching the given input url_or_urls. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` ",37,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def download(self, url_or_urls): url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True) return url_or_urls ``` ###Assistant : Download given url(s). Args: url_or_urls: url or `list`/`dict` of urls to download and extract. Each url is a `str`. Returns: downloaded_path(s): `str`, The downloaded paths matching the given input url_or_urls. Example: ```py >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz') ``` " 1458,"def draw_gaussian(image, point, sigma): # Check if the gaussian is inside point[0] = round(point[0], 2) point[1] = round(point[1], 2) ul = [math.floor(point[0] - 7.5 * sigma), math.floor(point[1] - 7.5 * sigma)] br = [math.floor(point[0] + 7.5 * sigma), math.floor(point[1] + 7.5 * sigma)] if (ul[0] > image.shape[1] or ul[1] > image.shape[0] or br[0] < 1 or br[1] < 1): return image size = 15 * sigma + 1 g = _gaussian(size, sigma=0.1) g_x = [int(max(1, -ul[0])), int(min(br[0], image.shape[1])) - int(max(1, ul[0])) + int(max(1, -ul[0]))] g_y = [int(max(1, -ul[1])), int(min(br[1], image.shape[0])) - int(max(1, ul[1])) + int(max(1, -ul[1]))] img_x = [int(max(1, ul[0])), int(min(br[0], image.shape[1]))] img_y = [int(max(1, ul[1])), int(min(br[1], image.shape[0]))] assert (g_x[0] > 0 and g_y[1] > 0) image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] = \ image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] + g[g_y[0] - 1:g_y[1], g_x[0] - 1:g_x[1]] image[image > 1] = 1 return image # Adapted from: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/api.py"," Draw gaussian circle at a point in an image. Args: image (np.array): An image of shape (H, W) point (np.array): The center point of the guassian circle sigma (float): Standard deviation of the gaussian kernel Returns: np.array: The image with the drawn gaussian. ",43,148,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def draw_gaussian(image, point, sigma): # Check if the gaussian is inside point[0] = round(point[0], 2) point[1] = round(point[1], 2) ul = [math.floor(point[0] - 7.5 * sigma), math.floor(point[1] - 7.5 * sigma)] br = [math.floor(point[0] + 7.5 * sigma), math.floor(point[1] + 7.5 * sigma)] if (ul[0] > image.shape[1] or ul[1] > image.shape[0] or br[0] < 1 or br[1] < 1): return image size = 15 * sigma + 1 g = _gaussian(size, sigma=0.1) g_x = [int(max(1, -ul[0])), int(min(br[0], image.shape[1])) - int(max(1, ul[0])) + int(max(1, -ul[0]))] g_y = [int(max(1, -ul[1])), int(min(br[1], image.shape[0])) - int(max(1, ul[1])) + int(max(1, -ul[1]))] img_x = [int(max(1, ul[0])), int(min(br[0], image.shape[1]))] img_y = [int(max(1, ul[1])), int(min(br[1], image.shape[0]))] assert (g_x[0] > 0 and g_y[1] > 0) image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] = \ image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] + g[g_y[0] - 1:g_y[1], g_x[0] - 1:g_x[1]] image[image > 1] = 1 return image # Adapted from: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/api.py ``` ###Assistant : Draw gaussian circle at a point in an image. Args: image (np.array): An image of shape (H, W) point (np.array): The center point of the guassian circle sigma (float): Standard deviation of the gaussian kernel Returns: np.array: The image with the drawn gaussian. 
" 1459,"def setdefault(self, key, default=None): if key in self: return self[key] self[key] = default return default ","Insert key with a value of default if key is not in the dictionary. Return the value for key if key is in the dictionary, else default. ",27,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def setdefault(self, key, default=None): if key in self: return self[key] self[key] = default return default ``` ###Assistant : Insert key with a value of default if key is not in the dictionary. Return the value for key if key is in the dictionary, else default. " 1460,"def current_year(context): context[""current_year""] = datetime.datetime.now().year return context "," Add the current year to the context, so it can be used for the copyright note, or other places where it is needed. ",23,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def current_year(context): context[""current_year""] = datetime.datetime.now().year return context ``` ###Assistant : Add the current year to the context, so it can be used for the copyright note, or other places where it is needed. " 1461,"def fileformatparser_map(self) -> Mapping[str, type]: return { ""csv"": CsvParser, ""parquet"": ParquetParser, } # TODO: make these user configurable in spec.json ab_additional_col = ""_ab_additional_properties"" ab_last_mod_col = ""_ab_source_file_last_modified"" ab_file_name_col = ""_ab_source_file_url"" airbyte_columns = [ab_additional_col, ab_last_mod_col, ab_file_name_col] datetime_format_string = ""%Y-%m-%dT%H:%M:%S%z"" ",Mapping where every key is equal 'filetype' and values are corresponding parser classes.,13,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fileformatparser_map(self) -> Mapping[str, type]: return { ""csv"": CsvParser, ""parquet"": ParquetParser, } # TODO: make these user configurable in spec.json ab_additional_col = ""_ab_additional_properties"" ab_last_mod_col = ""_ab_source_file_last_modified"" ab_file_name_col = ""_ab_source_file_url"" airbyte_columns = [ab_additional_col, ab_last_mod_col, ab_file_name_col] datetime_format_string = ""%Y-%m-%dT%H:%M:%S%z"" ``` ###Assistant : Mapping where every key is equal 'filetype' and values are corresponding parser classes. " 1462,"def pivot_table(self, index=None, columns=None, values=None, aggfunc=""mean""): from dask.dataframe.reshape import pivot_table return pivot_table( self, index=index, columns=columns, values=values, aggfunc=aggfunc ) "," Create a spreadsheet-style pivot table as a DataFrame. Target ``columns`` must have category dtype to infer result's ``columns``. ``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar. Parameters ---------- values : scalar column to aggregate index : scalar column to be index columns : scalar column to be columns aggfunc : {'mean', 'sum', 'count'}, default 'mean' Returns ------- table : DataFrame ",61,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pivot_table(self, index=None, columns=None, values=None, aggfunc=""mean""): from dask.dataframe.reshape import pivot_table return pivot_table( self, index=index, columns=columns, values=values, aggfunc=aggfunc ) ``` ###Assistant : Create a spreadsheet-style pivot table as a DataFrame. Target ``columns`` must have category dtype to infer result's ``columns``. 
``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar. Parameters ---------- values : scalar column to aggregate index : scalar column to be index columns : scalar column to be columns aggfunc : {'mean', 'sum', 'count'}, default 'mean' Returns ------- table : DataFrame " 1463,"def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra): os.environ[""TUNE_MAX_PENDING_TRIALS_PG""] = ""2"" register_trainable(""foo2"", MyResettableClass) [trial1, trial2, trial3, trial4] = tune.run( ""foo2"", config={ ""fail"": tune.grid_search([False, True, False, False]), ""id"": -1, ""sleep"": 2, }, reuse_actors=True, resources_per_trial={""cpu"": 2}, raise_on_failed_trial=False, ).trials assert trial1.last_result[""num_resets""] == 0 assert trial3.last_result[""num_resets""] == 0 assert trial4.last_result[""num_resets""] == 1 ","Test that failing trial's actors are not reused. - 2 trials can run at the same time - Trial 1 succeeds, trial 2 fails - Trial 3 will be scheduled after trial 2 failed, so won't reuse actor - Trial 4 will be scheduled after trial 1 succeeded, so will reuse actor ",52,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra): os.environ[""TUNE_MAX_PENDING_TRIALS_PG""] = ""2"" register_trainable(""foo2"", MyResettableClass) [trial1, trial2, trial3, trial4] = tune.run( ""foo2"", config={ ""fail"": tune.grid_search([False, True, False, False]), ""id"": -1, ""sleep"": 2, }, reuse_actors=True, resources_per_trial={""cpu"": 2}, raise_on_failed_trial=False, ).trials assert trial1.last_result[""num_resets""] == 0 assert trial3.last_result[""num_resets""] == 0 assert trial4.last_result[""num_resets""] == 1 ``` ###Assistant : Test that failing trial's actors are not reused. - 2 trials can run at the same time - Trial 1 succeeds, trial 2 fails - Trial 3 will be scheduled after trial 2 failed, so won't reuse actor - Trial 4 will be scheduled after trial 1 succeeded, so will reuse actor " 1464,"def dispatch(self, request, *args, **kwargs): page_id = kwargs.get(""page_id"") if not get_forms_for_user(self.request.user).filter(id=page_id).exists(): raise PermissionDenied self.page = get_object_or_404(Page, id=page_id).specific self.submissions = self.get_queryset() if self.request.method == ""POST"": self.handle_delete(self.submissions) return redirect(self.get_success_url(), page_id) return super().dispatch(request, *args, **kwargs) ","Check permissions, set the page and submissions, handle delete",9,32,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def dispatch(self, request, *args, **kwargs): page_id = kwargs.get(""page_id"") if not get_forms_for_user(self.request.user).filter(id=page_id).exists(): raise PermissionDenied self.page = get_object_or_404(Page, id=page_id).specific self.submissions = self.get_queryset() if self.request.method == ""POST"": self.handle_delete(self.submissions) return redirect(self.get_success_url(), page_id) return super().dispatch(request, *args, **kwargs) ``` ###Assistant : Check permissions, set the page and submissions, handle delete " 1465,"def _prior_bpd(self, x_start): batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) "," Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. ",48,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _prior_bpd(self, x_start): batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) ``` ###Assistant : Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. " 1466,"def test_rolling_non_monotonic(method, expected): # Based on an example found in computation.rst use_expanding = [True, False, True, False, True, True, True, True] df = DataFrame({""values"": np.arange(len(use_expanding)) ** 2}) "," Make sure the (rare) branch of non-monotonic indices is covered by a test. output from 1.1.3 is assumed to be the expected output. Output of sum/mean has manually been verified. GH 36933. ",32,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_rolling_non_monotonic(method, expected): # Based on an example found in computation.rst use_expanding = [True, False, True, False, True, True, True, True] df = DataFrame({""values"": np.arange(len(use_expanding)) ** 2}) ``` ###Assistant : Make sure the (rare) branch of non-monotonic indices is covered by a test. output from 1.1.3 is assumed to be the expected output. Output of sum/mean has manually been verified. GH 36933. " 1467,"def clip(self, min=None, max=None): from dask.array.ufunc import clip return clip(self, min, max) ","Return an array whose values are limited to ``[min, max]``. One of max or min must be given. Refer to :func:`dask.array.clip` for full documentation. See Also -------- dask.array.clip : equivalent function ",31,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clip(self, min=None, max=None): from dask.array.ufunc import clip return clip(self, min, max) ``` ###Assistant : Return an array whose values are limited to ``[min, max]``. 
One of max or min must be given. Refer to :func:`dask.array.clip` for full documentation. See Also -------- dask.array.clip : equivalent function " 1468,"def _floor_std(self, std): r original_tensor = std.clone().detach() std = torch.clamp(std, min=self.std_floor) if torch.any(original_tensor != std): print( ""[*] Standard deviation was floored! The model is preventing overfitting, nothing serious to worry about"" ) return std "," It clamps the standard deviation to not to go below some level This removes the problem when the model tries to cheat for higher likelihoods by converting one of the gaussians to a point mass. Args: std (float Tensor): tensor containing the standard deviation to be ",46,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _floor_std(self, std): r original_tensor = std.clone().detach() std = torch.clamp(std, min=self.std_floor) if torch.any(original_tensor != std): print( ""[*] Standard deviation was floored! The model is preventing overfitting, nothing serious to worry about"" ) return std ``` ###Assistant : It clamps the standard deviation to not to go below some level This removes the problem when the model tries to cheat for higher likelihoods by converting one of the gaussians to a point mass. Args: std (float Tensor): tensor containing the standard deviation to be " 1469,"def asXML(self, doctag=None, namedItemsOnly=False, indent="""", formatted=True): nl = ""\n"" out = [] namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items() for v in vlist) nextLevelIndent = indent + "" "" # collapse out indents if formatting is not desired if not formatted: indent = """" nextLevelIndent = """" nl = """" selfTag = None if doctag is not None: selfTag = doctag else: if self.__name: selfTag = self.__name if not selfTag: if namedItemsOnly: return """" else: selfTag = ""ITEM"" out += [nl, indent, ""<"", selfTag, "">""] for i, res in enumerate(self.__toklist): if isinstance(res, ParseResults): if i in namedItems: out += [res.asXML(namedItems[i], namedItemsOnly and doctag is None, nextLevelIndent, formatted)] else: out += [res.asXML(None, namedItemsOnly and doctag is None, nextLevelIndent, formatted)] else: # individual token, see if there is a name for it resTag = None if i in namedItems: resTag = namedItems[i] if not resTag: if namedItemsOnly: continue else: resTag = ""ITEM"" xmlBodyText = _xml_escape(_ustr(res)) out += [nl, nextLevelIndent, ""<"", resTag, "">"", xmlBodyText, """"] out += [nl, indent, """"] return """".join(out) "," (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. ",19,175,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def asXML(self, doctag=None, namedItemsOnly=False, indent="""", formatted=True): nl = ""\n"" out = [] namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items() for v in vlist) nextLevelIndent = indent + "" "" # collapse out indents if formatting is not desired if not formatted: indent = """" nextLevelIndent = """" nl = """" selfTag = None if doctag is not None: selfTag = doctag else: if self.__name: selfTag = self.__name if not selfTag: if namedItemsOnly: return """" else: selfTag = ""ITEM"" out += [nl, indent, ""<"", selfTag, "">""] for i, res in enumerate(self.__toklist): if isinstance(res, ParseResults): if i in namedItems: out += [res.asXML(namedItems[i], namedItemsOnly and doctag is None, nextLevelIndent, formatted)] else: out += [res.asXML(None, namedItemsOnly and doctag is None, nextLevelIndent, formatted)] else: # individual token, see if there is a name for it resTag = None if i in namedItems: resTag = namedItems[i] if not resTag: if namedItemsOnly: continue else: resTag = ""ITEM"" xmlBodyText = _xml_escape(_ustr(res)) out += [nl, nextLevelIndent, ""<"", resTag, "">"", xmlBodyText, """"] out += [nl, indent, """"] return """".join(out) ``` ###Assistant : (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. " 1470,"def times_seen_with_pending(self) -> int: return self.times_seen + self.times_seen_pending "," Returns `times_seen` with any additional pending updates from `buffers` added on. This value must be set first. ",17,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def times_seen_with_pending(self) -> int: return self.times_seen + self.times_seen_pending ``` ###Assistant : Returns `times_seen` with any additional pending updates from `buffers` added on. This value must be set first. " 1471,"def test_api_create_storage_path(self): response = self.client.post( self.ENDPOINT, json.dumps( { ""name"": ""A storage path"", ""path"": ""Somewhere/{asn}"", }, ), content_type=""application/json"", ) self.assertEqual(response.status_code, 201) self.assertEqual(StoragePath.objects.count(), 2) "," GIVEN: - API request to create a storage paths WHEN: - API is called THEN: - Correct HTTP response - New storage path is created ",25,22,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_api_create_storage_path(self): response = self.client.post( self.ENDPOINT, json.dumps( { ""name"": ""A storage path"", ""path"": ""Somewhere/{asn}"", }, ), content_type=""application/json"", ) self.assertEqual(response.status_code, 201) self.assertEqual(StoragePath.objects.count(), 2) ``` ###Assistant : GIVEN: - API request to create a storage paths WHEN: - API is called THEN: - Correct HTTP response - New storage path is created " 1472,"def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin): r = post( url=reverse('api:workflow_job_template_list'), data=dict( name='workflow that tests ask_for prompts', organization=organization.id, inventory=inventory.id, job_tags='', skip_tags='', ask_inventory_on_launch=True, ask_labels_on_launch=True, ask_limit_on_launch=True, ask_scm_branch_on_launch=True, ask_skip_tags_on_launch=True, ask_tags_on_launch=True, ask_variables_on_launch=True, ), user=org_admin, expect=201, ) wfjt = WorkflowJobTemplate.objects.get(id=r.data['id']) assert wfjt.ask_inventory_on_launch is True assert wfjt.ask_labels_on_launch is True assert wfjt.ask_limit_on_launch is True assert wfjt.ask_scm_branch_on_launch is True assert wfjt.ask_skip_tags_on_launch is True assert wfjt.ask_tags_on_launch is True assert wfjt.ask_variables_on_launch is True @pytest.mark.django_db"," Tests behaviour and values of ask_for_* fields on WFJT via POST ",11,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin): r = post( url=reverse('api:workflow_job_template_list'), data=dict( name='workflow that tests ask_for prompts', organization=organization.id, inventory=inventory.id, job_tags='', skip_tags='', ask_inventory_on_launch=True, ask_labels_on_launch=True, ask_limit_on_launch=True, ask_scm_branch_on_launch=True, ask_skip_tags_on_launch=True, ask_tags_on_launch=True, ask_variables_on_launch=True, ), user=org_admin, expect=201, ) wfjt = WorkflowJobTemplate.objects.get(id=r.data['id']) assert wfjt.ask_inventory_on_launch is True assert wfjt.ask_labels_on_launch is True assert wfjt.ask_limit_on_launch is True assert wfjt.ask_scm_branch_on_launch is True assert wfjt.ask_skip_tags_on_launch is True assert wfjt.ask_tags_on_launch is True assert wfjt.ask_variables_on_launch is True @pytest.mark.django_db ``` ###Assistant : Tests behaviour and values of ask_for_* fields on WFJT via POST " 1473,"def get_used_memory(): # Try to accurately figure out the memory usage if we are in a docker # container. docker_usage = None # For cgroups v1: memory_usage_filename = ""/sys/fs/cgroup/memory/memory.stat"" # For cgroups v2: memory_usage_filename_v2 = ""/sys/fs/cgroup/memory.current"" if os.path.exists(memory_usage_filename): docker_usage = get_cgroupv1_used_memory(memory_usage_filename) elif os.path.exists(memory_usage_filename_v2): with open(memory_usage_filename_v2, ""r"") as f: docker_usage = int(f.read()) if docker_usage is not None: return docker_usage return psutil.virtual_memory().used ","Return the currently used system memory in bytes Returns: The total amount of used memory ",15,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_used_memory(): # Try to accurately figure out the memory usage if we are in a docker # container. 
docker_usage = None # For cgroups v1: memory_usage_filename = ""/sys/fs/cgroup/memory/memory.stat"" # For cgroups v2: memory_usage_filename_v2 = ""/sys/fs/cgroup/memory.current"" if os.path.exists(memory_usage_filename): docker_usage = get_cgroupv1_used_memory(memory_usage_filename) elif os.path.exists(memory_usage_filename_v2): with open(memory_usage_filename_v2, ""r"") as f: docker_usage = int(f.read()) if docker_usage is not None: return docker_usage return psutil.virtual_memory().used ``` ###Assistant : Return the currently used system memory in bytes Returns: The total amount of used memory " 1474,"def pack_x_y_sample_weight(x, y=None, sample_weight=None): if y is None: # For single x-input, we do no tuple wrapping since in this case # there is no ambiguity. This also makes NumPy and Dataset # consistent in that the user does not have to wrap their Dataset # data in an unnecessary tuple if not tf.nest.is_nested(x): return x else: return (x,) elif sample_weight is None: return (x, y) else: return (x, y, sample_weight) ","Packs user-provided data into a tuple. This is a convenience utility for packing data into the tuple formats that `Model.fit` uses. Standalone usage: >>> x = tf.ones((10, 1)) >>> data = tf.keras.utils.pack_x_y_sample_weight(x) >>> isinstance(data, tf.Tensor) True >>> y = tf.ones((10, 1)) >>> data = tf.keras.utils.pack_x_y_sample_weight(x, y) >>> isinstance(data, tuple) True >>> x, y = data Args: x: Features to pass to `Model`. y: Ground-truth targets to pass to `Model`. sample_weight: Sample weight for each element. Returns: Tuple in the format used in `Model.fit`. ",83,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pack_x_y_sample_weight(x, y=None, sample_weight=None): if y is None: # For single x-input, we do no tuple wrapping since in this case # there is no ambiguity. This also makes NumPy and Dataset # consistent in that the user does not have to wrap their Dataset # data in an unnecessary tuple if not tf.nest.is_nested(x): return x else: return (x,) elif sample_weight is None: return (x, y) else: return (x, y, sample_weight) ``` ###Assistant : Packs user-provided data into a tuple. This is a convenience utility for packing data into the tuple formats that `Model.fit` uses. Standalone usage: >>> x = tf.ones((10, 1)) >>> data = tf.keras.utils.pack_x_y_sample_weight(x) >>> isinstance(data, tf.Tensor) True >>> y = tf.ones((10, 1)) >>> data = tf.keras.utils.pack_x_y_sample_weight(x, y) >>> isinstance(data, tuple) True >>> x, y = data Args: x: Features to pass to `Model`. y: Ground-truth targets to pass to `Model`. sample_weight: Sample weight for each element. Returns: Tuple in the format used in `Model.fit`. 
" 1475,"def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset): model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset n_samples, n_features = X.shape params = dict( alpha=alpha, fit_intercept=fit_intercept, # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) X = X[:, :-1] # remove intercept X = np.concatenate((X, X), axis=0) assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) y = np.r_[y, y] if fit_intercept: coef = coef_with_intercept intercept = coef[-1] coef = coef[:-1] else: coef = coef_without_intercept intercept = 0 model.fit(X, y) rtol = 3e-5 assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) @pytest.mark.parametrize(""solver"", SOLVERS) @pytest.mark.parametrize(""fit_intercept"", [True, False])","Test that GLM converges for all solvers to correct solution on vstacked data. We work with a simple constructed data set with known solution. Fit on [X] with alpha is the same as fit on [X], [y] [X], [y] with 1 * alpha. It is the same alpha as the average loss stays the same. For wide X, [X', X'] is a singular matrix. ",64,91,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset): model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset n_samples, n_features = X.shape params = dict( alpha=alpha, fit_intercept=fit_intercept, # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) X = X[:, :-1] # remove intercept X = np.concatenate((X, X), axis=0) assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) y = np.r_[y, y] if fit_intercept: coef = coef_with_intercept intercept = coef[-1] coef = coef[:-1] else: coef = coef_without_intercept intercept = 0 model.fit(X, y) rtol = 3e-5 assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) @pytest.mark.parametrize(""solver"", SOLVERS) @pytest.mark.parametrize(""fit_intercept"", [True, False]) ``` ###Assistant : Test that GLM converges for all solvers to correct solution on vstacked data. We work with a simple constructed data set with known solution. Fit on [X] with alpha is the same as fit on [X], [y] [X], [y] with 1 * alpha. It is the same alpha as the average loss stays the same. For wide X, [X', X'] is a singular matrix. " 1476,"def deprecate_data(): sympy_deprecation_warning( , deprecated_since_version=""1.4"", active_deprecations_target=""deprecated-tensorindextype-attrs"", stacklevel=4, ) "," The data attribute of TensorIndexType is deprecated. Use The replace_with_arrays() method instead. ",12,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deprecate_data(): sympy_deprecation_warning( , deprecated_since_version=""1.4"", active_deprecations_target=""deprecated-tensorindextype-attrs"", stacklevel=4, ) ``` ###Assistant : The data attribute of TensorIndexType is deprecated. Use The replace_with_arrays() method instead. " 1477,"def fit(self, X, y=None): self._validate_params() X = self._check_input(X, reset=True) if self.check_inverse and not (self.func is None or self.inverse_func is None): self._check_inverse_transform(X) return self ","Fit transformer by checking X. If ``validate`` is ``True``, ``X`` will be checked. 
Parameters ---------- X : array-like, shape (n_samples, n_features) Input array. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object FunctionTransformer class instance. ",43,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit(self, X, y=None): self._validate_params() X = self._check_input(X, reset=True) if self.check_inverse and not (self.func is None or self.inverse_func is None): self._check_inverse_transform(X) return self ``` ###Assistant : Fit transformer by checking X. If ``validate`` is ``True``, ``X`` will be checked. Parameters ---------- X : array-like, shape (n_samples, n_features) Input array. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object FunctionTransformer class instance. " 1478,"def styleof(expr, styles=default_styles): style = {} for typ, sty in styles: if isinstance(expr, typ): style.update(sty) return style "," Merge style dictionaries in order Examples ======== >>> from sympy import Symbol, Basic, Expr, S >>> from sympy.printing.dot import styleof >>> styles = [(Basic, {'color': 'blue', 'shape': 'ellipse'}), ... (Expr, {'color': 'black'})] >>> styleof(Basic(S(1)), styles) {'color': 'blue', 'shape': 'ellipse'} >>> x = Symbol('x') >>> styleof(x + 1, styles) # this is an Expr {'color': 'black', 'shape': 'ellipse'} ",57,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def styleof(expr, styles=default_styles): style = {} for typ, sty in styles: if isinstance(expr, typ): style.update(sty) return style ``` ###Assistant : Merge style dictionaries in order Examples ======== >>> from sympy import Symbol, Basic, Expr, S >>> from sympy.printing.dot import styleof >>> styles = [(Basic, {'color': 'blue', 'shape': 'ellipse'}), ... (Expr, {'color': 'black'})] >>> styleof(Basic(S(1)), styles) {'color': 'blue', 'shape': 'ellipse'} >>> x = Symbol('x') >>> styleof(x + 1, styles) # this is an Expr {'color': 'black', 'shape': 'ellipse'} " 1479,"def execute(filters=None): columns = [ {""fieldname"": ""creation_date"", ""label"": _(""Date""), ""fieldtype"": ""Date"", ""width"": 300}, { ""fieldname"": ""first_response_time"", ""fieldtype"": ""Duration"", ""label"": _(""First Response Time""), ""width"": 300, }, ] data = frappe.db.sql( , (filters.from_date, filters.to_date), ) return columns, data "," SELECT date(creation) as creation_date, avg(first_response_time) as avg_response_time FROM tabIssue WHERE date(creation) between %s and %s and first_response_time > 0 GROUP BY creation_date ORDER BY creation_date desc ",26,36,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def execute(filters=None): columns = [ {""fieldname"": ""creation_date"", ""label"": _(""Date""), ""fieldtype"": ""Date"", ""width"": 300}, { ""fieldname"": ""first_response_time"", ""fieldtype"": ""Duration"", ""label"": _(""First Response Time""), ""width"": 300, }, ] data = frappe.db.sql( , (filters.from_date, filters.to_date), ) return columns, data ``` ###Assistant : SELECT date(creation) as creation_date, avg(first_response_time) as avg_response_time FROM tabIssue WHERE date(creation) between %s and %s and first_response_time > 0 GROUP BY creation_date ORDER BY creation_date desc " 1480,"def choose_parent(self, new_node, near_inds): if not near_inds: return None # search nearest cost in near_inds costs = [] for i in near_inds: near_node = self.node_list[i] t_node = self.steer(near_node, new_node) if t_node and self.check_collision( t_node, self.obstacle_list, self.robot_radius): costs.append(self.calc_new_cost(near_node, new_node)) else: costs.append(float(""inf"")) # the cost of collision node min_cost = min(costs) if min_cost == float(""inf""): print(""There is no good path.(min_cost is inf)"") return None min_ind = near_inds[costs.index(min_cost)] new_node = self.steer(self.node_list[min_ind], new_node) new_node.cost = min_cost return new_node "," Computes the cheapest point to new_node contained in the list near_inds and set such a node as the parent of new_node. Arguments: -------- new_node, Node randomly generated node with a path from its neared point There are not coalitions between this node and th tree. near_inds: list Indices of indices of the nodes what are near to new_node Returns. ------ Node, a copy of new_node ",65,74,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def choose_parent(self, new_node, near_inds): if not near_inds: return None # search nearest cost in near_inds costs = [] for i in near_inds: near_node = self.node_list[i] t_node = self.steer(near_node, new_node) if t_node and self.check_collision( t_node, self.obstacle_list, self.robot_radius): costs.append(self.calc_new_cost(near_node, new_node)) else: costs.append(float(""inf"")) # the cost of collision node min_cost = min(costs) if min_cost == float(""inf""): print(""There is no good path.(min_cost is inf)"") return None min_ind = near_inds[costs.index(min_cost)] new_node = self.steer(self.node_list[min_ind], new_node) new_node.cost = min_cost return new_node ``` ###Assistant : Computes the cheapest point to new_node contained in the list near_inds and set such a node as the parent of new_node. Arguments: -------- new_node, Node randomly generated node with a path from its neared point There are not coalitions between this node and th tree. near_inds: list Indices of indices of the nodes what are near to new_node Returns. ------ Node, a copy of new_node " 1481,"def test_a3c_compilation(self): config = a3c.DEFAULT_CONFIG.copy() config[""num_workers""] = 2 config[""num_envs_per_worker""] = 2 num_iterations = 1 # Test against all frameworks. 
for _ in framework_iterator(config, with_eager_tracing=True): for env in [""CartPole-v1"", ""Pendulum-v1"", ""PongDeterministic-v0""]: print(""env={}"".format(env)) config[""model""][""use_lstm""] = env == ""CartPole-v1"" trainer = a3c.A3CTrainer(config=config, env=env) for i in range(num_iterations): results = trainer.train() check_train_results(results) print(results) check_compute_single_action( trainer, include_state=config[""model""][""use_lstm""] ) trainer.stop() ",Test whether an A3CTrainer can be built with both frameworks.,10,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_a3c_compilation(self): config = a3c.DEFAULT_CONFIG.copy() config[""num_workers""] = 2 config[""num_envs_per_worker""] = 2 num_iterations = 1 # Test against all frameworks. for _ in framework_iterator(config, with_eager_tracing=True): for env in [""CartPole-v1"", ""Pendulum-v1"", ""PongDeterministic-v0""]: print(""env={}"".format(env)) config[""model""][""use_lstm""] = env == ""CartPole-v1"" trainer = a3c.A3CTrainer(config=config, env=env) for i in range(num_iterations): results = trainer.train() check_train_results(results) print(results) check_compute_single_action( trainer, include_state=config[""model""][""use_lstm""] ) trainer.stop() ``` ###Assistant : Test whether an A3CTrainer can be built with both frameworks. " 1482,"def extract(self, member, path="""", set_attrs=True): self._check(""r"") if isinstance(member, str): tarinfo = self.getmember(member) else: tarinfo = member # Prepare the link target for makelink(). if tarinfo.islnk(): tarinfo._link_target = os.path.join(path, tarinfo.linkname) try: self._extract_member(tarinfo, os.path.join(path, tarinfo.name), set_attrs=set_attrs) except EnvironmentError as e: if self.errorlevel > 0: raise else: if e.filename is None: self._dbg(1, ""tarfile: %s"" % e.strerror) else: self._dbg(1, ""tarfile: %s %r"" % (e.strerror, e.filename)) except ExtractError as e: if self.errorlevel > 1: raise else: self._dbg(1, ""tarfile: %s"" % e) ","Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a TarInfo object. You can specify a different directory using `path'. File attributes (owner, mtime, mode) are set unless `set_attrs' is False. ",52,76,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extract(self, member, path="""", set_attrs=True): self._check(""r"") if isinstance(member, str): tarinfo = self.getmember(member) else: tarinfo = member # Prepare the link target for makelink(). if tarinfo.islnk(): tarinfo._link_target = os.path.join(path, tarinfo.linkname) try: self._extract_member(tarinfo, os.path.join(path, tarinfo.name), set_attrs=set_attrs) except EnvironmentError as e: if self.errorlevel > 0: raise else: if e.filename is None: self._dbg(1, ""tarfile: %s"" % e.strerror) else: self._dbg(1, ""tarfile: %s %r"" % (e.strerror, e.filename)) except ExtractError as e: if self.errorlevel > 1: raise else: self._dbg(1, ""tarfile: %s"" % e) ``` ###Assistant : Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a TarInfo object. You can specify a different directory using `path'. File attributes (owner, mtime, mode) are set unless `set_attrs' is False. 
" 1483,"def test_artist_from_string(): artist = Artist.from_search_term(""artist:gorillaz"") assert artist.name == ""Gorillaz"" assert artist.url == ""http://open.spotify.com/artist/3AA28KZvwAUcZuOKwyblJQ"" assert len(artist.urls) > 1 "," Test if Artist class can be initialized from string. ",9,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_artist_from_string(): artist = Artist.from_search_term(""artist:gorillaz"") assert artist.name == ""Gorillaz"" assert artist.url == ""http://open.spotify.com/artist/3AA28KZvwAUcZuOKwyblJQ"" assert len(artist.urls) > 1 ``` ###Assistant : Test if Artist class can be initialized from string. " 1484,"def reset(self): # custom_info is used for episodic reports and tensorboard logging self.custom_info[""Invalid""] = 0 self.custom_info[""Hold""] = 0 self.custom_info[""Unknown""] = 0 self.custom_info[""pnl_factor""] = 0 self.custom_info[""duration_factor""] = 0 self.custom_info[""reward_exit""] = 0 self.custom_info[""reward_hold""] = 0 for action in self.actions: self.custom_info[f""{action.name}""] = 0 self._done = False if self.starting_point is True: if self.rl_config.get('randomize_starting_position', False): length_of_data = int(self._end_tick / 4) start_tick = random.randint(self.window_size + 1, length_of_data) self._start_tick = start_tick self._position_history = (self._start_tick * [None]) + [self._position] else: self._position_history = (self.window_size * [None]) + [self._position] self._current_tick = self._start_tick self._last_trade_tick = None self._position = Positions.Neutral self.total_reward = 0. self._total_profit = 1. # unit self.history = {} self.trade_history = [] self.portfolio_log_returns = np.zeros(len(self.prices)) self._profits = [(self._start_tick, 1)] self.close_trade_profit = [] self._total_unrealized_profit = 1 return self._get_observation() "," Reset is called at the beginning of every episode ",9,117,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reset(self): # custom_info is used for episodic reports and tensorboard logging self.custom_info[""Invalid""] = 0 self.custom_info[""Hold""] = 0 self.custom_info[""Unknown""] = 0 self.custom_info[""pnl_factor""] = 0 self.custom_info[""duration_factor""] = 0 self.custom_info[""reward_exit""] = 0 self.custom_info[""reward_hold""] = 0 for action in self.actions: self.custom_info[f""{action.name}""] = 0 self._done = False if self.starting_point is True: if self.rl_config.get('randomize_starting_position', False): length_of_data = int(self._end_tick / 4) start_tick = random.randint(self.window_size + 1, length_of_data) self._start_tick = start_tick self._position_history = (self._start_tick * [None]) + [self._position] else: self._position_history = (self.window_size * [None]) + [self._position] self._current_tick = self._start_tick self._last_trade_tick = None self._position = Positions.Neutral self.total_reward = 0. self._total_profit = 1. 
# unit self.history = {} self.trade_history = [] self.portfolio_log_returns = np.zeros(len(self.prices)) self._profits = [(self._start_tick, 1)] self.close_trade_profit = [] self._total_unrealized_profit = 1 return self._get_observation() ``` ###Assistant : Reset is called at the beginning of every episode " 1485,"def get_ordered_to_be_billed_data(args): doctype, party = args.get(""doctype""), args.get(""party"") child_tab = doctype + "" Item"" precision = ( get_field_precision( frappe.get_meta(child_tab).get_field(""billed_amt""), currency=get_default_currency() ) or 2 ) project_field = get_project_field(doctype, party) return frappe.db.sql( .format( parent_tab=""tab"" + doctype, child_tab=""tab"" + child_tab, precision=precision, party=party, date_field=args.get(""date""), project_field=project_field, order=args.get(""order""), order_by=args.get(""order_by""), ) ) "," Select `{parent_tab}`.name, `{parent_tab}`.{date_field}, `{parent_tab}`.{party}, `{parent_tab}`.{party}_name, `{child_tab}`.item_code, `{child_tab}`.base_amount, (`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)), (`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0)), (`{child_tab}`.base_amount - (`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)) - (`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))), `{child_tab}`.item_name, `{child_tab}`.description, {project_field}, `{parent_tab}`.company from `{parent_tab}`, `{child_tab}` where `{parent_tab}`.name = `{child_tab}`.parent and `{parent_tab}`.docstatus = 1 and `{parent_tab}`.status not in ('Closed', 'Completed') and `{child_tab}`.amount > 0 and (`{child_tab}`.base_amount - round(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1), {precision}) - (`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))) > 0 order by `{parent_tab}`.{order} {order_by} ",70,44,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_ordered_to_be_billed_data(args): doctype, party = args.get(""doctype""), args.get(""party"") child_tab = doctype + "" Item"" precision = ( get_field_precision( frappe.get_meta(child_tab).get_field(""billed_amt""), currency=get_default_currency() ) or 2 ) project_field = get_project_field(doctype, party) return frappe.db.sql( .format( parent_tab=""tab"" + doctype, child_tab=""tab"" + child_tab, precision=precision, party=party, date_field=args.get(""date""), project_field=project_field, order=args.get(""order""), order_by=args.get(""order_by""), ) ) ``` ###Assistant : Select `{parent_tab}`.name, `{parent_tab}`.{date_field}, `{parent_tab}`.{party}, `{parent_tab}`.{party}_name, `{child_tab}`.item_code, `{child_tab}`.base_amount, (`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)), (`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0)), (`{child_tab}`.base_amount - (`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)) - (`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))), `{child_tab}`.item_name, `{child_tab}`.description, {project_field}, `{parent_tab}`.company from `{parent_tab}`, `{child_tab}` where `{parent_tab}`.name = `{child_tab}`.parent and `{parent_tab}`.docstatus = 1 and `{parent_tab}`.status not in ('Closed', 'Completed') and `{child_tab}`.amount > 0 and (`{child_tab}`.base_amount - round(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1), {precision}) - (`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))) > 0 order by `{parent_tab}`.{order} {order_by} " 1486,"def get_staged_trial(self): # TODO(xwjiang): This method should consider `self._cached_actor_pg`. for trial in self._staged_trials: if self._pg_manager.has_ready(trial): return trial return None ","Get a trial whose placement group was successfully staged. Can also return None if no trial is available. Returns: Trial object or None. ",23,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_staged_trial(self): # TODO(xwjiang): This method should consider `self._cached_actor_pg`. for trial in self._staged_trials: if self._pg_manager.has_ready(trial): return trial return None ``` ###Assistant : Get a trial whose placement group was successfully staged. Can also return None if no trial is available. Returns: Trial object or None. " 1487,"def register(cls, func, squeeze_self=False, **kwargs): return super().register( Resampler.build_resample(func, squeeze_self), fn_name=func.__name__, **kwargs ) "," Build function that do fallback to pandas and aggregate resampled data. Parameters ---------- func : callable Aggregation function to execute under resampled frame. squeeze_self : bool, default: False Whether or not to squeeze frame before resampling. **kwargs : kwargs Additional arguments that will be passed to function builder. Returns ------- callable Function that takes query compiler and does fallback to pandas to resample time-series data and apply aggregation on it. ",70,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def register(cls, func, squeeze_self=False, **kwargs): return super().register( Resampler.build_resample(func, squeeze_self), fn_name=func.__name__, **kwargs ) ``` ###Assistant : Build function that do fallback to pandas and aggregate resampled data. 
Parameters ---------- func : callable Aggregation function to execute under resampled frame. squeeze_self : bool, default: False Whether or not to squeeze frame before resampling. **kwargs : kwargs Additional arguments that will be passed to function builder. Returns ------- callable Function that takes query compiler and does fallback to pandas to resample time-series data and apply aggregation on it. " 1488,"def aggregate(self, *args, **kwargs): if self.query.distinct_fields: raise NotImplementedError(""aggregate() + distinct(fields) not implemented."") self._validate_values_are_expressions( (*args, *kwargs.values()), method_name=""aggregate"" ) for arg in args: # The default_alias property raises TypeError if default_alias # can't be set automatically or AttributeError if it isn't an # attribute. try: arg.default_alias except (AttributeError, TypeError): raise TypeError(""Complex aggregates require an alias"") kwargs[arg.default_alias] = arg query = self.query.chain() for (alias, aggregate_expr) in kwargs.items(): query.add_annotation(aggregate_expr, alias, is_summary=True) annotation = query.annotations[alias] if not annotation.contains_aggregate: raise TypeError(""%s is not an aggregate expression"" % alias) for expr in annotation.get_source_expressions(): if ( expr.contains_aggregate and isinstance(expr, Ref) and expr.refs in kwargs ): name = expr.refs raise exceptions.FieldError( ""Cannot compute %s('%s'): '%s' is an aggregate"" % (annotation.name, name, name) ) return query.get_aggregation(self.db, kwargs) "," Return a dictionary containing the calculations (aggregation) over the current queryset. If args is present the expression is passed as a kwarg using the Aggregate object's default alias. ",28,117,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def aggregate(self, *args, **kwargs): if self.query.distinct_fields: raise NotImplementedError(""aggregate() + distinct(fields) not implemented."") self._validate_values_are_expressions( (*args, *kwargs.values()), method_name=""aggregate"" ) for arg in args: # The default_alias property raises TypeError if default_alias # can't be set automatically or AttributeError if it isn't an # attribute. try: arg.default_alias except (AttributeError, TypeError): raise TypeError(""Complex aggregates require an alias"") kwargs[arg.default_alias] = arg query = self.query.chain() for (alias, aggregate_expr) in kwargs.items(): query.add_annotation(aggregate_expr, alias, is_summary=True) annotation = query.annotations[alias] if not annotation.contains_aggregate: raise TypeError(""%s is not an aggregate expression"" % alias) for expr in annotation.get_source_expressions(): if ( expr.contains_aggregate and isinstance(expr, Ref) and expr.refs in kwargs ): name = expr.refs raise exceptions.FieldError( ""Cannot compute %s('%s'): '%s' is an aggregate"" % (annotation.name, name, name) ) return query.get_aggregation(self.db, kwargs) ``` ###Assistant : Return a dictionary containing the calculations (aggregation) over the current queryset. If args is present the expression is passed as a kwarg using the Aggregate object's default alias. 
" 1489,"def _detect_bytelen_from_str(s): # type: (str) -> int assert len(s) >= 2 tmp_len = len(s) i = 1 while orb(s[i]) & 0x80 > 0: i += 1 assert i < tmp_len, 'EINVAL: s: out-of-bound read: unfinished AbstractUVarIntField detected' # noqa: E501 ret = i + 1 assert ret >= 0 return ret "," _detect_bytelen_from_str returns the length of the machine representation of an AbstractUVarIntField starting at the beginning of s and which is assumed to expand over multiple bytes (value > _max_prefix_value). :param str s: the string to parse. It is assumed that it is a multibyte int. # noqa: E501 :return: The bytelength of the AbstractUVarIntField. :raises: AssertionError ",56,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _detect_bytelen_from_str(s): # type: (str) -> int assert len(s) >= 2 tmp_len = len(s) i = 1 while orb(s[i]) & 0x80 > 0: i += 1 assert i < tmp_len, 'EINVAL: s: out-of-bound read: unfinished AbstractUVarIntField detected' # noqa: E501 ret = i + 1 assert ret >= 0 return ret ``` ###Assistant : _detect_bytelen_from_str returns the length of the machine representation of an AbstractUVarIntField starting at the beginning of s and which is assumed to expand over multiple bytes (value > _max_prefix_value). :param str s: the string to parse. It is assumed that it is a multibyte int. # noqa: E501 :return: The bytelength of the AbstractUVarIntField. :raises: AssertionError " 1490,"def get_gl_entries(voucher_type, voucher_no): return frappe.db.sql( , (voucher_type, voucher_no), as_dict=1, ) ","select account, debit, credit, cost_center, is_cancelled from `tabGL Entry` where voucher_type=%s and voucher_no=%s order by account desc",17,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_gl_entries(voucher_type, voucher_no): return frappe.db.sql( , (voucher_type, voucher_no), as_dict=1, ) ``` ###Assistant : select account, debit, credit, cost_center, is_cancelled from `tabGL Entry` where voucher_type=%s and voucher_no=%s order by account desc " 1491,"def random_normal(mean=0.0, std=1.0, shape=None, dev=None, f=None): return _cur_framework(f=f).random_normal(mean, std, shape, dev) "," Draws samples from a normal distribution. :param mean: The mean of the normal distribution to sample from. Default is 0. :type mean: float :param std: The standard deviation of the normal distribution to sample from. Default is 1. :type std: float :param shape: Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn. If size is None (default), a single value is returned. :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. :type dev: str :param f: Machine learning framework. Inferred from inputs if None. :type f: ml_framework, optional :return: Drawn samples from the parameterized uniform distribution. ",111,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def random_normal(mean=0.0, std=1.0, shape=None, dev=None, f=None): return _cur_framework(f=f).random_normal(mean, std, shape, dev) ``` ###Assistant : Draws samples from a normal distribution. :param mean: The mean of the normal distribution to sample from. Default is 0. :type mean: float :param std: The standard deviation of the normal distribution to sample from. Default is 1. :type std: float :param shape: Output shape. 
If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn. If size is None (default), a single value is returned. :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. :type dev: str :param f: Machine learning framework. Inferred from inputs if None. :type f: ml_framework, optional :return: Drawn samples from the parameterized uniform distribution. " 1492,"def get_font_preamble(cls): font_preamble, command = cls._get_font_preamble_and_command() return font_preamble "," Return a string containing font configuration for the tex preamble. ",10,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_font_preamble(cls): font_preamble, command = cls._get_font_preamble_and_command() return font_preamble ``` ###Assistant : Return a string containing font configuration for the tex preamble. " 1493,"def do_test_explorations(config, dummy_obs, prev_a=None, expected_mean_action=None): # Test all frameworks. for _ in framework_iterator(config): print(f""Algorithm={config.algo_class}"") # Test for both the default Agent's exploration AND the `Random` # exploration class. for exploration in [None, ""Random""]: local_config = config.copy() if exploration == ""Random"": local_config.exploration(exploration_config={""type"": ""Random""}) print(""exploration={}"".format(exploration or ""default"")) algo = local_config.build() # Make sure all actions drawn are the same, given same # observations. actions = [] for _ in range(25): actions.append( algo.compute_single_action( observation=dummy_obs, explore=False, prev_action=prev_a, prev_reward=1.0 if prev_a is not None else None, ) ) check(actions[-1], actions[0]) # Make sure actions drawn are different # (around some mean value), given constant observations. actions = [] for _ in range(500): actions.append( algo.compute_single_action( observation=dummy_obs, explore=True, prev_action=prev_a, prev_reward=1.0 if prev_a is not None else None, ) ) check( np.mean(actions), expected_mean_action if expected_mean_action is not None else 0.5, atol=0.4, ) # Check that the stddev is not 0.0 (values differ). check(np.std(actions), 0.0, false=True) ",Calls an Agent's `compute_actions` with different `explore` options.,8,147,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def do_test_explorations(config, dummy_obs, prev_a=None, expected_mean_action=None): # Test all frameworks. for _ in framework_iterator(config): print(f""Algorithm={config.algo_class}"") # Test for both the default Agent's exploration AND the `Random` # exploration class. for exploration in [None, ""Random""]: local_config = config.copy() if exploration == ""Random"": local_config.exploration(exploration_config={""type"": ""Random""}) print(""exploration={}"".format(exploration or ""default"")) algo = local_config.build() # Make sure all actions drawn are the same, given same # observations. actions = [] for _ in range(25): actions.append( algo.compute_single_action( observation=dummy_obs, explore=False, prev_action=prev_a, prev_reward=1.0 if prev_a is not None else None, ) ) check(actions[-1], actions[0]) # Make sure actions drawn are different # (around some mean value), given constant observations. 
actions = [] for _ in range(500): actions.append( algo.compute_single_action( observation=dummy_obs, explore=True, prev_action=prev_a, prev_reward=1.0 if prev_a is not None else None, ) ) check( np.mean(actions), expected_mean_action if expected_mean_action is not None else 0.5, atol=0.4, ) # Check that the stddev is not 0.0 (values differ). check(np.std(actions), 0.0, false=True) ``` ###Assistant : Calls an Agent's `compute_actions` with different `explore` options. " 1494,"def exit_with_success(message, **kwargs): kwargs.setdefault(""style"", ""green"") app.console.print(message, **kwargs) raise typer.Exit(0) "," Utility to print a stylized success message and exit with a zero code ",13,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def exit_with_success(message, **kwargs): kwargs.setdefault(""style"", ""green"") app.console.print(message, **kwargs) raise typer.Exit(0) ``` ###Assistant : Utility to print a stylized success message and exit with a zero code " 1495,"def get_keywords() -> Dict[str, str]: # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = ""$Format:%d$"" git_full = ""$Format:%H$"" git_date = ""$Format:%ci$"" keywords = {""refnames"": git_refnames, ""full"": git_full, ""date"": git_date} return keywords ",Get the keywords needed to look up the version information.,10,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_keywords() -> Dict[str, str]: # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = ""$Format:%d$"" git_full = ""$Format:%H$"" git_date = ""$Format:%ci$"" keywords = {""refnames"": git_refnames, ""full"": git_full, ""date"": git_date} return keywords ``` ###Assistant : Get the keywords needed to look up the version information. " 1496,"def take(outname, inname, chunks, index, itemsize, axis=0): from .core import PerformanceWarning plan = slicing_plan(chunks[axis], index) if len(plan) >= len(chunks[axis]) * 10: factor = math.ceil(len(plan) / len(chunks[axis])) warnings.warn( ""Slicing with an out-of-order index is generating %d "" ""times more chunks"" % factor, PerformanceWarning, stacklevel=6, ) if not is_arraylike(index): index = np.asarray(index) # Check for chunks from the plan that would violate the user's # configured chunk size. nbytes = utils.parse_bytes(config.get(""array.chunk-size"")) other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis] other_numel = np.prod([sum(x) for x in other_chunks]) if math.isnan(other_numel): warnsize = maxsize = math.inf else: maxsize = math.ceil(nbytes / (other_numel * itemsize)) warnsize = maxsize * 5 split = config.get(""array.slicing.split-large-chunks"", None) # Warn only when the default is not specified. warned = split is not None for _, index_list in plan: if not warned and len(index_list) > warnsize: msg = ( ""Slicing is producing a large chunk. To accept the large\n"" ""chunk and silence this warning, set the option\n"" "" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n"" "" ... 
array[indexer]\n\n"" ""To avoid creating the large chunks, set the option\n"" "" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n"" "" ... array[indexer]"" ) warnings.warn(msg, PerformanceWarning, stacklevel=6) warned = True where_index = [] index_lists = [] for where_idx, index_list in plan: index_length = len(index_list) if split and index_length > maxsize: index_sublist = np.array_split( index_list, math.ceil(index_length / maxsize) ) index_lists.extend(index_sublist) where_index.extend([where_idx] * len(index_sublist)) else: if not is_arraylike(index_list): index_list = np.array(index_list) index_lists.append(index_list) where_index.append(where_idx) dims = [range(len(bd)) for bd in chunks] indims = list(dims) indims[axis] = list(range(len(where_index))) keys = list(product([outname], *indims)) outdims = list(dims) outdims[axis] = where_index slices = [[colon] * len(bd) for bd in chunks] slices[axis] = index_lists slices = list(product(*slices)) inkeys = list(product([inname], *outdims)) values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)] chunks2 = list(chunks) chunks2[axis] = tuple(map(len, index_lists)) dsk = dict(zip(keys, values)) return tuple(chunks2), dsk ","Index array with an iterable of index Handles a single index by a single list Mimics ``np.take`` >>> from pprint import pprint >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], 8, axis=0) >>> chunks ((2, 1, 1),) >>> pprint(dsk) # doctest: +ELLIPSIS {('y', 0): (, ('x', 0), (array([5, 1]),)), ('y', 1): (, ('x', 2), (array([7]),)), ('y', 2): (, ('x', 0), (array([3]),))} When list is sorted we retain original block structure >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], 8, axis=0) >>> chunks ((3, 1),) >>> pprint(dsk) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE {('y', 0): (, ('x', 0), (array([1, 3, 5]),)), ('y', 1): (, ('x', 2), (array([7]),))} When any indexed blocks would otherwise grow larger than dask.config.array.chunk-size, we might split them, depending on the value of ``dask.config.slicing.split-large-chunks``. >>> import dask >>> with dask.config.set({""array.slicing.split-large-chunks"": True}): ... chunks, dsk = take('y', 'x', [(1, 1, 1), (1000, 1000), (1000, 1000)], ... [0] + [1] * 6 + [2], axis=0, itemsize=8) >>> chunks ((1, 3, 3, 1), (1000, 1000), (1000, 1000)) ",191,299,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def take(outname, inname, chunks, index, itemsize, axis=0): from .core import PerformanceWarning plan = slicing_plan(chunks[axis], index) if len(plan) >= len(chunks[axis]) * 10: factor = math.ceil(len(plan) / len(chunks[axis])) warnings.warn( ""Slicing with an out-of-order index is generating %d "" ""times more chunks"" % factor, PerformanceWarning, stacklevel=6, ) if not is_arraylike(index): index = np.asarray(index) # Check for chunks from the plan that would violate the user's # configured chunk size. nbytes = utils.parse_bytes(config.get(""array.chunk-size"")) other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis] other_numel = np.prod([sum(x) for x in other_chunks]) if math.isnan(other_numel): warnsize = maxsize = math.inf else: maxsize = math.ceil(nbytes / (other_numel * itemsize)) warnsize = maxsize * 5 split = config.get(""array.slicing.split-large-chunks"", None) # Warn only when the default is not specified. 
warned = split is not None for _, index_list in plan: if not warned and len(index_list) > warnsize: msg = ( ""Slicing is producing a large chunk. To accept the large\n"" ""chunk and silence this warning, set the option\n"" "" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n"" "" ... array[indexer]\n\n"" ""To avoid creating the large chunks, set the option\n"" "" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n"" "" ... array[indexer]"" ) warnings.warn(msg, PerformanceWarning, stacklevel=6) warned = True where_index = [] index_lists = [] for where_idx, index_list in plan: index_length = len(index_list) if split and index_length > maxsize: index_sublist = np.array_split( index_list, math.ceil(index_length / maxsize) ) index_lists.extend(index_sublist) where_index.extend([where_idx] * len(index_sublist)) else: if not is_arraylike(index_list): index_list = np.array(index_list) index_lists.append(index_list) where_index.append(where_idx) dims = [range(len(bd)) for bd in chunks] indims = list(dims) indims[axis] = list(range(len(where_index))) keys = list(product([outname], *indims)) outdims = list(dims) outdims[axis] = where_index slices = [[colon] * len(bd) for bd in chunks] slices[axis] = index_lists slices = list(product(*slices)) inkeys = list(product([inname], *outdims)) values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)] chunks2 = list(chunks) chunks2[axis] = tuple(map(len, index_lists)) dsk = dict(zip(keys, values)) return tuple(chunks2), dsk ``` ###Assistant : Index array with an iterable of index Handles a single index by a single list Mimics ``np.take`` >>> from pprint import pprint >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], 8, axis=0) >>> chunks ((2, 1, 1),) >>> pprint(dsk) # doctest: +ELLIPSIS {('y', 0): (, ('x', 0), (array([5, 1]),)), ('y', 1): (, ('x', 2), (array([7]),)), ('y', 2): (, ('x', 0), (array([3]),))} When list is sorted we retain original block structure >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], 8, axis=0) >>> chunks ((3, 1),) >>> pprint(dsk) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE {('y', 0): (, ('x', 0), (array([1, 3, 5]),)), ('y', 1): (, ('x', 2), (array([7]),))} When any indexed blocks would otherwise grow larger than dask.config.array.chunk-size, we might split them, depending on the value of ``dask.config.slicing.split-large-chunks``. >>> import dask >>> with dask.config.set({""array.slicing.split-large-chunks"": True}): ... chunks, dsk = take('y', 'x', [(1, 1, 1), (1000, 1000), (1000, 1000)], ... [0] + [1] * 6 + [2], axis=0, itemsize=8) >>> chunks ((1, 3, 3, 1), (1000, 1000), (1000, 1000)) " 1497,"def test_multidb(self): ContentType.objects.clear_cache() with self.assertNumQueries(0, using=""default""), self.assertNumQueries( 1, using=""other"" ): ContentType.objects.get_for_model(Author) "," When using multiple databases, ContentType.objects.get_for_model() uses db_for_read(). ",7,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_multidb(self): ContentType.objects.clear_cache() with self.assertNumQueries(0, using=""default""), self.assertNumQueries( 1, using=""other"" ): ContentType.objects.get_for_model(Author) ``` ###Assistant : When using multiple databases, ContentType.objects.get_for_model() uses db_for_read(). 
" 1498,"def get_engle_granger_two_step_cointegration_test(y, x): warnings.simplefilter(action=""ignore"", category=FutureWarning) long_run_ols = sm.OLS(y, sm.add_constant(x)) warnings.simplefilter(action=""default"", category=FutureWarning) long_run_ols_fit = long_run_ols.fit() c, gamma = long_run_ols_fit.params z = long_run_ols_fit.resid short_run_ols = sm.OLS(y.diff().iloc[1:], (z.shift().iloc[1:])) short_run_ols_fit = short_run_ols.fit() alpha = short_run_ols_fit.params[0] # NOTE: The p-value returned by the adfuller function assumes we do not estimate z # first, but test stationarity of an unestimated series directly. This assumption # should have limited effect for high N, however. Critical values taking this into # account more accurately are provided in e.g. McKinnon (1990) and Engle & Yoo (1987). adfstat, pvalue, _, _, _ = adfuller(z, maxlag=1, autolag=None) return c, gamma, alpha, z, adfstat, pvalue ","Estimates long-run and short-run cointegration relationship for series y and x and apply the two-step Engle & Granger test for cointegration. Uses a 2-step process to first estimate coefficients for the long-run relationship y_t = c + gamma * x_t + z_t and then the short-term relationship, y_t - y_(t-1) = alpha * z_(t-1) + epsilon_t, with z the found residuals of the first equation. Then tests cointegration by Dickey-Fuller phi=1 vs phi < 1 in z_t = phi * z_(t-1) + eta_t If this implies phi < 1, the z series is stationary is concluded to be stationary, and thus the series y and x are concluded to be cointegrated. Parameters ---------- y : pd.Series The first time series of the pair to analyse. x : pd.Series The second time series of the pair to analyse. Returns ------- c : float The constant term in the long-run relationship y_t = c + gamma * x_t + z_t. This describes the static shift of y with respect to gamma * x. gamma : float The gamma term in the long-run relationship y_t = c + gamma * x_t + z_t. This describes the ratio between the const-shifted y and x. alpha : float The alpha term in the short-run relationship y_t - y_(t-1) = alpha * z_(t-1) + epsilon. This gives an indication of the strength of the error correction toward the long-run mean. z : pd.Series Series of residuals z_t from the long-run relationship y_t = c + gamma * x_t + z_t, representing the value of the error correction term. dfstat : float The Dickey Fuller test-statistic for phi = 1 vs phi < 1 in the second equation. A more negative value implies the existence of stronger cointegration. pvalue : float The p-value corresponding to the Dickey Fuller test-statistic. A lower value implies stronger rejection of no-cointegration, thus stronger evidence of cointegration. ",315,103,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_engle_granger_two_step_cointegration_test(y, x): warnings.simplefilter(action=""ignore"", category=FutureWarning) long_run_ols = sm.OLS(y, sm.add_constant(x)) warnings.simplefilter(action=""default"", category=FutureWarning) long_run_ols_fit = long_run_ols.fit() c, gamma = long_run_ols_fit.params z = long_run_ols_fit.resid short_run_ols = sm.OLS(y.diff().iloc[1:], (z.shift().iloc[1:])) short_run_ols_fit = short_run_ols.fit() alpha = short_run_ols_fit.params[0] # NOTE: The p-value returned by the adfuller function assumes we do not estimate z # first, but test stationarity of an unestimated series directly. This assumption # should have limited effect for high N, however. 
Critical values taking this into # account more accurately are provided in e.g. McKinnon (1990) and Engle & Yoo (1987). adfstat, pvalue, _, _, _ = adfuller(z, maxlag=1, autolag=None) return c, gamma, alpha, z, adfstat, pvalue ``` ###Assistant : Estimates long-run and short-run cointegration relationship for series y and x and apply the two-step Engle & Granger test for cointegration. Uses a 2-step process to first estimate coefficients for the long-run relationship y_t = c + gamma * x_t + z_t and then the short-term relationship, y_t - y_(t-1) = alpha * z_(t-1) + epsilon_t, with z the found residuals of the first equation. Then tests cointegration by Dickey-Fuller phi=1 vs phi < 1 in z_t = phi * z_(t-1) + eta_t If this implies phi < 1, the z series is stationary is concluded to be stationary, and thus the series y and x are concluded to be cointegrated. Parameters ---------- y : pd.Series The first time series of the pair to analyse. x : pd.Series The second time series of the pair to analyse. Returns ------- c : float The constant term in the long-run relationship y_t = c + gamma * x_t + z_t. This describes the static shift of y with respect to gamma * x. gamma : float The gamma term in the long-run relationship y_t = c + gamma * x_t + z_t. This describes the ratio between the const-shifted y and x. alpha : float The alpha term in the short-run relationship y_t - y_(t-1) = alpha * z_(t-1) + epsilon. This gives an indication of the strength of the error correction toward the long-run mean. z : pd.Series Series of residuals z_t from the long-run relationship y_t = c + gamma * x_t + z_t, representing the value of the error correction term. dfstat : float The Dickey Fuller test-statistic for phi = 1 vs phi < 1 in the second equation. A more negative value implies the existence of stronger cointegration. pvalue : float The p-value corresponding to the Dickey Fuller test-statistic. A lower value implies stronger rejection of no-cointegration, thus stronger evidence of cointegration. " 1499,"def test_deterministic_order_for_unordered_model(self): superuser = self._create_superuser(""superuser"") for counter in range(1, 51): UnorderedObject.objects.create(id=counter, bool=True) "," The primary key is used in the ordering of the changelist's results to guarantee a deterministic order, even when the model doesn't have any default ordering defined (#17198). ",28,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_deterministic_order_for_unordered_model(self): superuser = self._create_superuser(""superuser"") for counter in range(1, 51): UnorderedObject.objects.create(id=counter, bool=True) ``` ###Assistant : The primary key is used in the ordering of the changelist's results to guarantee a deterministic order, even when the model doesn't have any default ordering defined (#17198). " 1500,"async def read_settings() -> prefect.settings.Settings: return prefect.settings.get_current_settings().with_obfuscated_secrets() @router.get(""/version"")"," Get the current Orion settings. Secret setting values will be obfuscated. ",11,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def read_settings() -> prefect.settings.Settings: return prefect.settings.get_current_settings().with_obfuscated_secrets() @router.get(""/version"") ``` ###Assistant : Get the current Orion settings. Secret setting values will be obfuscated. 
" 1501,"def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs): from dask.array.overlap import map_overlap return map_overlap( func, self, depth=depth, boundary=boundary, trim=trim, **kwargs ) ","Map a function over blocks of the array with some overlap We share neighboring zones between blocks of the array, then map a function, then trim away the neighboring strips. Note that this function will attempt to automatically determine the output array type before computing it, please refer to the ``meta`` keyword argument in :func:`map_blocks ` if you expect that the function will not succeed when operating on 0-d arrays. Parameters ---------- func: function The function to apply to each extended block depth: int, tuple, or dict The number of elements that each block should share with its neighbors If a tuple or dict then this can be different per axis boundary: str, tuple, dict How to handle the boundaries. Values include 'reflect', 'periodic', 'nearest', 'none', or any constant value like 0 or np.nan trim: bool Whether or not to trim ``depth`` elements from each block after calling the map function. Set this to False if your mapping function already does this for you **kwargs: Other keyword arguments valid in :func:`map_blocks `. Examples -------- >>> import dask.array as da >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1]) >>> x = da.from_array(x, chunks=5) >>> def derivative(x): ... return x - np.roll(x, 1) >>> y = x.map_overlap(derivative, depth=1, boundary=0) >>> y.compute() array([ 1, 0, 1, 1, 0, 0, -1, -1, 0]) >>> import dask.array as da >>> x = np.arange(16).reshape((4, 4)) >>> d = da.from_array(x, chunks=(2, 2)) >>> y = d.map_overlap(lambda x: x + x.size, depth=1, boundary='reflect') >>> y.compute() array([[16, 17, 18, 19], [20, 21, 22, 23], [24, 25, 26, 27], [28, 29, 30, 31]]) >>> func = lambda x: x + x.size >>> depth = {0: 1, 1: 1} >>> boundary = {0: 'reflect', 1: 'none'} >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE array([[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23], [24, 25, 26, 27]]) >>> x = np.arange(16).reshape((4, 4)) >>> d = da.from_array(x, chunks=(2, 2)) >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=np.array(())) >>> y dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray> >>> y.compute() array([[ 4, 6, 8, 10], [ 8, 10, 12, 14], [20, 22, 24, 26], [24, 26, 28, 30]]) >>> import cupy # doctest: +SKIP >>> x = cupy.arange(16).reshape((4, 4)) # doctest: +SKIP >>> d = da.from_array(x, chunks=(2, 2)) # doctest: +SKIP >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=cupy.array(())) # doctest: +SKIP >>> y # doctest: +SKIP dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray> >>> y.compute() # doctest: +SKIP array([[ 4, 6, 8, 10], [ 8, 10, 12, 14], [20, 22, 24, 26], [24, 26, 28, 30]]) ",435,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs): from dask.array.overlap import map_overlap return map_overlap( func, self, depth=depth, boundary=boundary, trim=trim, **kwargs ) ``` ###Assistant : Map a function over blocks of the array with some overlap We share neighboring zones between blocks of the array, then map a function, then trim away the neighboring strips. 
Note that this function will attempt to automatically determine the output array type before computing it, please refer to the ``meta`` keyword argument in :func:`map_blocks ` if you expect that the function will not succeed when operating on 0-d arrays. Parameters ---------- func: function The function to apply to each extended block depth: int, tuple, or dict The number of elements that each block should share with its neighbors If a tuple or dict then this can be different per axis boundary: str, tuple, dict How to handle the boundaries. Values include 'reflect', 'periodic', 'nearest', 'none', or any constant value like 0 or np.nan trim: bool Whether or not to trim ``depth`` elements from each block after calling the map function. Set this to False if your mapping function already does this for you **kwargs: Other keyword arguments valid in :func:`map_blocks `. Examples -------- >>> import dask.array as da >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1]) >>> x = da.from_array(x, chunks=5) >>> def derivative(x): ... return x - np.roll(x, 1) >>> y = x.map_overlap(derivative, depth=1, boundary=0) >>> y.compute() array([ 1, 0, 1, 1, 0, 0, -1, -1, 0]) >>> import dask.array as da >>> x = np.arange(16).reshape((4, 4)) >>> d = da.from_array(x, chunks=(2, 2)) >>> y = d.map_overlap(lambda x: x + x.size, depth=1, boundary='reflect') >>> y.compute() array([[16, 17, 18, 19], [20, 21, 22, 23], [24, 25, 26, 27], [28, 29, 30, 31]]) >>> func = lambda x: x + x.size >>> depth = {0: 1, 1: 1} >>> boundary = {0: 'reflect', 1: 'none'} >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE array([[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23], [24, 25, 26, 27]]) >>> x = np.arange(16).reshape((4, 4)) >>> d = da.from_array(x, chunks=(2, 2)) >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=np.array(())) >>> y dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray> >>> y.compute() array([[ 4, 6, 8, 10], [ 8, 10, 12, 14], [20, 22, 24, 26], [24, 26, 28, 30]]) >>> import cupy # doctest: +SKIP >>> x = cupy.arange(16).reshape((4, 4)) # doctest: +SKIP >>> d = da.from_array(x, chunks=(2, 2)) # doctest: +SKIP >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=cupy.array(())) # doctest: +SKIP >>> y # doctest: +SKIP dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray> >>> y.compute() # doctest: +SKIP array([[ 4, 6, 8, 10], [ 8, 10, 12, 14], [20, 22, 24, 26], [24, 26, 28, 30]]) " 1502,"def _flush(self): # Raise an Exception if the value is being changed in read mode. if not self._write: raise GDALException( ""Raster needs to be opened in write mode to change values."" ) capi.flush_ds(self._ptr) "," Flush all data from memory into the source file if it exists. The data that needs flushing are geotransforms, coordinate systems, nodata_values and pixel values. This function will be called automatically wherever it is needed. ",35,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _flush(self): # Raise an Exception if the value is being changed in read mode. if not self._write: raise GDALException( ""Raster needs to be opened in write mode to change values."" ) capi.flush_ds(self._ptr) ``` ###Assistant : Flush all data from memory into the source file if it exists. The data that needs flushing are geotransforms, coordinate systems, nodata_values and pixel values. 
This function will be called automatically wherever it is needed. " 1503,"def _round(self, places, rounding): if places <= 0: raise ValueError(""argument should be at least 1 in _round"") if self._is_special or not self: return Decimal(self) ans = self._rescale(self.adjusted()+1-places, rounding) # it can happen that the rescale alters the adjusted exponent; # for example when rounding 99.97 to 3 significant figures. # When this happens we end up with an extra 0 at the end of # the number; a second rescale fixes this. if ans.adjusted() != self.adjusted(): ans = ans._rescale(ans.adjusted()+1-places, rounding) return ans ","Round a nonzero, nonspecial Decimal to a fixed number of significant figures, using the given rounding mode. Infinities, NaNs and zeros are returned unaltered. This operation is quiet: it raises no flags, and uses no information from the context. ",39,82,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _round(self, places, rounding): if places <= 0: raise ValueError(""argument should be at least 1 in _round"") if self._is_special or not self: return Decimal(self) ans = self._rescale(self.adjusted()+1-places, rounding) # it can happen that the rescale alters the adjusted exponent; # for example when rounding 99.97 to 3 significant figures. # When this happens we end up with an extra 0 at the end of # the number; a second rescale fixes this. if ans.adjusted() != self.adjusted(): ans = ans._rescale(ans.adjusted()+1-places, rounding) return ans ``` ###Assistant : Round a nonzero, nonspecial Decimal to a fixed number of significant figures, using the given rounding mode. Infinities, NaNs and zeros are returned unaltered. This operation is quiet: it raises no flags, and uses no information from the context. " 1504,"def is_redirect(self): return ""location"" in self.headers and self.status_code in REDIRECT_STATI ","True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). ",17,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_redirect(self): return ""location"" in self.headers and self.status_code in REDIRECT_STATI ``` ###Assistant : True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). " 1505,"def write_eqapo_graphic_eq(self, file_path, normalize=True): file_path = os.path.abspath(file_path) s = self.eqapo_graphic_eq(normalize=normalize) with open(file_path, 'w', encoding='utf-8') as f: f.write(s) return s ",Writes equalization graph to a file as Equalizer APO config.,10,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def write_eqapo_graphic_eq(self, file_path, normalize=True): file_path = os.path.abspath(file_path) s = self.eqapo_graphic_eq(normalize=normalize) with open(file_path, 'w', encoding='utf-8') as f: f.write(s) return s ``` ###Assistant : Writes equalization graph to a file as Equalizer APO config. 
" 1506,"def test_export_pipeline_6(): pipeline_string = ( 'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),' 'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,' 'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)' ) pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset) expected_code = exported_code = export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset, random_state=42, data_file_path='test_path') assert expected_code == exported_code ","Assert that exported_pipeline() generated a compile source file with random_state and data_file_path.import numpy as np import pandas as pd from sklearn.feature_selection import SelectPercentile, f_classif from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from sklearn.tree import DecisionTreeClassifier from tpot.export_utils import set_param_recursive # NOTE: Make sure that the outcome column is labeled 'target' in the data file tpot_data = pd.read_csv('test_path', sep='COLUMN_SEPARATOR', dtype=np.float64) features = tpot_data.drop('target', axis=1) training_features, testing_features, training_target, testing_target = \\ train_test_split(features, tpot_data['target'], random_state=42) exported_pipeline = make_pipeline( SelectPercentile(score_func=f_classif, percentile=20), DecisionTreeClassifier(criterion=""gini"", max_depth=8, min_samples_leaf=5, min_samples_split=5) ) # Fix random state for all the steps in exported pipeline set_param_recursive(exported_pipeline.steps, 'random_state', 42) exported_pipeline.fit(training_features, training_target) results = exported_pipeline.predict(testing_features) ",102,29,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_export_pipeline_6(): pipeline_string = ( 'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),' 'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,' 'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)' ) pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset) expected_code = exported_code = export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset, random_state=42, data_file_path='test_path') assert expected_code == exported_code ``` ###Assistant : Assert that exported_pipeline() generated a compile source file with random_state and data_file_path.import numpy as np import pandas as pd from sklearn.feature_selection import SelectPercentile, f_classif from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from sklearn.tree import DecisionTreeClassifier from tpot.export_utils import set_param_recursive # NOTE: Make sure that the outcome column is labeled 'target' in the data file tpot_data = pd.read_csv('test_path', sep='COLUMN_SEPARATOR', dtype=np.float64) features = tpot_data.drop('target', axis=1) training_features, testing_features, training_target, testing_target = \\ train_test_split(features, tpot_data['target'], random_state=42) exported_pipeline = make_pipeline( SelectPercentile(score_func=f_classif, percentile=20), DecisionTreeClassifier(criterion=""gini"", max_depth=8, min_samples_leaf=5, min_samples_split=5) ) # Fix random state for all the steps in exported pipeline set_param_recursive(exported_pipeline.steps, 'random_state', 42) exported_pipeline.fit(training_features, training_target) results = exported_pipeline.predict(testing_features) " 1507,"def _check_edge_connectivity(G): # Construct the auxiliary graph that can be used to make each k-cc or k-sub aux_graph = EdgeComponentAuxGraph.construct(G) # memoize the local connectivity in this graph memo = {} for k in it.count(1): # Test ""local"" k-edge-components and k-edge-subgraphs ccs_local = fset(aux_graph.k_edge_components(k)) ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k)) # Check connectivity properties that should be guaranteed by the # algorithms. 
_assert_local_cc_edge_connectivity(G, ccs_local, k, memo) _assert_subgraph_edge_connectivity(G, ccs_subgraph, k) if k == 1 or k == 2 and not G.is_directed(): assert ( ccs_local == ccs_subgraph ), ""Subgraphs and components should be the same when k == 1 or (k == 2 and not G.directed())"" if G.is_directed(): # Test special case methods are the same as the aux graph if k == 1: alt_sccs = fset(nx.strongly_connected_components(G)) assert alt_sccs == ccs_local, ""k=1 failed alt"" assert alt_sccs == ccs_subgraph, ""k=1 failed alt"" else: # Test special case methods are the same as the aux graph if k == 1: alt_ccs = fset(nx.connected_components(G)) assert alt_ccs == ccs_local, ""k=1 failed alt"" assert alt_ccs == ccs_subgraph, ""k=1 failed alt"" elif k == 2: alt_bridge_ccs = fset(bridge_components(G)) assert alt_bridge_ccs == ccs_local, ""k=2 failed alt"" assert alt_bridge_ccs == ccs_subgraph, ""k=2 failed alt"" # if new methods for k == 3 or k == 4 are implemented add them here # Check the general subgraph method works by itself alt_subgraph_ccs = fset( [set(C.nodes()) for C in general_k_edge_subgraphs(G, k=k)] ) assert alt_subgraph_ccs == ccs_subgraph, ""alt subgraph method failed"" # Stop once k is larger than all special case methods # and we cannot break down ccs any further. if k > 2 and all(len(cc) == 1 for cc in ccs_local): break # ---------------- # Misc tests # ---------------- "," Helper - generates all k-edge-components using the aux graph. Checks the both local and subgraph edge connectivity of each cc. Also checks that alternate methods of computing the k-edge-ccs generate the same result. ",33,275,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_edge_connectivity(G): # Construct the auxiliary graph that can be used to make each k-cc or k-sub aux_graph = EdgeComponentAuxGraph.construct(G) # memoize the local connectivity in this graph memo = {} for k in it.count(1): # Test ""local"" k-edge-components and k-edge-subgraphs ccs_local = fset(aux_graph.k_edge_components(k)) ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k)) # Check connectivity properties that should be guaranteed by the # algorithms. _assert_local_cc_edge_connectivity(G, ccs_local, k, memo) _assert_subgraph_edge_connectivity(G, ccs_subgraph, k) if k == 1 or k == 2 and not G.is_directed(): assert ( ccs_local == ccs_subgraph ), ""Subgraphs and components should be the same when k == 1 or (k == 2 and not G.directed())"" if G.is_directed(): # Test special case methods are the same as the aux graph if k == 1: alt_sccs = fset(nx.strongly_connected_components(G)) assert alt_sccs == ccs_local, ""k=1 failed alt"" assert alt_sccs == ccs_subgraph, ""k=1 failed alt"" else: # Test special case methods are the same as the aux graph if k == 1: alt_ccs = fset(nx.connected_components(G)) assert alt_ccs == ccs_local, ""k=1 failed alt"" assert alt_ccs == ccs_subgraph, ""k=1 failed alt"" elif k == 2: alt_bridge_ccs = fset(bridge_components(G)) assert alt_bridge_ccs == ccs_local, ""k=2 failed alt"" assert alt_bridge_ccs == ccs_subgraph, ""k=2 failed alt"" # if new methods for k == 3 or k == 4 are implemented add them here # Check the general subgraph method works by itself alt_subgraph_ccs = fset( [set(C.nodes()) for C in general_k_edge_subgraphs(G, k=k)] ) assert alt_subgraph_ccs == ccs_subgraph, ""alt subgraph method failed"" # Stop once k is larger than all special case methods # and we cannot break down ccs any further. 
if k > 2 and all(len(cc) == 1 for cc in ccs_local): break # ---------------- # Misc tests # ---------------- ``` ###Assistant : Helper - generates all k-edge-components using the aux graph. Checks the both local and subgraph edge connectivity of each cc. Also checks that alternate methods of computing the k-edge-ccs generate the same result. " 1508,"def test_sequence_name_length_limits_flush(self): # A full flush is expensive to the full test, so we dig into the # internals to generate the likely offending SQL and run it manually # Some convenience aliases VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ VLM_m2m = ( VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through ) tables = [ VLM._meta.db_table, VLM_m2m._meta.db_table, ] sql_list = connection.ops.sql_flush(no_style(), tables, reset_sequences=True) connection.ops.execute_sql_flush(sql_list) "," Sequence resetting as part of a flush with model with long name and long pk name doesn't error (#8901). ",19,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_sequence_name_length_limits_flush(self): # A full flush is expensive to the full test, so we dig into the # internals to generate the likely offending SQL and run it manually # Some convenience aliases VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ VLM_m2m = ( VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through ) tables = [ VLM._meta.db_table, VLM_m2m._meta.db_table, ] sql_list = connection.ops.sql_flush(no_style(), tables, reset_sequences=True) connection.ops.execute_sql_flush(sql_list) ``` ###Assistant : Sequence resetting as part of a flush with model with long name and long pk name doesn't error (#8901). 
" 1509,"def test_delete_media(self) -> None: download_resource = self.media_repo.children[b""download""] upload_resource = self.media_repo.children[b""upload""] # Upload some media into the room response = self.helper.upload_media( upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, ) # Extract media ID from the response server_and_media_id = response[""content_uri""][6:] # Cut off 'mxc://' server_name, media_id = server_and_media_id.split(""/"") self.assertEqual(server_name, self.server_name) # Attempt to access media channel = make_request( self.reactor, FakeSite(download_resource, self.reactor), ""GET"", server_and_media_id, shorthand=False, access_token=self.admin_user_tok, ) # Should be successful self.assertEqual( 200, channel.code, msg=( ""Expected to receive a 200 on accessing media: %s"" % server_and_media_id ), ) # Test if the file exists local_path = self.filepaths.local_media_filepath(media_id) self.assertTrue(os.path.exists(local_path)) url = ""/_synapse/admin/v1/media/%s/%s"" % (self.server_name, media_id) # Delete media channel = self.make_request( ""DELETE"", url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body[""total""]) self.assertEqual( media_id, channel.json_body[""deleted_media""][0], ) # Attempt to access media channel = make_request( self.reactor, FakeSite(download_resource, self.reactor), ""GET"", server_and_media_id, shorthand=False, access_token=self.admin_user_tok, ) self.assertEqual( HTTPStatus.NOT_FOUND, channel.code, msg=( ""Expected to receive a HTTPStatus.NOT_FOUND on accessing deleted media: %s"" % server_and_media_id ), ) # Test if the file is deleted self.assertFalse(os.path.exists(local_path)) "," Tests that delete a media is successfully ",7,159,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_delete_media(self) -> None: download_resource = self.media_repo.children[b""download""] upload_resource = self.media_repo.children[b""upload""] # Upload some media into the room response = self.helper.upload_media( upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, ) # Extract media ID from the response server_and_media_id = response[""content_uri""][6:] # Cut off 'mxc://' server_name, media_id = server_and_media_id.split(""/"") self.assertEqual(server_name, self.server_name) # Attempt to access media channel = make_request( self.reactor, FakeSite(download_resource, self.reactor), ""GET"", server_and_media_id, shorthand=False, access_token=self.admin_user_tok, ) # Should be successful self.assertEqual( 200, channel.code, msg=( ""Expected to receive a 200 on accessing media: %s"" % server_and_media_id ), ) # Test if the file exists local_path = self.filepaths.local_media_filepath(media_id) self.assertTrue(os.path.exists(local_path)) url = ""/_synapse/admin/v1/media/%s/%s"" % (self.server_name, media_id) # Delete media channel = self.make_request( ""DELETE"", url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(1, channel.json_body[""total""]) self.assertEqual( media_id, channel.json_body[""deleted_media""][0], ) # Attempt to access media channel = make_request( self.reactor, FakeSite(download_resource, self.reactor), ""GET"", server_and_media_id, shorthand=False, access_token=self.admin_user_tok, ) self.assertEqual( HTTPStatus.NOT_FOUND, channel.code, msg=( ""Expected to receive a HTTPStatus.NOT_FOUND on accessing deleted media: %s"" % server_and_media_id ), ) # Test if the file is deleted self.assertFalse(os.path.exists(local_path)) ``` ###Assistant : Tests that delete a media is successfully " 1510,"def load_data_for_viz(load_type, model_file_statistics, **kwargs): supported_load_types = dict( load_json=load_json, load_from_file=partial( load_from_file, dtype=kwargs.get(""dtype"", int), ground_truth_split=kwargs.get(""ground_truth_split"", 2) ), ) loader = supported_load_types[load_type] try: stats_per_model = [loader(stats_f) for stats_f in model_file_statistics] except (TypeError, AttributeError): logger.exception(f""Unable to open model statistics file {model_file_statistics}!"") raise return stats_per_model ","Load model file data in to list of . :param load_type: type of the data loader to be used. :param model_file_statistics: JSON file or list of json files containing any model experiment stats. :return List of training statistics loaded as json objects. ",42,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_data_for_viz(load_type, model_file_statistics, **kwargs): supported_load_types = dict( load_json=load_json, load_from_file=partial( load_from_file, dtype=kwargs.get(""dtype"", int), ground_truth_split=kwargs.get(""ground_truth_split"", 2) ), ) loader = supported_load_types[load_type] try: stats_per_model = [loader(stats_f) for stats_f in model_file_statistics] except (TypeError, AttributeError): logger.exception(f""Unable to open model statistics file {model_file_statistics}!"") raise return stats_per_model ``` ###Assistant : Load model file data in to list of . :param load_type: type of the data loader to be used. :param model_file_statistics: JSON file or list of json files containing any model experiment stats. 
:return List of training statistics loaded as json objects. " 1511,"def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=100000, trim_rule=None): logger.info(""collecting all words and their counts"") if corpus_file is not None: corpus_iterable = TaggedLineDocument(corpus_file) total_words, corpus_count = self._scan_vocab(corpus_iterable, progress_per, trim_rule) logger.info( ""collected %i word types and %i unique tags from a corpus of %i examples and %i words"", len(self.raw_vocab), len(self.dv), corpus_count, total_words, ) return total_words, corpus_count ","Create the model's vocabulary: a mapping from unique words in the corpus to their frequency count. Parameters ---------- documents : iterable of :class:`~gensim.models.doc2vec.TaggedDocument`, optional The tagged documents used to create the vocabulary. Their tags can be either str tokens or ints (faster). corpus_file : str, optional Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format. You may use this argument instead of `documents` to get performance boost. Only one of `documents` or `corpus_file` arguments need to be passed (not both of them). progress_per : int Progress will be logged every `progress_per` documents. trim_rule : function, optional Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count). Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`), or a callable that accepts parameters (word, count, min_count) and returns either :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`. The rule, if given, is only used to prune vocabulary during :meth:`~gensim.models.doc2vec.Doc2Vec.build_vocab` and is not stored as part of the model. The input parameters are of the following types: * `word` (str) - the word we are examining * `count` (int) - the word's frequency count in the corpus * `min_count` (int) - the minimum count threshold. Returns ------- (int, int) Tuple of `(total words in the corpus, number of documents)`. ",218,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=100000, trim_rule=None): logger.info(""collecting all words and their counts"") if corpus_file is not None: corpus_iterable = TaggedLineDocument(corpus_file) total_words, corpus_count = self._scan_vocab(corpus_iterable, progress_per, trim_rule) logger.info( ""collected %i word types and %i unique tags from a corpus of %i examples and %i words"", len(self.raw_vocab), len(self.dv), corpus_count, total_words, ) return total_words, corpus_count ``` ###Assistant : Create the model's vocabulary: a mapping from unique words in the corpus to their frequency count. Parameters ---------- documents : iterable of :class:`~gensim.models.doc2vec.TaggedDocument`, optional The tagged documents used to create the vocabulary. Their tags can be either str tokens or ints (faster). corpus_file : str, optional Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format. You may use this argument instead of `documents` to get performance boost. Only one of `documents` or `corpus_file` arguments need to be passed (not both of them). progress_per : int Progress will be logged every `progress_per` documents. 
trim_rule : function, optional Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count). Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`), or a callable that accepts parameters (word, count, min_count) and returns either :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`. The rule, if given, is only used to prune vocabulary during :meth:`~gensim.models.doc2vec.Doc2Vec.build_vocab` and is not stored as part of the model. The input parameters are of the following types: * `word` (str) - the word we are examining * `count` (int) - the word's frequency count in the corpus * `min_count` (int) - the minimum count threshold. Returns ------- (int, int) Tuple of `(total words in the corpus, number of documents)`. " 1512,"def _parse_command_opts(self, parser, args): # late import because of mutual dependence between these modules from distutils.cmd import Command # Pull the current command from the head of the command line command = args[0] if not command_re.match(command): raise SystemExit(""invalid command name '%s'"" % command) self.commands.append(command) # Dig up the command class that implements this command, so we # 1) know that it's a valid command, and 2) know which options # it takes. try: cmd_class = self.get_command_class(command) except DistutilsModuleError as msg: raise DistutilsArgError(msg) # Require that the command class be derived from Command -- want # to be sure that the basic ""command"" interface is implemented. if not issubclass(cmd_class, Command): raise DistutilsClassError( ""command class %s must subclass Command"" % cmd_class) # Also make sure that the command object provides a list of its # known options. if not (hasattr(cmd_class, 'user_options') and isinstance(cmd_class.user_options, list)): msg = (""command class %s must provide "" ""'user_options' attribute (a list of tuples)"") raise DistutilsClassError(msg % cmd_class) # If the command class has a list of negative alias options, # merge it in with the global negative aliases. negative_opt = self.negative_opt if hasattr(cmd_class, 'negative_opt'): negative_opt = negative_opt.copy() negative_opt.update(cmd_class.negative_opt) # Check for help_options in command class. They have a different # format (tuple of four) so we need to preprocess them here. if (hasattr(cmd_class, 'help_options') and isinstance(cmd_class.help_options, list)): help_options = fix_help_options(cmd_class.help_options) else: help_options = [] # All commands support the global options too, just by adding # in 'global_options'. 
parser.set_option_table(self.global_options + cmd_class.user_options + help_options) parser.set_negative_aliases(negative_opt) (args, opts) = parser.getopt(args[1:]) if hasattr(opts, 'help') and opts.help: self._show_help(parser, display_options=0, commands=[cmd_class]) return if (hasattr(cmd_class, 'help_options') and isinstance(cmd_class.help_options, list)): help_option_found=0 for (help_option, short, desc, func) in cmd_class.help_options: if hasattr(opts, parser.get_attr_name(help_option)): help_option_found=1 if callable(func): func() else: raise DistutilsClassError( ""invalid help function %r for help option '%s': "" ""must be a callable object (function, etc.)"" % (func, help_option)) if help_option_found: return # Put the options from the command-line into their official # holding pen, the 'command_options' dictionary. opt_dict = self.get_option_dict(command) for (name, value) in vars(opts).items(): opt_dict[name] = (""command line"", value) return args ","Parse the command-line options for a single command. 'parser' must be a FancyGetopt instance; 'args' must be the list of arguments, starting with the current command (whose options we are about to parse). Returns a new version of 'args' with the next command at the front of the list; will be the empty list if there are no more commands on the command line. Returns None if the user asked for help on this command. ",75,337,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _parse_command_opts(self, parser, args): # late import because of mutual dependence between these modules from distutils.cmd import Command # Pull the current command from the head of the command line command = args[0] if not command_re.match(command): raise SystemExit(""invalid command name '%s'"" % command) self.commands.append(command) # Dig up the command class that implements this command, so we # 1) know that it's a valid command, and 2) know which options # it takes. try: cmd_class = self.get_command_class(command) except DistutilsModuleError as msg: raise DistutilsArgError(msg) # Require that the command class be derived from Command -- want # to be sure that the basic ""command"" interface is implemented. if not issubclass(cmd_class, Command): raise DistutilsClassError( ""command class %s must subclass Command"" % cmd_class) # Also make sure that the command object provides a list of its # known options. if not (hasattr(cmd_class, 'user_options') and isinstance(cmd_class.user_options, list)): msg = (""command class %s must provide "" ""'user_options' attribute (a list of tuples)"") raise DistutilsClassError(msg % cmd_class) # If the command class has a list of negative alias options, # merge it in with the global negative aliases. negative_opt = self.negative_opt if hasattr(cmd_class, 'negative_opt'): negative_opt = negative_opt.copy() negative_opt.update(cmd_class.negative_opt) # Check for help_options in command class. They have a different # format (tuple of four) so we need to preprocess them here. if (hasattr(cmd_class, 'help_options') and isinstance(cmd_class.help_options, list)): help_options = fix_help_options(cmd_class.help_options) else: help_options = [] # All commands support the global options too, just by adding # in 'global_options'. 
parser.set_option_table(self.global_options + cmd_class.user_options + help_options) parser.set_negative_aliases(negative_opt) (args, opts) = parser.getopt(args[1:]) if hasattr(opts, 'help') and opts.help: self._show_help(parser, display_options=0, commands=[cmd_class]) return if (hasattr(cmd_class, 'help_options') and isinstance(cmd_class.help_options, list)): help_option_found=0 for (help_option, short, desc, func) in cmd_class.help_options: if hasattr(opts, parser.get_attr_name(help_option)): help_option_found=1 if callable(func): func() else: raise DistutilsClassError( ""invalid help function %r for help option '%s': "" ""must be a callable object (function, etc.)"" % (func, help_option)) if help_option_found: return # Put the options from the command-line into their official # holding pen, the 'command_options' dictionary. opt_dict = self.get_option_dict(command) for (name, value) in vars(opts).items(): opt_dict[name] = (""command line"", value) return args ``` ###Assistant : Parse the command-line options for a single command. 'parser' must be a FancyGetopt instance; 'args' must be the list of arguments, starting with the current command (whose options we are about to parse). Returns a new version of 'args' with the next command at the front of the list; will be the empty list if there are no more commands on the command line. Returns None if the user asked for help on this command. " 1513,"def interval(self, confidence=None, *args, **kwds): # This function was originally written with parameter `alpha`, but # `alpha` is also the name of a shape parameter of two distributions. # This block allows the function to accept both `alpha` and its # replacement `confidence` during a deprecation period; it can be # removed in the second release after 1.9.0. # See description of logic in `moment` method. has_shape_alpha = (self.shapes is not None and ""alpha"" in (self.shapes.split("", ""))) got_confidence = confidence is not None got_keyword_alpha = kwds.get(""alpha"", None) is not None if not got_confidence and ((not got_keyword_alpha) or (got_keyword_alpha and has_shape_alpha)): message = (""interval() missing 1 required positional argument: "" ""`confidence`"") raise TypeError(message) if got_keyword_alpha and not has_shape_alpha: if got_confidence: # this will change to ""interval got unexpected argument alpha"" message = ""interval() got multiple values for first argument"" raise TypeError(message) else: message = (""Use of keyword argument `alpha` for method "" ""`interval` is deprecated. Use first positional "" ""argument or keyword argument `confidence` "" ""instead."") confidence = kwds.pop(""alpha"") warnings.warn(message, DeprecationWarning, stacklevel=2) alpha = confidence alpha = asarray(alpha) if np.any((alpha > 1) | (alpha < 0)): raise ValueError(""alpha must be between 0 and 1 inclusive"") q1 = (1.0-alpha)/2 q2 = (1.0+alpha)/2 a = self.ppf(q1, *args, **kwds) b = self.ppf(q2, *args, **kwds) return a, b ","Confidence interval with equal areas around the median. .. deprecated:: 1.9.0 Parameter `alpha` is replaced by parameter `confidence` to avoid name collisions with the shape parameter `alpha` of some distributions. Parameter `alpha` will be removed in the second release after 1.9.0. Parameters ---------- confidence : array_like of float Probability that an rv will be drawn from the returned range. Each value should be in the range [0, 1]. arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). 
loc : array_like, optional location parameter, Default is 0. scale : array_like, optional scale parameter, Default is 1. Returns ------- a, b : ndarray of float end-points of range that contain ``100 * alpha %`` of the rv's possible values. ",128,213,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def interval(self, confidence=None, *args, **kwds): # This function was originally written with parameter `alpha`, but # `alpha` is also the name of a shape parameter of two distributions. # This block allows the function to accept both `alpha` and its # replacement `confidence` during a deprecation period; it can be # removed in the second release after 1.9.0. # See description of logic in `moment` method. has_shape_alpha = (self.shapes is not None and ""alpha"" in (self.shapes.split("", ""))) got_confidence = confidence is not None got_keyword_alpha = kwds.get(""alpha"", None) is not None if not got_confidence and ((not got_keyword_alpha) or (got_keyword_alpha and has_shape_alpha)): message = (""interval() missing 1 required positional argument: "" ""`confidence`"") raise TypeError(message) if got_keyword_alpha and not has_shape_alpha: if got_confidence: # this will change to ""interval got unexpected argument alpha"" message = ""interval() got multiple values for first argument"" raise TypeError(message) else: message = (""Use of keyword argument `alpha` for method "" ""`interval` is deprecated. Use first positional "" ""argument or keyword argument `confidence` "" ""instead."") confidence = kwds.pop(""alpha"") warnings.warn(message, DeprecationWarning, stacklevel=2) alpha = confidence alpha = asarray(alpha) if np.any((alpha > 1) | (alpha < 0)): raise ValueError(""alpha must be between 0 and 1 inclusive"") q1 = (1.0-alpha)/2 q2 = (1.0+alpha)/2 a = self.ppf(q1, *args, **kwds) b = self.ppf(q2, *args, **kwds) return a, b ``` ###Assistant : Confidence interval with equal areas around the median. .. deprecated:: 1.9.0 Parameter `alpha` is replaced by parameter `confidence` to avoid name collisions with the shape parameter `alpha` of some distributions. Parameter `alpha` will be removed in the second release after 1.9.0. Parameters ---------- confidence : array_like of float Probability that an rv will be drawn from the returned range. Each value should be in the range [0, 1]. arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional location parameter, Default is 0. scale : array_like, optional scale parameter, Default is 1. Returns ------- a, b : ndarray of float end-points of range that contain ``100 * alpha %`` of the rv's possible values. " 1514,"def __call__(self) -> bool: for meta in tqdm(self._face_alignments, desc=""Updating Alignments File from PNG Header"", leave=False): src = meta[""source""] alignment = meta[""alignments""] if not any(alignment.get(key, {}) for key in self._updatable_keys): continue faces = self._alignments.get_faces_in_frame(src[""source_filename""]) if len(faces) < src[""face_index""] + 1: # list index out of range logger.debug(""Skipped face '%s'. 
Index does not exist in alignments file"", src[""original_filename""]) continue face = faces[src[""face_index""]] self._check_and_update(alignment, face) retval = False if self._counts: retval = True logger.info(""Updated alignments file from PNG Data: %s"", self._counts) return retval "," Parse through the face data updating any entries in the alignments file. Returns ------- bool ``True`` if any alignment information was updated otherwise ``False`` ",24,80,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self) -> bool: for meta in tqdm(self._face_alignments, desc=""Updating Alignments File from PNG Header"", leave=False): src = meta[""source""] alignment = meta[""alignments""] if not any(alignment.get(key, {}) for key in self._updatable_keys): continue faces = self._alignments.get_faces_in_frame(src[""source_filename""]) if len(faces) < src[""face_index""] + 1: # list index out of range logger.debug(""Skipped face '%s'. Index does not exist in alignments file"", src[""original_filename""]) continue face = faces[src[""face_index""]] self._check_and_update(alignment, face) retval = False if self._counts: retval = True logger.info(""Updated alignments file from PNG Data: %s"", self._counts) return retval ``` ###Assistant : Parse through the face data updating any entries in the alignments file. Returns ------- bool ``True`` if any alignment information was updated otherwise ``False`` " 1515,"def test_dynamic_path(self): doc = Document.objects.create( title=""does not matter"", created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)), mime_type=""application/pdf"", pk=2, checksum=""2"", storage_path=StoragePath.objects.create(path=""TestFolder/{created}""), ) self.assertEqual(generate_filename(doc), ""TestFolder/2020-06-25.pdf"") "," GIVEN: - A document with a defined storage path WHEN: - the filename is generated for the document THEN: - the generated filename uses the defined storage path for the document ",31,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dynamic_path(self): doc = Document.objects.create( title=""does not matter"", created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)), mime_type=""application/pdf"", pk=2, checksum=""2"", storage_path=StoragePath.objects.create(path=""TestFolder/{created}""), ) self.assertEqual(generate_filename(doc), ""TestFolder/2020-06-25.pdf"") ``` ###Assistant : GIVEN: - A document with a defined storage path WHEN: - the filename is generated for the document THEN: - the generated filename uses the defined storage path for the document " 1516,"async def follower_loop(self): try: await self._connect_to_leaders() except Exception as e: logger.error(""Exception occurred in follower loop: "") logger.exception(e) "," Main follower coroutine This starts all of the leader connection coros ",11,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def follower_loop(self): try: await self._connect_to_leaders() except Exception as e: logger.error(""Exception occurred in follower loop: "") logger.exception(e) ``` ###Assistant : Main follower coroutine This starts all of the leader connection coros " 1517,"def test_launcher_ensures_stdio(self): from kitty.constants import kitty_exe import subprocess exe = kitty_exe() cp = subprocess.run([exe, '+runpy', ]) self.assertEqual(cp.returncode, 0) ","\ import os, sys if sys.stdin: os.close(sys.stdin.fileno()) if sys.stdout: os.close(sys.stdout.fileno()) if sys.stderr: os.close(sys.stderr.fileno()) os.execlp('kitty', 'kitty', '+runpy', 'import sys; raise SystemExit(1 if sys.stdout is None or sys.stdin is None or sys.stderr is None else 0)') ",34,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_launcher_ensures_stdio(self): from kitty.constants import kitty_exe import subprocess exe = kitty_exe() cp = subprocess.run([exe, '+runpy', ]) self.assertEqual(cp.returncode, 0) ``` ###Assistant : \ import os, sys if sys.stdin: os.close(sys.stdin.fileno()) if sys.stdout: os.close(sys.stdout.fileno()) if sys.stderr: os.close(sys.stderr.fileno()) os.execlp('kitty', 'kitty', '+runpy', 'import sys; raise SystemExit(1 if sys.stdout is None or sys.stdin is None or sys.stderr is None else 0)') " 1518,"def test_interface_label_count_mismatch(self): bad_interface_data = { 'device': self.device.pk, 'name': 'eth[0-9]', 'label': 'Interface[0-1]', 'type': InterfaceTypeChoices.TYPE_1GE_GBIC, } form = InterfaceCreateForm(bad_interface_data) self.assertFalse(form.is_valid()) self.assertIn('label', form.errors) "," Check that attempting to generate a differing number of names and labels results in a validation error. ",17,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_interface_label_count_mismatch(self): bad_interface_data = { 'device': self.device.pk, 'name': 'eth[0-9]', 'label': 'Interface[0-1]', 'type': InterfaceTypeChoices.TYPE_1GE_GBIC, } form = InterfaceCreateForm(bad_interface_data) self.assertFalse(form.is_valid()) self.assertIn('label', form.errors) ``` ###Assistant : Check that attempting to generate a differing number of names and labels results in a validation error. " 1519,"def power_transform(X, method=""yeo-johnson"", *, standardize=True, copy=True): pt = PowerTransformer(method=method, standardize=standardize, copy=copy) return pt.fit_transform(X) ","Parametric, monotonic transformation to make data more Gaussian-like. Power transforms are a family of parametric, monotonic transformations that are applied to make data more Gaussian-like. This is useful for modeling issues related to heteroscedasticity (non-constant variance), or other situations where normality is desired. Currently, power_transform supports the Box-Cox transform and the Yeo-Johnson transform. The optimal parameter for stabilizing variance and minimizing skewness is estimated through maximum likelihood. Box-Cox requires input data to be strictly positive, while Yeo-Johnson supports both positive or negative data. By default, zero-mean, unit-variance normalization is applied to the transformed data. Read more in the :ref:`User Guide `. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to be transformed using a power transformation. 
method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson' The power transform method. Available methods are: - 'yeo-johnson' [1]_, works with positive and negative values - 'box-cox' [2]_, only works with strictly positive values .. versionchanged:: 0.23 The default value of the `method` parameter changed from 'box-cox' to 'yeo-johnson' in 0.23. standardize : bool, default=True Set to True to apply zero-mean, unit-variance normalization to the transformed output. copy : bool, default=True Set to False to perform inplace computation during transformation. Returns ------- X_trans : ndarray of shape (n_samples, n_features) The transformed data. See Also -------- PowerTransformer : Equivalent transformation with the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). quantile_transform : Maps data to a standard normal distribution with the parameter `output_distribution='normal'`. Notes ----- NaNs are treated as missing values: disregarded in ``fit``, and maintained in ``transform``. For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py `. References ---------- .. [1] I.K. Yeo and R.A. Johnson, ""A new family of power transformations to improve normality or symmetry."" Biometrika, 87(4), pp.954-959, (2000). .. [2] G.E.P. Box and D.R. Cox, ""An Analysis of Transformations"", Journal of the Royal Statistical Society B, 26, 211-252 (1964). Examples -------- >>> import numpy as np >>> from sklearn.preprocessing import power_transform >>> data = [[1, 2], [3, 2], [4, 5]] >>> print(power_transform(data, method='box-cox')) [[-1.332... -0.707...] [ 0.256... -0.707...] [ 1.076... 1.414...]] .. warning:: Risk of data leak. Do not use :func:`~sklearn.preprocessing.power_transform` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.PowerTransformer` within a :ref:`Pipeline ` in order to prevent most risks of data leaking, e.g.: `pipe = make_pipeline(PowerTransformer(), LogisticRegression())`. ",421,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def power_transform(X, method=""yeo-johnson"", *, standardize=True, copy=True): pt = PowerTransformer(method=method, standardize=standardize, copy=copy) return pt.fit_transform(X) ``` ###Assistant : Parametric, monotonic transformation to make data more Gaussian-like. Power transforms are a family of parametric, monotonic transformations that are applied to make data more Gaussian-like. This is useful for modeling issues related to heteroscedasticity (non-constant variance), or other situations where normality is desired. Currently, power_transform supports the Box-Cox transform and the Yeo-Johnson transform. The optimal parameter for stabilizing variance and minimizing skewness is estimated through maximum likelihood. Box-Cox requires input data to be strictly positive, while Yeo-Johnson supports both positive or negative data. By default, zero-mean, unit-variance normalization is applied to the transformed data. Read more in the :ref:`User Guide `. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to be transformed using a power transformation. 
method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson' The power transform method. Available methods are: - 'yeo-johnson' [1]_, works with positive and negative values - 'box-cox' [2]_, only works with strictly positive values .. versionchanged:: 0.23 The default value of the `method` parameter changed from 'box-cox' to 'yeo-johnson' in 0.23. standardize : bool, default=True Set to True to apply zero-mean, unit-variance normalization to the transformed output. copy : bool, default=True Set to False to perform inplace computation during transformation. Returns ------- X_trans : ndarray of shape (n_samples, n_features) The transformed data. See Also -------- PowerTransformer : Equivalent transformation with the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). quantile_transform : Maps data to a standard normal distribution with the parameter `output_distribution='normal'`. Notes ----- NaNs are treated as missing values: disregarded in ``fit``, and maintained in ``transform``. For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py `. References ---------- .. [1] I.K. Yeo and R.A. Johnson, ""A new family of power transformations to improve normality or symmetry."" Biometrika, 87(4), pp.954-959, (2000). .. [2] G.E.P. Box and D.R. Cox, ""An Analysis of Transformations"", Journal of the Royal Statistical Society B, 26, 211-252 (1964). Examples -------- >>> import numpy as np >>> from sklearn.preprocessing import power_transform >>> data = [[1, 2], [3, 2], [4, 5]] >>> print(power_transform(data, method='box-cox')) [[-1.332... -0.707...] [ 0.256... -0.707...] [ 1.076... 1.414...]] .. warning:: Risk of data leak. Do not use :func:`~sklearn.preprocessing.power_transform` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.PowerTransformer` within a :ref:`Pipeline ` in order to prevent most risks of data leaking, e.g.: `pipe = make_pipeline(PowerTransformer(), LogisticRegression())`. " 1520,"def desargues_graph(create_using=None): G = LCF_graph(20, [5, -5, 9, -9], 5, create_using) G.name = ""Desargues Graph"" return G "," Returns the Desargues Graph The Desargues Graph is a non-planar, distance-transitive cubic graph with 20 nodes and 30 edges [1]_. It is a symmetric graph. It can be represented in LCF notation as [5,-5,9,-9]^5 [2]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph Desargues Graph with 20 nodes and 30 edges References ---------- .. [1] https://en.wikipedia.org/wiki/Desargues_graph .. [2] https://mathworld.wolfram.com/DesarguesGraph.html ",77,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def desargues_graph(create_using=None): G = LCF_graph(20, [5, -5, 9, -9], 5, create_using) G.name = ""Desargues Graph"" return G ``` ###Assistant : Returns the Desargues Graph The Desargues Graph is a non-planar, distance-transitive cubic graph with 20 nodes and 30 edges [1]_. It is a symmetric graph. It can be represented in LCF notation as [5,-5,9,-9]^5 [2]_. 
Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph Desargues Graph with 20 nodes and 30 edges References ---------- .. [1] https://en.wikipedia.org/wiki/Desargues_graph .. [2] https://mathworld.wolfram.com/DesarguesGraph.html " 1521,"def get_output_feature_jsonschema(): output_feature_types = sorted(list(output_type_registry.keys())) return { ""type"": ""array"", ""items"": { ""type"": ""object"", ""properties"": { ""name"": {""type"": ""string""}, ""type"": {""type"": ""string"", ""enum"": output_feature_types}, ""column"": {""type"": ""string""}, }, ""additionalProperties"": True, ""allOf"": get_output_feature_conds(), ""required"": [""name"", ""type""], }, } ","This function returns a JSON schema structured to only requires a `type` key and then conditionally applies a corresponding output feature's field constraints. Returns: JSON Schema ",26,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_output_feature_jsonschema(): output_feature_types = sorted(list(output_type_registry.keys())) return { ""type"": ""array"", ""items"": { ""type"": ""object"", ""properties"": { ""name"": {""type"": ""string""}, ""type"": {""type"": ""string"", ""enum"": output_feature_types}, ""column"": {""type"": ""string""}, }, ""additionalProperties"": True, ""allOf"": get_output_feature_conds(), ""required"": [""name"", ""type""], }, } ``` ###Assistant : This function returns a JSON schema structured to only requires a `type` key and then conditionally applies a corresponding output feature's field constraints. Returns: JSON Schema " 1522,"def greet(str): return str with gr.Blocks() as demo: with gr.Row(): text1 = gr.component(""textarea"") text2 = gr.TextArea() text3 = gr.templates.TextArea() text1.change(greet, text1, text2) text2.change(greet, text2, text3) text3.change(greet, text3, text1) demo.launch() "," You can make use of str shortcuts you use in Interface within Blocks as well. Interface shortcut example: Interface(greet, ""textarea"", ""textarea"") You can use 1. gr.component() 2. gr.templates.Template() 3. gr.Template() All the templates are listed in gradio/templates.py ",37,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def greet(str): return str with gr.Blocks() as demo: with gr.Row(): text1 = gr.component(""textarea"") text2 = gr.TextArea() text3 = gr.templates.TextArea() text1.change(greet, text1, text2) text2.change(greet, text2, text3) text3.change(greet, text3, text1) demo.launch() ``` ###Assistant : You can make use of str shortcuts you use in Interface within Blocks as well. Interface shortcut example: Interface(greet, ""textarea"", ""textarea"") You can use 1. gr.component() 2. gr.templates.Template() 3. gr.Template() All the templates are listed in gradio/templates.py " 1523,"def _impute_values(self, features): if self.verbosity > 1: print(""Imputing missing values in feature set"") if self._fitted_imputer is None: self._fitted_imputer = SimpleImputer(strategy=""median"") self._fitted_imputer.fit(features) return self._fitted_imputer.transform(features) ","Impute missing values in a feature set. 
Parameters ---------- features: array-like {n_samples, n_features} A feature matrix Returns ------- array-like {n_samples, n_features} ",21,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _impute_values(self, features): if self.verbosity > 1: print(""Imputing missing values in feature set"") if self._fitted_imputer is None: self._fitted_imputer = SimpleImputer(strategy=""median"") self._fitted_imputer.fit(features) return self._fitted_imputer.transform(features) ``` ###Assistant : Impute missing values in a feature set. Parameters ---------- features: array-like {n_samples, n_features} A feature matrix Returns ------- array-like {n_samples, n_features} " 1524,"def get_current_site(request): # Import is inside the function because its point is to avoid importing the # Site models when django.contrib.sites isn't installed. if apps.is_installed(""django.contrib.sites""): from .models import Site return Site.objects.get_current(request) else: return RequestSite(request) "," Check if contrib.sites is installed and return either the current ``Site`` object or a ``RequestSite`` object based on the request. ",20,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_current_site(request): # Import is inside the function because its point is to avoid importing the # Site models when django.contrib.sites isn't installed. if apps.is_installed(""django.contrib.sites""): from .models import Site return Site.objects.get_current(request) else: return RequestSite(request) ``` ###Assistant : Check if contrib.sites is installed and return either the current ``Site`` object or a ``RequestSite`` object based on the request. " 1525,"def get_scripts(use_names=False): scripts = OrderedDict() # Iterate through all modules within the scripts path. These are the user-created files in which reports are # defined. for importer, module_name, _ in pkgutil.iter_modules([settings.SCRIPTS_ROOT]): # Remove cached module to ensure consistency with filesystem if module_name in sys.modules: del sys.modules[module_name] module = importer.find_module(module_name).load_module(module_name) if use_names and hasattr(module, 'name'): module_name = module.name module_scripts = OrderedDict() script_order = getattr(module, ""script_order"", ()) ordered_scripts = [cls for cls in script_order if is_script(cls)] unordered_scripts = [cls for _, cls in inspect.getmembers(module, is_script) if cls not in script_order] for cls in [*ordered_scripts, *unordered_scripts]: module_scripts[cls.__name__] = cls if module_scripts: scripts[module_name] = module_scripts return scripts "," Return a dict of dicts mapping all scripts to their modules. Set use_names to True to use each module's human- defined name in place of the actual module name. ",29,103,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_scripts(use_names=False): scripts = OrderedDict() # Iterate through all modules within the scripts path. These are the user-created files in which reports are # defined. 
for importer, module_name, _ in pkgutil.iter_modules([settings.SCRIPTS_ROOT]): # Remove cached module to ensure consistency with filesystem if module_name in sys.modules: del sys.modules[module_name] module = importer.find_module(module_name).load_module(module_name) if use_names and hasattr(module, 'name'): module_name = module.name module_scripts = OrderedDict() script_order = getattr(module, ""script_order"", ()) ordered_scripts = [cls for cls in script_order if is_script(cls)] unordered_scripts = [cls for _, cls in inspect.getmembers(module, is_script) if cls not in script_order] for cls in [*ordered_scripts, *unordered_scripts]: module_scripts[cls.__name__] = cls if module_scripts: scripts[module_name] = module_scripts return scripts ``` ###Assistant : Return a dict of dicts mapping all scripts to their modules. Set use_names to True to use each module's human- defined name in place of the actual module name. " 1526,"def render(self, template_name, extra_context=None): if extra_context is None: extra_context = {} elif not isinstance(extra_context, dict): raise TypeError(""extra_context must be a dictionary"") return get_template(template_name).render({**self.context, **extra_context}) "," Convenience method for rendering the specified Django template using the default context data. An additional context dictionary may be passed as `extra_context`. ",22,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def render(self, template_name, extra_context=None): if extra_context is None: extra_context = {} elif not isinstance(extra_context, dict): raise TypeError(""extra_context must be a dictionary"") return get_template(template_name).render({**self.context, **extra_context}) ``` ###Assistant : Convenience method for rendering the specified Django template using the default context data. An additional context dictionary may be passed as `extra_context`. " 1527,"def preprocess_input(x, data_format=None): return x @keras_export(""keras.applications.mobilenet_v3.decode_predictions"")","A placeholder method for backward compatibility. The preprocessing logic has been included in the mobilenet_v3 model implementation. Users are no longer required to call this method to normalize the input data. This method does nothing and only kept as a placeholder to align the API surface between old and new version of model. Args: x: A floating point `numpy.array` or a `tf.Tensor`. data_format: Optional data format of the image tensor/array. Defaults to None, in which case the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it defaults to ""channels_last"").{mode} Returns: Unchanged `numpy.array` or `tf.Tensor`. ",95,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def preprocess_input(x, data_format=None): return x @keras_export(""keras.applications.mobilenet_v3.decode_predictions"") ``` ###Assistant : A placeholder method for backward compatibility. The preprocessing logic has been included in the mobilenet_v3 model implementation. Users are no longer required to call this method to normalize the input data. This method does nothing and only kept as a placeholder to align the API surface between old and new version of model. Args: x: A floating point `numpy.array` or a `tf.Tensor`. data_format: Optional data format of the image tensor/array. 
Defaults to None, in which case the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it defaults to ""channels_last"").{mode} Returns: Unchanged `numpy.array` or `tf.Tensor`. " 1528,"def message_level_tag(message): return MESSAGE_TAGS.get(message.level) @register.simple_tag"," Return the tag for this message's level as defined in django.contrib.messages.constants.DEFAULT_TAGS, ignoring the project-level MESSAGE_TAGS setting (which end-users might customise). ",20,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def message_level_tag(message): return MESSAGE_TAGS.get(message.level) @register.simple_tag ``` ###Assistant : Return the tag for this message's level as defined in django.contrib.messages.constants.DEFAULT_TAGS, ignoring the project-level MESSAGE_TAGS setting (which end-users might customise). " 1529,"def _check_reg_targets(y_true, y_pred, multioutput, dtype=""numeric""): check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False, dtype=dtype) y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError( ""y_true and y_pred have different number of output ({0}!={1})"".format( y_true.shape[1], y_pred.shape[1] ) ) n_outputs = y_true.shape[1] allowed_multioutput_str = (""raw_values"", ""uniform_average"", ""variance_weighted"") if isinstance(multioutput, str): if multioutput not in allowed_multioutput_str: raise ValueError( ""Allowed 'multioutput' string values are {}. "" ""You provided multioutput={!r}"".format( allowed_multioutput_str, multioutput ) ) elif multioutput is not None: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError(""Custom weights are useful only in multi-output cases."") elif n_outputs != len(multioutput): raise ValueError( ""There must be equally many custom weights (%d) as outputs (%d)."" % (len(multioutput), n_outputs) ) y_type = ""continuous"" if n_outputs == 1 else ""continuous-multioutput"" return y_type, y_true, y_pred, multioutput ","Check that y_true and y_pred belong to the same regression task. Parameters ---------- y_true : array-like y_pred : array-like multioutput : array-like or string in ['raw_values', uniform_average', 'variance_weighted'] or None None is accepted due to backward compatibility of r2_score(). dtype : str or list, default=""numeric"" the dtype argument passed to check_array. Returns ------- type_true : one of {'continuous', continuous-multioutput'} The type of the true target data, as output by 'utils.multiclass.type_of_target'. y_true : array-like of shape (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples, n_outputs) Estimated target values. multioutput : array-like of shape (n_outputs) or string in ['raw_values', uniform_average', 'variance_weighted'] or None Custom output weights if ``multioutput`` is array-like or just the corresponding argument if ``multioutput`` is a correct keyword. ",124,141,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _check_reg_targets(y_true, y_pred, multioutput, dtype=""numeric""): check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False, dtype=dtype) y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError( ""y_true and y_pred have different number of output ({0}!={1})"".format( y_true.shape[1], y_pred.shape[1] ) ) n_outputs = y_true.shape[1] allowed_multioutput_str = (""raw_values"", ""uniform_average"", ""variance_weighted"") if isinstance(multioutput, str): if multioutput not in allowed_multioutput_str: raise ValueError( ""Allowed 'multioutput' string values are {}. "" ""You provided multioutput={!r}"".format( allowed_multioutput_str, multioutput ) ) elif multioutput is not None: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError(""Custom weights are useful only in multi-output cases."") elif n_outputs != len(multioutput): raise ValueError( ""There must be equally many custom weights (%d) as outputs (%d)."" % (len(multioutput), n_outputs) ) y_type = ""continuous"" if n_outputs == 1 else ""continuous-multioutput"" return y_type, y_true, y_pred, multioutput ``` ###Assistant : Check that y_true and y_pred belong to the same regression task. Parameters ---------- y_true : array-like y_pred : array-like multioutput : array-like or string in ['raw_values', uniform_average', 'variance_weighted'] or None None is accepted due to backward compatibility of r2_score(). dtype : str or list, default=""numeric"" the dtype argument passed to check_array. Returns ------- type_true : one of {'continuous', continuous-multioutput'} The type of the true target data, as output by 'utils.multiclass.type_of_target'. y_true : array-like of shape (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples, n_outputs) Estimated target values. multioutput : array-like of shape (n_outputs) or string in ['raw_values', uniform_average', 'variance_weighted'] or None Custom output weights if ``multioutput`` is array-like or just the corresponding argument if ``multioutput`` is a correct keyword. " 1530,"def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]: copied_self = copy(self) copied_self._func_or_class = ""dummpy.module"" schema_shell = deployment_to_schema(copied_self) if inspect.isfunction(self._func_or_class): return FunctionNode( self._func_or_class, args, # Used to bind and resolve DAG only, can take user input kwargs, # Used to bind and resolve DAG only, can take user input self._ray_actor_options or dict(), other_args_to_resolve={ ""deployment_schema"": schema_shell, ""is_from_serve_deployment"": True, }, ) else: return ClassNode( self._func_or_class, args, kwargs, cls_options=self._ray_actor_options or dict(), other_args_to_resolve={ ""deployment_schema"": schema_shell, ""is_from_serve_deployment"": True, }, ) ","Bind the provided arguments and return a class or function node. The returned bound deployment can be deployed or bound to other deployments to create a deployment graph. ",28,73,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]: copied_self = copy(self) copied_self._func_or_class = ""dummpy.module"" schema_shell = deployment_to_schema(copied_self) if inspect.isfunction(self._func_or_class): return FunctionNode( self._func_or_class, args, # Used to bind and resolve DAG only, can take user input kwargs, # Used to bind and resolve DAG only, can take user input self._ray_actor_options or dict(), other_args_to_resolve={ ""deployment_schema"": schema_shell, ""is_from_serve_deployment"": True, }, ) else: return ClassNode( self._func_or_class, args, kwargs, cls_options=self._ray_actor_options or dict(), other_args_to_resolve={ ""deployment_schema"": schema_shell, ""is_from_serve_deployment"": True, }, ) ``` ###Assistant : Bind the provided arguments and return a class or function node. The returned bound deployment can be deployed or bound to other deployments to create a deployment graph. " 1531,"def _obtain_mask(cls, detected_face, mask_type): mask = detected_face.mask.get(mask_type) if not mask: return None if mask.stored_centering != ""face"": face = AlignedFace(detected_face.landmarks_xy) mask.set_sub_crop(face.pose.offset[mask.stored_centering], face.pose.offset[""face""], centering=""face"") return mask.mask.squeeze() "," Obtain the mask for the correct ""face"" centering that is used in the thumbnail display. Parameters ----------- detected_face: :class:`lib.align.DetectedFace` The Detected Face object to obtain the mask for mask_type: str The type of mask to obtain Returns ------- :class:`numpy.ndarray` or ``None`` The single channel mask of requested mask type, if it exists, otherwise ``None`` ",54,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _obtain_mask(cls, detected_face, mask_type): mask = detected_face.mask.get(mask_type) if not mask: return None if mask.stored_centering != ""face"": face = AlignedFace(detected_face.landmarks_xy) mask.set_sub_crop(face.pose.offset[mask.stored_centering], face.pose.offset[""face""], centering=""face"") return mask.mask.squeeze() ``` ###Assistant : Obtain the mask for the correct ""face"" centering that is used in the thumbnail display. 
Parameters ----------- detected_face: :class:`lib.align.DetectedFace` The Detected Face object to obtain the mask for mask_type: str The type of mask to obtain Returns ------- :class:`numpy.ndarray` or ``None`` The single channel mask of requested mask type, if it exists, otherwise ``None`` " 1532,"def forward(self, input, mask=None): forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2) # elementwise-sum forward_x and backward_x # Shape: (batch_size, max_seq_len, hidden_size) h = paddle.add_n([forward_input, backward_input]) # Shape: (batch_size, hidden_size, 1) att_weight = self.att_weight.tile( repeat_times=(paddle.shape(h)[0], 1, 1)) # Shape: (batch_size, max_seq_len, 1) att_score = paddle.bmm(paddle.tanh(h), att_weight) if mask is not None: # mask, remove the effect of 'PAD' mask = paddle.cast(mask, dtype='float32') mask = mask.unsqueeze(axis=-1) inf_tensor = paddle.full( shape=mask.shape, dtype='float32', fill_value=-INF) att_score = paddle.multiply(att_score, mask) + paddle.multiply( inf_tensor, (1 - mask)) # Shape: (batch_size, max_seq_len, 1) att_weight = F.softmax(att_score, axis=1) # Shape: (batch_size, lstm_hidden_size) reps = paddle.bmm(h.transpose(perm=(0, 2, 1)), att_weight).squeeze(axis=-1) reps = paddle.tanh(reps) return reps, att_weight "," Args: input (paddle.Tensor) of shape (batch, seq_len, input_size): Tensor containing the features of the input sequence. mask (paddle.Tensor) of shape (batch, seq_len) : Tensor is a bool tensor, whose each element identifies whether the input word id is pad token or not. Defaults to `None`. ",45,104,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, input, mask=None): forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2) # elementwise-sum forward_x and backward_x # Shape: (batch_size, max_seq_len, hidden_size) h = paddle.add_n([forward_input, backward_input]) # Shape: (batch_size, hidden_size, 1) att_weight = self.att_weight.tile( repeat_times=(paddle.shape(h)[0], 1, 1)) # Shape: (batch_size, max_seq_len, 1) att_score = paddle.bmm(paddle.tanh(h), att_weight) if mask is not None: # mask, remove the effect of 'PAD' mask = paddle.cast(mask, dtype='float32') mask = mask.unsqueeze(axis=-1) inf_tensor = paddle.full( shape=mask.shape, dtype='float32', fill_value=-INF) att_score = paddle.multiply(att_score, mask) + paddle.multiply( inf_tensor, (1 - mask)) # Shape: (batch_size, max_seq_len, 1) att_weight = F.softmax(att_score, axis=1) # Shape: (batch_size, lstm_hidden_size) reps = paddle.bmm(h.transpose(perm=(0, 2, 1)), att_weight).squeeze(axis=-1) reps = paddle.tanh(reps) return reps, att_weight ``` ###Assistant : Args: input (paddle.Tensor) of shape (batch, seq_len, input_size): Tensor containing the features of the input sequence. mask (paddle.Tensor) of shape (batch, seq_len) : Tensor is a bool tensor, whose each element identifies whether the input word id is pad token or not. Defaults to `None`. " 1533,"def simple_test(self, feats, img_metas, **kwargs): all_cls_scores, all_mask_preds = self(feats, img_metas) mask_cls_results = all_cls_scores[-1] mask_pred_results = all_mask_preds[-1] # upsample masks img_shape = img_metas[0]['batch_input_shape'] mask_pred_results = F.interpolate( mask_pred_results, size=(img_shape[0], img_shape[1]), mode='bilinear', align_corners=False) return mask_cls_results, mask_pred_results ","Test without augmentaton. 
Args: feats (list[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple: A tuple contains two tensors. - mask_cls_results (Tensor): Mask classification logits,\ shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. - mask_pred_results (Tensor): Mask logits, shape \ (batch_size, num_queries, h, w). ",55,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def simple_test(self, feats, img_metas, **kwargs): all_cls_scores, all_mask_preds = self(feats, img_metas) mask_cls_results = all_cls_scores[-1] mask_pred_results = all_mask_preds[-1] # upsample masks img_shape = img_metas[0]['batch_input_shape'] mask_pred_results = F.interpolate( mask_pred_results, size=(img_shape[0], img_shape[1]), mode='bilinear', align_corners=False) return mask_cls_results, mask_pred_results ``` ###Assistant : Test without augmentaton. Args: feats (list[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple: A tuple contains two tensors. - mask_cls_results (Tensor): Mask classification logits,\ shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. - mask_pred_results (Tensor): Mask logits, shape \ (batch_size, num_queries, h, w). " 1534,"def _format_list(self, extracted_list): Colors = self.Colors list = [] for ind, (filename, lineno, name, line) in enumerate(extracted_list): normalCol, nameCol, fileCol, lineCol = ( # Emphasize the last entry (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line) if ind == len(extracted_list) - 1 else (Colors.Normal, Colors.name, Colors.filename, """") ) fns = _format_filename(filename, fileCol, normalCol, lineno=lineno) item = f""{normalCol} {fns}"" if name != """": item += f"" in {nameCol}{name}{normalCol}\n"" else: item += ""\n"" if line: item += f""{lineCol} {line.strip()}{normalCol}\n"" list.append(item) return list ","Format a list of traceback entry tuples for printing. Given a list of tuples as returned by extract_tb() or extract_stack(), return a list of strings ready for printing. Each string in the resulting list corresponds to the item with the same index in the argument list. Each string ends in a newline; the strings may contain internal newlines as well, for those items whose source text line is not None. Lifted almost verbatim from traceback.py ",75,76,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _format_list(self, extracted_list): Colors = self.Colors list = [] for ind, (filename, lineno, name, line) in enumerate(extracted_list): normalCol, nameCol, fileCol, lineCol = ( # Emphasize the last entry (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line) if ind == len(extracted_list) - 1 else (Colors.Normal, Colors.name, Colors.filename, """") ) fns = _format_filename(filename, fileCol, normalCol, lineno=lineno) item = f""{normalCol} {fns}"" if name != """": item += f"" in {nameCol}{name}{normalCol}\n"" else: item += ""\n"" if line: item += f""{lineCol} {line.strip()}{normalCol}\n"" list.append(item) return list ``` ###Assistant : Format a list of traceback entry tuples for printing. Given a list of tuples as returned by extract_tb() or extract_stack(), return a list of strings ready for printing. 
Each string in the resulting list corresponds to the item with the same index in the argument list. Each string ends in a newline; the strings may contain internal newlines as well, for those items whose source text line is not None. Lifted almost verbatim from traceback.py " 1535,"def euler_equations(L, funcs=(), vars=()): r funcs = tuple(funcs) if iterable(funcs) else (funcs,) if not funcs: funcs = tuple(L.atoms(Function)) else: for f in funcs: if not isinstance(f, Function): raise TypeError('Function expected, got: %s' % f) vars = tuple(vars) if iterable(vars) else (vars,) if not vars: vars = funcs[0].args else: vars = tuple(sympify(var) for var in vars) if not all(isinstance(v, Symbol) for v in vars): raise TypeError('Variables are not symbols, got %s' % vars) for f in funcs: if not vars == f.args: raise ValueError(""Variables %s do not match args: %s"" % (vars, f)) order = max([len(d.variables) for d in L.atoms(Derivative) if d.expr in funcs] + [0]) eqns = [] for f in funcs: eq = diff(L, f) for i in range(1, order + 1): for p in combinations_with_replacement(vars, i): eq = eq + S.NegativeOne**i*diff(L, diff(f, *p), *p) new_eq = Eq(eq, 0) if isinstance(new_eq, Eq): eqns.append(new_eq) return eqns "," Find the Euler-Lagrange equations [1]_ for a given Lagrangian. Parameters ========== L : Expr The Lagrangian that should be a function of the functions listed in the second argument and their derivatives. For example, in the case of two functions `f(x,y)`, `g(x,y)` and two independent variables `x`, `y` the Lagrangian would have the form: .. math:: L\left(f(x,y),g(x,y),\frac{\partial f(x,y)}{\partial x}, \frac{\partial f(x,y)}{\partial y}, \frac{\partial g(x,y)}{\partial x}, \frac{\partial g(x,y)}{\partial y},x,y\right) In many cases it is not necessary to provide anything, except the Lagrangian, it will be auto-detected (and an error raised if this couldn't be done). funcs : Function or an iterable of Functions The functions that the Lagrangian depends on. The Euler equations are differential equations for each of these functions. vars : Symbol or an iterable of Symbols The Symbols that are the independent variables of the functions. Returns ======= eqns : list of Eq The list of differential equations, one for each function. Examples ======== >>> from sympy import euler_equations, Symbol, Function >>> x = Function('x') >>> t = Symbol('t') >>> L = (x(t).diff(t))**2/2 - x(t)**2/2 >>> euler_equations(L, x(t), t) [Eq(-x(t) - Derivative(x(t), (t, 2)), 0)] >>> u = Function('u') >>> x = Symbol('x') >>> L = (u(t, x).diff(t))**2/2 - (u(t, x).diff(x))**2/2 >>> euler_equations(L, u(t, x), [t, x]) [Eq(-Derivative(u(t, x), (t, 2)) + Derivative(u(t, x), (x, 2)), 0)] References ========== .. [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation ",224,146,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def euler_equations(L, funcs=(), vars=()): r funcs = tuple(funcs) if iterable(funcs) else (funcs,) if not funcs: funcs = tuple(L.atoms(Function)) else: for f in funcs: if not isinstance(f, Function): raise TypeError('Function expected, got: %s' % f) vars = tuple(vars) if iterable(vars) else (vars,) if not vars: vars = funcs[0].args else: vars = tuple(sympify(var) for var in vars) if not all(isinstance(v, Symbol) for v in vars): raise TypeError('Variables are not symbols, got %s' % vars) for f in funcs: if not vars == f.args: raise ValueError(""Variables %s do not match args: %s"" % (vars, f)) order = max([len(d.variables) for d in L.atoms(Derivative) if d.expr in funcs] + [0]) eqns = [] for f in funcs: eq = diff(L, f) for i in range(1, order + 1): for p in combinations_with_replacement(vars, i): eq = eq + S.NegativeOne**i*diff(L, diff(f, *p), *p) new_eq = Eq(eq, 0) if isinstance(new_eq, Eq): eqns.append(new_eq) return eqns ``` ###Assistant : Find the Euler-Lagrange equations [1]_ for a given Lagrangian. Parameters ========== L : Expr The Lagrangian that should be a function of the functions listed in the second argument and their derivatives. For example, in the case of two functions `f(x,y)`, `g(x,y)` and two independent variables `x`, `y` the Lagrangian would have the form: .. math:: L\left(f(x,y),g(x,y),\frac{\partial f(x,y)}{\partial x}, \frac{\partial f(x,y)}{\partial y}, \frac{\partial g(x,y)}{\partial x}, \frac{\partial g(x,y)}{\partial y},x,y\right) In many cases it is not necessary to provide anything, except the Lagrangian, it will be auto-detected (and an error raised if this couldn't be done). funcs : Function or an iterable of Functions The functions that the Lagrangian depends on. The Euler equations are differential equations for each of these functions. vars : Symbol or an iterable of Symbols The Symbols that are the independent variables of the functions. Returns ======= eqns : list of Eq The list of differential equations, one for each function. Examples ======== >>> from sympy import euler_equations, Symbol, Function >>> x = Function('x') >>> t = Symbol('t') >>> L = (x(t).diff(t))**2/2 - x(t)**2/2 >>> euler_equations(L, x(t), t) [Eq(-x(t) - Derivative(x(t), (t, 2)), 0)] >>> u = Function('u') >>> x = Symbol('x') >>> L = (u(t, x).diff(t))**2/2 - (u(t, x).diff(x))**2/2 >>> euler_equations(L, u(t, x), [t, x]) [Eq(-Derivative(u(t, x), (t, 2)) + Derivative(u(t, x), (x, 2)), 0)] References ========== .. [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation " 1536,"def delay_update(self, skip_if_already_set=False, **kwargs): for key, value in kwargs.items(): if key in self.extra_update_fields and skip_if_already_set: continue elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'): if str(value) in self.extra_update_fields.get(key, ''): continue # if already set, avoid duplicating messages # In the case of these fields, we do not want to lose any prior information, so combine values self.extra_update_fields[key] = '\n'.join([str(self.extra_update_fields[key]), str(value)]) else: self.extra_update_fields[key] = value ",Stash fields that should be saved along with the job status change,12,65,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def delay_update(self, skip_if_already_set=False, **kwargs): for key, value in kwargs.items(): if key in self.extra_update_fields and skip_if_already_set: continue elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'): if str(value) in self.extra_update_fields.get(key, ''): continue # if already set, avoid duplicating messages # In the case of these fields, we do not want to lose any prior information, so combine values self.extra_update_fields[key] = '\n'.join([str(self.extra_update_fields[key]), str(value)]) else: self.extra_update_fields[key] = value ``` ###Assistant : Stash fields that should be saved along with the job status change " 1537,"def adjacent_tmp_file(path, **kwargs): # type: (str, **Any) -> Iterator[BinaryIO] with NamedTemporaryFile( delete=False, dir=os.path.dirname(path), prefix=os.path.basename(path), suffix="".tmp"", **kwargs, ) as f: result = cast(BinaryIO, f) try: yield result finally: result.flush() os.fsync(result.fileno()) # Tenacity raises RetryError by default, explictly raise the original exception _replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25)) replace = _replace_retry(os.replace) # test_writable_dir and _test_writable_dir_win are copied from Flit, # with the author's agreement to also place them under pip's license.","Return a file-like object pointing to a tmp file next to path. The file is created securely and is ensured to be written to disk after the context reaches its end. kwargs will be passed to tempfile.NamedTemporaryFile to control the way the temporary file will be opened. ",47,68,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def adjacent_tmp_file(path, **kwargs): # type: (str, **Any) -> Iterator[BinaryIO] with NamedTemporaryFile( delete=False, dir=os.path.dirname(path), prefix=os.path.basename(path), suffix="".tmp"", **kwargs, ) as f: result = cast(BinaryIO, f) try: yield result finally: result.flush() os.fsync(result.fileno()) # Tenacity raises RetryError by default, explictly raise the original exception _replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25)) replace = _replace_retry(os.replace) # test_writable_dir and _test_writable_dir_win are copied from Flit, # with the author's agreement to also place them under pip's license. ``` ###Assistant : Return a file-like object pointing to a tmp file next to path. The file is created securely and is ensured to be written to disk after the context reaches its end. kwargs will be passed to tempfile.NamedTemporaryFile to control the way the temporary file will be opened. " 1538,"def _lsb_release_info(self): # type: () -> Dict[str, str] if not self.include_lsb: return {} with open(os.devnull, ""wb"") as devnull: try: cmd = (""lsb_release"", ""-a"") stdout = subprocess.check_output(cmd, stderr=devnull) # Command not found or lsb_release returned error except (OSError, subprocess.CalledProcessError): return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(content) "," Get the information items from the lsb_release command output. Returns: A dictionary containing all information items. ",16,45,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _lsb_release_info(self): # type: () -> Dict[str, str] if not self.include_lsb: return {} with open(os.devnull, ""wb"") as devnull: try: cmd = (""lsb_release"", ""-a"") stdout = subprocess.check_output(cmd, stderr=devnull) # Command not found or lsb_release returned error except (OSError, subprocess.CalledProcessError): return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(content) ``` ###Assistant : Get the information items from the lsb_release command output. Returns: A dictionary containing all information items. " 1539,"def as_dict(self) -> Dict[Text, Any]: serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {""nodes"": {}} for node_name, node in self.nodes.items(): serializable = dataclasses.asdict(node) # Classes are not JSON serializable (surprise) serializable[""uses""] = f""{node.uses.__module__}.{node.uses.__name__}"" serializable_graph_schema[""nodes""][node_name] = serializable return serializable_graph_schema ","Returns graph schema in a serializable format. Returns: The graph schema in a format which can be dumped as JSON or other formats. ",23,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def as_dict(self) -> Dict[Text, Any]: serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {""nodes"": {}} for node_name, node in self.nodes.items(): serializable = dataclasses.asdict(node) # Classes are not JSON serializable (surprise) serializable[""uses""] = f""{node.uses.__module__}.{node.uses.__name__}"" serializable_graph_schema[""nodes""][node_name] = serializable return serializable_graph_schema ``` ###Assistant : Returns graph schema in a serializable format. Returns: The graph schema in a format which can be dumped as JSON or other formats. " 1540,"def _create_project_state(self, with_applied_migrations=False): state = ProjectState(real_apps=self.loader.unmigrated_apps) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan( self.loader.graph.leaf_nodes(), clean_start=True ) applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } for migration, _ in full_plan: if migration in applied_migrations: migration.mutate_state(state, preserve=False) return state "," Create a project state including all the applications without migrations and applied migrations if with_applied_migrations=True. ",15,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_project_state(self, with_applied_migrations=False): state = ProjectState(real_apps=self.loader.unmigrated_apps) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan( self.loader.graph.leaf_nodes(), clean_start=True ) applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } for migration, _ in full_plan: if migration in applied_migrations: migration.mutate_state(state, preserve=False) return state ``` ###Assistant : Create a project state including all the applications without migrations and applied migrations if with_applied_migrations=True. 
" 1541,"def revoke(state, task_id, terminate=False, signal=None, **kwargs): # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None task_ids = _revoke(state, task_ids, terminate, signal, **kwargs) return ok(f'tasks {task_ids} flagged as revoked') @control_command( variadic='headers', signature='[key1=value1 [key2=value2 [... [keyN=valueN]]]]', )","Revoke task by task id (or list of ids). Keyword Arguments: terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``). ",33,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def revoke(state, task_id, terminate=False, signal=None, **kwargs): # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None task_ids = _revoke(state, task_ids, terminate, signal, **kwargs) return ok(f'tasks {task_ids} flagged as revoked') @control_command( variadic='headers', signature='[key1=value1 [key2=value2 [... [keyN=valueN]]]]', ) ``` ###Assistant : Revoke task by task id (or list of ids). Keyword Arguments: terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``). " 1542,"def inplace_swap_row_csc(X, m, n): for t in [m, n]: if isinstance(t, np.ndarray): raise TypeError(""m and n should be valid integers"") if m < 0: m += X.shape[0] if n < 0: n += X.shape[0] m_mask = X.indices == m X.indices[X.indices == n] = m X.indices[m_mask] = n ","Swap two rows of a CSC matrix in-place. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix whose two rows are to be swapped. It should be of CSC format. m : int Index of the row of X to be swapped. n : int Index of the row of X to be swapped. ",56,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inplace_swap_row_csc(X, m, n): for t in [m, n]: if isinstance(t, np.ndarray): raise TypeError(""m and n should be valid integers"") if m < 0: m += X.shape[0] if n < 0: n += X.shape[0] m_mask = X.indices == m X.indices[X.indices == n] = m X.indices[m_mask] = n ``` ###Assistant : Swap two rows of a CSC matrix in-place. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix whose two rows are to be swapped. It should be of CSC format. m : int Index of the row of X to be swapped. n : int Index of the row of X to be swapped. " 1543,"def test_render_empty_table(self): block = TableBlock() result = block.render( { ""first_row_is_table_header"": False, ""first_col_is_header"": False, ""data"": [[None, None, None], [None, None, None], [None, None, None]], } ) expected = self.assertHTMLEqual(result, expected) "," An empty table should render okay.
    ",13,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_render_empty_table(self): block = TableBlock() result = block.render( { ""first_row_is_table_header"": False, ""first_col_is_header"": False, ""data"": [[None, None, None], [None, None, None], [None, None, None]], } ) expected = self.assertHTMLEqual(result, expected) ``` ###Assistant : An empty table should render okay.
    " 1544,"def rot_axis3(theta): ct = cos(theta) st = sin(theta) lil = ((ct, st, 0), (-st, ct, 0), (0, 0, 1)) return Matrix(lil) ","Returns a rotation matrix for a rotation of theta (in radians) about the 3-axis. Examples ======== >>> from sympy import pi, rot_axis3 A rotation of pi/3 (60 degrees): >>> theta = pi/3 >>> rot_axis3(theta) Matrix([ [ 1/2, sqrt(3)/2, 0], [-sqrt(3)/2, 1/2, 0], [ 0, 0, 1]]) If we rotate by pi/2 (90 degrees): >>> rot_axis3(pi/2) Matrix([ [ 0, 1, 0], [-1, 0, 0], [ 0, 0, 1]]) See Also ======== rot_axis1: Returns a rotation matrix for a rotation of theta (in radians) about the 1-axis rot_axis2: Returns a rotation matrix for a rotation of theta (in radians) about the 2-axis ",100,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rot_axis3(theta): ct = cos(theta) st = sin(theta) lil = ((ct, st, 0), (-st, ct, 0), (0, 0, 1)) return Matrix(lil) ``` ###Assistant : Returns a rotation matrix for a rotation of theta (in radians) about the 3-axis. Examples ======== >>> from sympy import pi, rot_axis3 A rotation of pi/3 (60 degrees): >>> theta = pi/3 >>> rot_axis3(theta) Matrix([ [ 1/2, sqrt(3)/2, 0], [-sqrt(3)/2, 1/2, 0], [ 0, 0, 1]]) If we rotate by pi/2 (90 degrees): >>> rot_axis3(pi/2) Matrix([ [ 0, 1, 0], [-1, 0, 0], [ 0, 0, 1]]) See Also ======== rot_axis1: Returns a rotation matrix for a rotation of theta (in radians) about the 1-axis rot_axis2: Returns a rotation matrix for a rotation of theta (in radians) about the 2-axis " 1545,"def current(self): rv = self._current or '0' if not isinstance(rv, str): rv = bin(rv)[2:] return rv.rjust(self.n, '0') "," Returns the currently referenced Gray code as a bit string. Examples ======== >>> from sympy.combinatorics import GrayCode >>> GrayCode(3, start='100').current '100' ",21,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def current(self): rv = self._current or '0' if not isinstance(rv, str): rv = bin(rv)[2:] return rv.rjust(self.n, '0') ``` ###Assistant : Returns the currently referenced Gray code as a bit string. Examples ======== >>> from sympy.combinatorics import GrayCode >>> GrayCode(3, start='100').current '100' " 1546,"def upsample_conv_2d(x, w, k=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Check weight shape. assert len(w.shape) == 4 convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor**2)) p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. stride = [1, 1, factor, factor] output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW) output_padding = ( output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH, output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 num_groups = _shape(x, 1) // inC # Transpose weights. w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4) w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0) # Original TF code. 
# x = tf.nn.conv2d_transpose( # x, # w, # output_shape=output_shape, # strides=stride, # padding='VALID', # data_format=data_format) # JAX equivalent return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) ","Fused `upsample_2d()` followed by `tf.nn.conv2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`. ",139,210,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def upsample_conv_2d(x, w, k=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Check weight shape. assert len(w.shape) == 4 convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor**2)) p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. stride = [1, 1, factor, factor] output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW) output_padding = ( output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH, output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 num_groups = _shape(x, 1) // inC # Transpose weights. w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4) w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0) # Original TF code. # x = tf.nn.conv2d_transpose( # x, # w, # output_shape=output_shape, # strides=stride, # padding='VALID', # data_format=data_format) # JAX equivalent return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) ``` ###Assistant : Fused `upsample_2d()` followed by `tf.nn.conv2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`. 
" 1547,"def read(self, size=None): if size is None: t = [] while True: buf = self._read(self.bufsize) if not buf: break t.append(buf) buf = """".join(t) else: buf = self._read(size) self.pos += len(buf) return buf ","Return the next size number of bytes from the stream. If size is not defined, return all bytes of the stream up to EOF. ",24,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read(self, size=None): if size is None: t = [] while True: buf = self._read(self.bufsize) if not buf: break t.append(buf) buf = """".join(t) else: buf = self._read(size) self.pos += len(buf) return buf ``` ###Assistant : Return the next size number of bytes from the stream. If size is not defined, return all bytes of the stream up to EOF. " 1548,"def state(self) -> Mapping[str, Any]: if self._cursor_value: return { self.cursor_field: self._cursor_value, ""include_deleted"": self._include_deleted, } return {} ","State getter, get current state and serialize it to emmit Airbyte STATE message",13,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def state(self) -> Mapping[str, Any]: if self._cursor_value: return { self.cursor_field: self._cursor_value, ""include_deleted"": self._include_deleted, } return {} ``` ###Assistant : State getter, get current state and serialize it to emmit Airbyte STATE message " 1549,"def set_task_factory(self, factory): if factory is not None and not callable(factory): raise TypeError('task factory must be a callable or None') self._task_factory = factory ","Set a task factory that will be used by loop.create_task(). If factory is None the default task factory will be set. If factory is a callable, it should have a signature matching '(loop, coro)', where 'loop' will be a reference to the active event loop, 'coro' will be a coroutine object. The callable must return a Future. ",57,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_task_factory(self, factory): if factory is not None and not callable(factory): raise TypeError('task factory must be a callable or None') self._task_factory = factory ``` ###Assistant : Set a task factory that will be used by loop.create_task(). If factory is None the default task factory will be set. If factory is a callable, it should have a signature matching '(loop, coro)', where 'loop' will be a reference to the active event loop, 'coro' will be a coroutine object. The callable must return a Future. " 1550,"def date(self) -> npt.NDArray[np.object_]: # If the Timestamps have a timezone that is not UTC, # convert them into their i8 representation while # keeping their timezone and not using UTC timestamps = self._local_timestamps() return ints_to_pydatetime(timestamps, box=""date"") "," Returns numpy array of python :class:`datetime.date` objects. Namely, the date part of Timestamps without time and timezone information. ",18,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def date(self) -> npt.NDArray[np.object_]: # If the Timestamps have a timezone that is not UTC, # convert them into their i8 representation while # keeping their timezone and not using UTC timestamps = self._local_timestamps() return ints_to_pydatetime(timestamps, box=""date"") ``` ###Assistant : Returns numpy array of python :class:`datetime.date` objects. 
Namely, the date part of Timestamps without time and timezone information. " 1551,"def apply_transparency(self): if self.mode != ""P"" or ""transparency"" not in self.info: return from . import ImagePalette palette = self.getpalette(""RGBA"") transparency = self.info[""transparency""] if isinstance(transparency, bytes): for i, alpha in enumerate(transparency): palette[i * 4 + 3] = alpha else: palette[transparency * 4 + 3] = 0 self.palette = ImagePalette.ImagePalette(""RGBA"", bytes(palette)) self.palette.dirty = 1 del self.info[""transparency""] "," If a P mode image has a ""transparency"" key in the info dictionary, remove the key and apply the transparency to the palette instead. ",24,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply_transparency(self): if self.mode != ""P"" or ""transparency"" not in self.info: return from . import ImagePalette palette = self.getpalette(""RGBA"") transparency = self.info[""transparency""] if isinstance(transparency, bytes): for i, alpha in enumerate(transparency): palette[i * 4 + 3] = alpha else: palette[transparency * 4 + 3] = 0 self.palette = ImagePalette.ImagePalette(""RGBA"", bytes(palette)) self.palette.dirty = 1 del self.info[""transparency""] ``` ###Assistant : If a P mode image has a ""transparency"" key in the info dictionary, remove the key and apply the transparency to the palette instead. " 1552,"def get_frontend_app_asset_url(module, key): args = (settings.STATIC_FRONTEND_APP_URL.rstrip(""/""), module, key.lstrip(""/"")) return ""{}/{}/{}"".format(*args) "," Returns an asset URL that is unversioned. These assets should have a `Cache-Control: max-age=0, must-revalidate` so that clients must validate with the origin server before using their locally cached asset. Example: {% frontend_app_asset_url 'sentry' 'sentry.css' %} => ""/_static/dist/sentry/sentry.css"" ",38,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_frontend_app_asset_url(module, key): args = (settings.STATIC_FRONTEND_APP_URL.rstrip(""/""), module, key.lstrip(""/"")) return ""{}/{}/{}"".format(*args) ``` ###Assistant : Returns an asset URL that is unversioned. These assets should have a `Cache-Control: max-age=0, must-revalidate` so that clients must validate with the origin server before using their locally cached asset. Example: {% frontend_app_asset_url 'sentry' 'sentry.css' %} => ""/_static/dist/sentry/sentry.css"" " 1553,"def tridiagonal_solve(dl, d, du, b): r if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1: raise ValueError('dl, d and du must be vectors') if dl.shape != d.shape or d.shape != du.shape: raise ValueError( f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`') if b.ndim != 2: raise ValueError(f'b={b.shape} must be a matrix') m, = dl.shape if m < 3: raise ValueError(f'm ({m}) must be >= 3') ldb, n = b.shape if ldb < max(1, m): raise ValueError(f'Leading dimension of b={ldb} must be ≥ max(1, {m})') if dl.dtype != d.dtype or d.dtype != du.dtype or du.dtype != b.dtype: raise ValueError(f'dl={dl.dtype}, d={d.dtype}, du={du.dtype} and ' f'b={b.dtype} must be the same dtype,') t = dl.dtype if t not in (np.float32, np.float64): raise ValueError(f'Only f32/f64 are supported, got {t}') return tridiagonal_solve_p.bind(dl, d, du, b, m=m, n=n, ldb=ldb, t=t) # Schur Decomposition ","Computes the solution of a tridiagonal linear system. This function computes the solution of a tridiagonal linear system: .. 
math:: A . X = B Args: dl: The lower diagonal of A: ``dl[i] := A[i, i-1]`` for i in ``[0,m)``. Note that ``dl[0] = 0``. d: The middle diagnoal of A: ``d[i] := A[i, i]`` for i in ``[0,m)``. du: The upper diagonal of A: ``du[i] := A[i, i+1]`` for i in ``[0,m)``. Note that ``dl[m - 1] = 0``. b: Right hand side matrix. Returns: Solution ``X`` of tridiagonal system. ",91,139,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tridiagonal_solve(dl, d, du, b): r if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1: raise ValueError('dl, d and du must be vectors') if dl.shape != d.shape or d.shape != du.shape: raise ValueError( f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`') if b.ndim != 2: raise ValueError(f'b={b.shape} must be a matrix') m, = dl.shape if m < 3: raise ValueError(f'm ({m}) must be >= 3') ldb, n = b.shape if ldb < max(1, m): raise ValueError(f'Leading dimension of b={ldb} must be ≥ max(1, {m})') if dl.dtype != d.dtype or d.dtype != du.dtype or du.dtype != b.dtype: raise ValueError(f'dl={dl.dtype}, d={d.dtype}, du={du.dtype} and ' f'b={b.dtype} must be the same dtype,') t = dl.dtype if t not in (np.float32, np.float64): raise ValueError(f'Only f32/f64 are supported, got {t}') return tridiagonal_solve_p.bind(dl, d, du, b, m=m, n=n, ldb=ldb, t=t) # Schur Decomposition ``` ###Assistant : Computes the solution of a tridiagonal linear system. This function computes the solution of a tridiagonal linear system: .. math:: A . X = B Args: dl: The lower diagonal of A: ``dl[i] := A[i, i-1]`` for i in ``[0,m)``. Note that ``dl[0] = 0``. d: The middle diagnoal of A: ``d[i] := A[i, i]`` for i in ``[0,m)``. du: The upper diagonal of A: ``du[i] := A[i, i+1]`` for i in ``[0,m)``. Note that ``dl[m - 1] = 0``. b: Right hand side matrix. Returns: Solution ``X`` of tridiagonal system. " 1554,"def _get_dependency_info() -> dict[str, JSONSerializable]: deps = [ ""pandas"", # required ""numpy"", ""pytz"", ""dateutil"", # install / build, ""setuptools"", ""pip"", ""Cython"", # test ""pytest"", ""hypothesis"", # docs ""sphinx"", # Other, need a min version ""blosc"", ""feather"", ""xlsxwriter"", ""lxml.etree"", ""html5lib"", ""pymysql"", ""psycopg2"", ""jinja2"", # Other, not imported. ""IPython"", ""pandas_datareader"", ] deps.extend(list(VERSIONS)) result: dict[str, JSONSerializable] = {} for modname in deps: mod = import_optional_dependency(modname, errors=""ignore"") result[modname] = get_version(mod) if mod else None return result "," Returns dependency information as a JSON serializable dictionary. ",8,72,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_dependency_info() -> dict[str, JSONSerializable]: deps = [ ""pandas"", # required ""numpy"", ""pytz"", ""dateutil"", # install / build, ""setuptools"", ""pip"", ""Cython"", # test ""pytest"", ""hypothesis"", # docs ""sphinx"", # Other, need a min version ""blosc"", ""feather"", ""xlsxwriter"", ""lxml.etree"", ""html5lib"", ""pymysql"", ""psycopg2"", ""jinja2"", # Other, not imported. ""IPython"", ""pandas_datareader"", ] deps.extend(list(VERSIONS)) result: dict[str, JSONSerializable] = {} for modname in deps: mod = import_optional_dependency(modname, errors=""ignore"") result[modname] = get_version(mod) if mod else None return result ``` ###Assistant : Returns dependency information as a JSON serializable dictionary. 
" 1555,"def execute(): company_list = frappe.get_all(""Company"", filters={""country"": ""Germany""}) for company in company_list: party_account_list = frappe.get_all( ""Party Account"", filters={""company"": company.name}, fields=[""name"", ""account"", ""debtor_creditor_number""], ) for party_account in party_account_list: if (not party_account.account) or party_account.debtor_creditor_number: # account empty or debtor_creditor_number already filled continue account_number = frappe.db.get_value(""Account"", party_account.account, ""account_number"") if not account_number: continue frappe.db.set_value( ""Party Account"", party_account.name, ""debtor_creditor_number"", account_number ) frappe.db.set_value(""Party Account"", party_account.name, ""account"", """") ","Move account number into the new custom field debtor_creditor_number. German companies used to use a dedicated payable/receivable account for every party to mimick party accounts in the external accounting software ""DATEV"". This is no longer necessary. The reference ID for DATEV will be stored in a new custom field ""debtor_creditor_number"". ",50,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute(): company_list = frappe.get_all(""Company"", filters={""country"": ""Germany""}) for company in company_list: party_account_list = frappe.get_all( ""Party Account"", filters={""company"": company.name}, fields=[""name"", ""account"", ""debtor_creditor_number""], ) for party_account in party_account_list: if (not party_account.account) or party_account.debtor_creditor_number: # account empty or debtor_creditor_number already filled continue account_number = frappe.db.get_value(""Account"", party_account.account, ""account_number"") if not account_number: continue frappe.db.set_value( ""Party Account"", party_account.name, ""debtor_creditor_number"", account_number ) frappe.db.set_value(""Party Account"", party_account.name, ""account"", """") ``` ###Assistant : Move account number into the new custom field debtor_creditor_number. German companies used to use a dedicated payable/receivable account for every party to mimick party accounts in the external accounting software ""DATEV"". This is no longer necessary. The reference ID for DATEV will be stored in a new custom field ""debtor_creditor_number"". " 1556,"def compute_or_load(self, wav_file): pitch_file = self.create_pitch_file_path(wav_file, self.cache_path) if not os.path.exists(pitch_file): pitch = self._compute_and_save_pitch(self.ap, wav_file, pitch_file) else: pitch = np.load(pitch_file) return pitch.astype(np.float32) "," compute pitch and return a numpy array of pitch values ",10,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def compute_or_load(self, wav_file): pitch_file = self.create_pitch_file_path(wav_file, self.cache_path) if not os.path.exists(pitch_file): pitch = self._compute_and_save_pitch(self.ap, wav_file, pitch_file) else: pitch = np.load(pitch_file) return pitch.astype(np.float32) ``` ###Assistant : compute pitch and return a numpy array of pitch values " 1557,"def check_output(self, want, got, optionflags): # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # TODO parse integers as well ? # Parse floats and compare them. If some of the parsed floats contain # ellipses, skip the comparison. 
matches = self.num_got_rgx.finditer(got) numbers_got = [match.group(1) for match in matches] # list of strs matches = self.num_want_rgx.finditer(want) numbers_want = [match.group(1) for match in matches] # list of strs if len(numbers_got) != len(numbers_want): return False if len(numbers_got) > 0: nw_ = [] for ng, nw in zip(numbers_got, numbers_want): if '...' in nw: nw_.append(ng) continue else: nw_.append(nw) if abs(float(ng)-float(nw)) > 1e-5: return False got = self.num_got_rgx.sub(r'%s', got) got = got % tuple(nw_) # can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE): # Replace in want with a blank line. want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub(r'(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. if optionflags & pdoctest.NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag says to let the sequence ""..."" in `want` # match any substring in `got`. if optionflags & pdoctest.ELLIPSIS: if pdoctest._ellipsis_match(want, got): return True # We didn't find any match; return false. return False "," Return True iff the actual output from an example (`got`) matches the expected output (`want`). These strings are always considered to match if they are identical; but depending on what option flags the test runner is using, several non-exact match types are also possible. See the documentation for `TestRunner` for more information about option flags. ",55,272,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_output(self, want, got, optionflags): # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # TODO parse integers as well ? # Parse floats and compare them. If some of the parsed floats contain # ellipses, skip the comparison. matches = self.num_got_rgx.finditer(got) numbers_got = [match.group(1) for match in matches] # list of strs matches = self.num_want_rgx.finditer(want) numbers_want = [match.group(1) for match in matches] # list of strs if len(numbers_got) != len(numbers_want): return False if len(numbers_got) > 0: nw_ = [] for ng, nw in zip(numbers_got, numbers_want): if '...' in nw: nw_.append(ng) continue else: nw_.append(nw) if abs(float(ng)-float(nw)) > 1e-5: return False got = self.num_got_rgx.sub(r'%s', got) got = got % tuple(nw_) # can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE): # Replace in want with a blank line. want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub(r'(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. 
if optionflags & pdoctest.NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag says to let the sequence ""..."" in `want` # match any substring in `got`. if optionflags & pdoctest.ELLIPSIS: if pdoctest._ellipsis_match(want, got): return True # We didn't find any match; return false. return False ``` ###Assistant : Return True iff the actual output from an example (`got`) matches the expected output (`want`). These strings are always considered to match if they are identical; but depending on what option flags the test runner is using, several non-exact match types are also possible. See the documentation for `TestRunner` for more information about option flags. " 1558,"def start(self, workflow_state, user=None): task_state = self.get_task_state_class()(workflow_state=workflow_state) task_state.status = TaskState.STATUS_IN_PROGRESS task_state.page_revision = workflow_state.page.get_latest_revision() task_state.task = self task_state.save() task_submitted.send( sender=task_state.specific.__class__, instance=task_state.specific, user=user, ) return task_state ",Start this task on the provided workflow state by creating an instance of TaskState,14,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def start(self, workflow_state, user=None): task_state = self.get_task_state_class()(workflow_state=workflow_state) task_state.status = TaskState.STATUS_IN_PROGRESS task_state.page_revision = workflow_state.page.get_latest_revision() task_state.task = self task_state.save() task_submitted.send( sender=task_state.specific.__class__, instance=task_state.specific, user=user, ) return task_state ``` ###Assistant : Start this task on the provided workflow state by creating an instance of TaskState " 1559,"def get_datev_csv(data, filters, csv_class): empty_df = pd.DataFrame(columns=csv_class.COLUMNS) data_df = pd.DataFrame.from_records(data) result = empty_df.append(data_df, sort=True) if csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS: result[""Belegdatum""] = pd.to_datetime(result[""Belegdatum""]) result[""Beleginfo - Inhalt 6""] = pd.to_datetime(result[""Beleginfo - Inhalt 6""]) result[""Beleginfo - Inhalt 6""] = result[""Beleginfo - Inhalt 6""].dt.strftime(""%d%m%Y"") result[""Fälligkeit""] = pd.to_datetime(result[""Fälligkeit""]) result[""Fälligkeit""] = result[""Fälligkeit""].dt.strftime(""%d%m%y"") result.sort_values(by=""Belegdatum"", inplace=True, kind=""stable"", ignore_index=True) if csv_class.DATA_CATEGORY == DataCategory.ACCOUNT_NAMES: result[""Sprach-ID""] = ""de-DE"" data = result.to_csv( # Reason for str(';'): https://github.com/pandas-dev/pandas/issues/6035 sep="";"", # European decimal seperator decimal="","", # Windows ""ANSI"" encoding encoding=""latin_1"", # format date as DDMM date_format=""%d%m"", # Windows line terminator line_terminator=""\r\n"", # Do not number rows index=False, # Use all columns defined above columns=csv_class.COLUMNS, # Quote most fields, even currency values with "","" separator quoting=QUOTE_NONNUMERIC, ) data = data.encode(""latin_1"", errors=""replace"") header = get_header(filters, csv_class) header = "";"".join(header).encode(""latin_1"", errors=""replace"") # 1st Row: Header with meta data # 2nd Row: Data heading (Überschrift der Nutzdaten), included in `data` here. # 3rd - nth Row: Data (Nutzdaten) return header + b""\r\n"" + data "," Fill in missing columns and return a CSV in DATEV Format. 
For automatic processing, DATEV requires the first line of the CSV file to hold meta data such as the length of account numbers oder the category of the data. Arguments: data -- array of dictionaries filters -- dict csv_class -- defines DATA_CATEGORY, FORMAT_NAME and COLUMNS ",56,155,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_datev_csv(data, filters, csv_class): empty_df = pd.DataFrame(columns=csv_class.COLUMNS) data_df = pd.DataFrame.from_records(data) result = empty_df.append(data_df, sort=True) if csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS: result[""Belegdatum""] = pd.to_datetime(result[""Belegdatum""]) result[""Beleginfo - Inhalt 6""] = pd.to_datetime(result[""Beleginfo - Inhalt 6""]) result[""Beleginfo - Inhalt 6""] = result[""Beleginfo - Inhalt 6""].dt.strftime(""%d%m%Y"") result[""Fälligkeit""] = pd.to_datetime(result[""Fälligkeit""]) result[""Fälligkeit""] = result[""Fälligkeit""].dt.strftime(""%d%m%y"") result.sort_values(by=""Belegdatum"", inplace=True, kind=""stable"", ignore_index=True) if csv_class.DATA_CATEGORY == DataCategory.ACCOUNT_NAMES: result[""Sprach-ID""] = ""de-DE"" data = result.to_csv( # Reason for str(';'): https://github.com/pandas-dev/pandas/issues/6035 sep="";"", # European decimal seperator decimal="","", # Windows ""ANSI"" encoding encoding=""latin_1"", # format date as DDMM date_format=""%d%m"", # Windows line terminator line_terminator=""\r\n"", # Do not number rows index=False, # Use all columns defined above columns=csv_class.COLUMNS, # Quote most fields, even currency values with "","" separator quoting=QUOTE_NONNUMERIC, ) data = data.encode(""latin_1"", errors=""replace"") header = get_header(filters, csv_class) header = "";"".join(header).encode(""latin_1"", errors=""replace"") # 1st Row: Header with meta data # 2nd Row: Data heading (Überschrift der Nutzdaten), included in `data` here. # 3rd - nth Row: Data (Nutzdaten) return header + b""\r\n"" + data ``` ###Assistant : Fill in missing columns and return a CSV in DATEV Format. For automatic processing, DATEV requires the first line of the CSV file to hold meta data such as the length of account numbers oder the category of the data. Arguments: data -- array of dictionaries filters -- dict csv_class -- defines DATA_CATEGORY, FORMAT_NAME and COLUMNS " 1560,"def CircularSymplecticEnsemble(sym, dim): sym, dim = _symbol_converter(sym), _sympify(dim) model = CircularSymplecticEnsembleModel(sym, dim) rmp = RandomMatrixPSpace(sym, model=model) return RandomMatrixSymbol(sym, dim, dim, pspace=rmp) "," Represents Circular Symplectic Ensembles. Examples ======== >>> from sympy.stats import CircularSymplecticEnsemble as CSE >>> from sympy.stats import joint_eigen_distribution >>> C = CSE('S', 1) >>> joint_eigen_distribution(C) Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi)) Note ==== As can be seen above in the example, density of CiruclarSymplecticEnsemble is not evaluated because the exact definition is based on haar measure of unitary group which is not unique. ",69,21,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def CircularSymplecticEnsemble(sym, dim): sym, dim = _symbol_converter(sym), _sympify(dim) model = CircularSymplecticEnsembleModel(sym, dim) rmp = RandomMatrixPSpace(sym, model=model) return RandomMatrixSymbol(sym, dim, dim, pspace=rmp) ``` ###Assistant : Represents Circular Symplectic Ensembles. Examples ======== >>> from sympy.stats import CircularSymplecticEnsemble as CSE >>> from sympy.stats import joint_eigen_distribution >>> C = CSE('S', 1) >>> joint_eigen_distribution(C) Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi)) Note ==== As can be seen above in the example, density of CiruclarSymplecticEnsemble is not evaluated because the exact definition is based on haar measure of unitary group which is not unique. " 1561,"def _executor_config_comparator(x, y): try: return x == y except AttributeError: return False "," The TaskInstance.executor_config attribute is a pickled object that may contain kubernetes objects. If the installed library version has changed since the object was originally pickled, due to the underlying ``__eq__`` method on these objects (which converts them to JSON), we may encounter attribute errors. In this case we should replace the stored object. ",53,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _executor_config_comparator(x, y): try: return x == y except AttributeError: return False ``` ###Assistant : The TaskInstance.executor_config attribute is a pickled object that may contain kubernetes objects. If the installed library version has changed since the object was originally pickled, due to the underlying ``__eq__`` method on these objects (which converts them to JSON), we may encounter attribute errors. In this case we should replace the stored object. " 1562,"def test_color_temperature_to_rgbww(): # Coldest color temperature -> only cold channel enabled assert color_util.color_temperature_to_rgbww(6535, 255, 2000, 6535) == ( 0, 0, 0, 255, 0, ) assert color_util.color_temperature_to_rgbww(6535, 128, 2000, 6535) == ( 0, 0, 0, 128, 0, ) # Warmest color temperature -> only cold channel enabled assert color_util.color_temperature_to_rgbww(2000, 255, 2000, 6535) == ( 0, 0, 0, 0, 255, ) assert color_util.color_temperature_to_rgbww(2000, 128, 2000, 6535) == ( 0, 0, 0, 0, 128, ) # Warmer than mid point color temperature -> More warm than cold channel enabled assert color_util.color_temperature_to_rgbww(2881, 255, 2000, 6535) == ( 0, 0, 0, 112, 143, ) assert color_util.color_temperature_to_rgbww(2881, 128, 2000, 6535) == ( 0, 0, 0, 56, 72, ) ","Test color temp to warm, cold conversion. Temperature values must be in mireds Home Assistant uses rgbcw for rgbww ",19,112,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_color_temperature_to_rgbww(): # Coldest color temperature -> only cold channel enabled assert color_util.color_temperature_to_rgbww(6535, 255, 2000, 6535) == ( 0, 0, 0, 255, 0, ) assert color_util.color_temperature_to_rgbww(6535, 128, 2000, 6535) == ( 0, 0, 0, 128, 0, ) # Warmest color temperature -> only cold channel enabled assert color_util.color_temperature_to_rgbww(2000, 255, 2000, 6535) == ( 0, 0, 0, 0, 255, ) assert color_util.color_temperature_to_rgbww(2000, 128, 2000, 6535) == ( 0, 0, 0, 0, 128, ) # Warmer than mid point color temperature -> More warm than cold channel enabled assert color_util.color_temperature_to_rgbww(2881, 255, 2000, 6535) == ( 0, 0, 0, 112, 143, ) assert color_util.color_temperature_to_rgbww(2881, 128, 2000, 6535) == ( 0, 0, 0, 56, 72, ) ``` ###Assistant : Test color temp to warm, cold conversion. Temperature values must be in mireds Home Assistant uses rgbcw for rgbww " 1563,"def test_set_page_config_first(self): fake_enqueue = lambda msg: None ctx = ScriptRunContext( ""TestSessionID"", fake_enqueue, """", SessionState(), UploadedFileManager(), ) ctx.on_script_start() markdown_msg = ForwardMsg() markdown_msg.delta.new_element.markdown.body = ""foo"" msg = ForwardMsg() msg.page_config_changed.title = ""foo"" ctx.enqueue(markdown_msg) with self.assertRaises(StreamlitAPIException): ctx.enqueue(msg) ","st.set_page_config must be called before other st commands when the script has been marked as started",16,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_set_page_config_first(self): fake_enqueue = lambda msg: None ctx = ScriptRunContext( ""TestSessionID"", fake_enqueue, """", SessionState(), UploadedFileManager(), ) ctx.on_script_start() markdown_msg = ForwardMsg() markdown_msg.delta.new_element.markdown.body = ""foo"" msg = ForwardMsg() msg.page_config_changed.title = ""foo"" ctx.enqueue(markdown_msg) with self.assertRaises(StreamlitAPIException): ctx.enqueue(msg) ``` ###Assistant : st.set_page_config must be called before other st commands when the script has been marked as started " 1564,"def set_vars(self) -> None: tk_vars = super().set_vars() smoothgraph = tk.DoubleVar() smoothgraph.set(0.900) tk_vars[""smoothgraph""] = smoothgraph raw_var = tk.BooleanVar() raw_var.set(True) tk_vars[""raw_data""] = raw_var smooth_var = tk.BooleanVar() smooth_var.set(True) tk_vars[""smooth_data""] = smooth_var iterations_var = tk.IntVar() iterations_var.set(10000) tk_vars[""display_iterations""] = iterations_var logger.debug(tk_vars) return tk_vars "," Add graphing specific variables to the default variables. Overrides original method. Returns ------- dict The variable names with their corresponding tkinter variable ",22,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_vars(self) -> None: tk_vars = super().set_vars() smoothgraph = tk.DoubleVar() smoothgraph.set(0.900) tk_vars[""smoothgraph""] = smoothgraph raw_var = tk.BooleanVar() raw_var.set(True) tk_vars[""raw_data""] = raw_var smooth_var = tk.BooleanVar() smooth_var.set(True) tk_vars[""smooth_data""] = smooth_var iterations_var = tk.IntVar() iterations_var.set(10000) tk_vars[""display_iterations""] = iterations_var logger.debug(tk_vars) return tk_vars ``` ###Assistant : Add graphing specific variables to the default variables. Overrides original method. 
Returns ------- dict The variable names with their corresponding tkinter variable " 1565,"def _configure_kubernetes_library_client(self) -> None: # TODO: Investigate returning a configured client so calls on other threads # will not invalidate the config needed here # if a k8s cluster block is provided to the flow runner, use that if self.cluster_config: self.cluster_config.configure_client() else: # If no block specified, try to load Kubernetes configuration within a cluster. If that doesn't # work, try to load the configuration from the local environment, allowing # any further ConfigExceptions to bubble up. try: kubernetes.config.load_incluster_config() except kubernetes.config.ConfigException: kubernetes.config.load_kube_config() "," Set the correct kubernetes client configuration. WARNING: This action is not threadsafe and may override the configuration specified by another `KubernetesJob` instance. ",22,82,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _configure_kubernetes_library_client(self) -> None: # TODO: Investigate returning a configured client so calls on other threads # will not invalidate the config needed here # if a k8s cluster block is provided to the flow runner, use that if self.cluster_config: self.cluster_config.configure_client() else: # If no block specified, try to load Kubernetes configuration within a cluster. If that doesn't # work, try to load the configuration from the local environment, allowing # any further ConfigExceptions to bubble up. try: kubernetes.config.load_incluster_config() except kubernetes.config.ConfigException: kubernetes.config.load_kube_config() ``` ###Assistant : Set the correct kubernetes client configuration. WARNING: This action is not threadsafe and may override the configuration specified by another `KubernetesJob` instance. " 1566,"def test_runs_alert_rule_action_creator(self, mock_alert_rule_action_creator): self.login_as(user=self.user) project = self.create_project() self.create_sentry_app( name=""Pied Piper"", organization=project.organization, schema={""elements"": [self.create_alert_rule_action_schema()]}, ) install = self.create_sentry_app_installation( slug=""pied-piper"", organization=project.organization ) actions = [ { ""id"": ""sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction"", ""settings"": [ {""name"": ""title"", ""value"": ""Team Rocket""}, {""name"": ""summary"", ""value"": ""We're blasting off again.""}, ], ""sentryAppInstallationUuid"": install.uuid, ""hasSchemaFormConfig"": True, }, ] url = reverse( ""sentry-api-0-project-rules"", kwargs={""organization_slug"": project.organization.slug, ""project_slug"": project.slug}, ) response = self.client.post( url, data={ ""name"": ""my super cool rule"", ""owner"": f""user:{self.user.id}"", ""conditions"": [], ""filters"": [], ""actions"": actions, ""filterMatch"": ""any"", ""actionMatch"": ""any"", ""frequency"": 30, }, format=""json"", ) assert response.status_code == 200, response.content assert response.data[""id""] rule = Rule.objects.get(id=response.data[""id""]) assert rule.data[""actions""] == actions kwargs = { ""install"": install, ""fields"": actions[0].get(""settings""), } call_kwargs = mock_alert_rule_action_creator.call_args[1] assert call_kwargs[""install""].id == kwargs[""install""].id assert call_kwargs[""fields""] == kwargs[""fields""] "," Ensures that Sentry Apps with schema forms (UI components) receive a payload when an alert rule is created with them. 
",20,116,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_runs_alert_rule_action_creator(self, mock_alert_rule_action_creator): self.login_as(user=self.user) project = self.create_project() self.create_sentry_app( name=""Pied Piper"", organization=project.organization, schema={""elements"": [self.create_alert_rule_action_schema()]}, ) install = self.create_sentry_app_installation( slug=""pied-piper"", organization=project.organization ) actions = [ { ""id"": ""sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction"", ""settings"": [ {""name"": ""title"", ""value"": ""Team Rocket""}, {""name"": ""summary"", ""value"": ""We're blasting off again.""}, ], ""sentryAppInstallationUuid"": install.uuid, ""hasSchemaFormConfig"": True, }, ] url = reverse( ""sentry-api-0-project-rules"", kwargs={""organization_slug"": project.organization.slug, ""project_slug"": project.slug}, ) response = self.client.post( url, data={ ""name"": ""my super cool rule"", ""owner"": f""user:{self.user.id}"", ""conditions"": [], ""filters"": [], ""actions"": actions, ""filterMatch"": ""any"", ""actionMatch"": ""any"", ""frequency"": 30, }, format=""json"", ) assert response.status_code == 200, response.content assert response.data[""id""] rule = Rule.objects.get(id=response.data[""id""]) assert rule.data[""actions""] == actions kwargs = { ""install"": install, ""fields"": actions[0].get(""settings""), } call_kwargs = mock_alert_rule_action_creator.call_args[1] assert call_kwargs[""install""].id == kwargs[""install""].id assert call_kwargs[""fields""] == kwargs[""fields""] ``` ###Assistant : Ensures that Sentry Apps with schema forms (UI components) receive a payload when an alert rule is created with them. " 1567,"def _determine_interval(self) -> int: intervals = {""default"": self._max_interval} for device in self._devices.values(): # Max interval if no location if device.location is None: continue current_zone = run_callback_threadsafe( self.hass.loop, async_active_zone, self.hass, device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], device.location[DEVICE_LOCATION_HORIZONTAL_ACCURACY], ).result() # Max interval if in zone if current_zone is not None: continue zones = ( self.hass.states.get(entity_id) for entity_id in sorted(self.hass.states.entity_ids(""zone"")) ) distances = [] for zone_state in zones: if zone_state is None: continue zone_state_lat = zone_state.attributes[DEVICE_LOCATION_LATITUDE] zone_state_long = zone_state.attributes[DEVICE_LOCATION_LONGITUDE] zone_distance = distance( device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], zone_state_lat, zone_state_long, ) if zone_distance is not None: distances.append(round(zone_distance / 1000, 1)) # Max interval if no zone if not distances: continue mindistance = min(distances) # Calculate out how long it would take for the device to drive # to the nearest zone at 120 km/h: interval = round(mindistance / 2) # Never poll more than once per minute interval = max(interval, 1) if interval > 180: # Three hour drive? 
# This is far enough that they might be flying interval = self._max_interval if ( device.battery_level is not None and device.battery_level <= 33 and mindistance > 3 ): # Low battery - let's check half as often interval = interval * 2 intervals[device.name] = interval return max( int(min(intervals.items(), key=operator.itemgetter(1))[1]), self._max_interval, ) ",Calculate new interval between two API fetch (in minutes).,9,199,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _determine_interval(self) -> int: intervals = {""default"": self._max_interval} for device in self._devices.values(): # Max interval if no location if device.location is None: continue current_zone = run_callback_threadsafe( self.hass.loop, async_active_zone, self.hass, device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], device.location[DEVICE_LOCATION_HORIZONTAL_ACCURACY], ).result() # Max interval if in zone if current_zone is not None: continue zones = ( self.hass.states.get(entity_id) for entity_id in sorted(self.hass.states.entity_ids(""zone"")) ) distances = [] for zone_state in zones: if zone_state is None: continue zone_state_lat = zone_state.attributes[DEVICE_LOCATION_LATITUDE] zone_state_long = zone_state.attributes[DEVICE_LOCATION_LONGITUDE] zone_distance = distance( device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], zone_state_lat, zone_state_long, ) if zone_distance is not None: distances.append(round(zone_distance / 1000, 1)) # Max interval if no zone if not distances: continue mindistance = min(distances) # Calculate out how long it would take for the device to drive # to the nearest zone at 120 km/h: interval = round(mindistance / 2) # Never poll more than once per minute interval = max(interval, 1) if interval > 180: # Three hour drive? # This is far enough that they might be flying interval = self._max_interval if ( device.battery_level is not None and device.battery_level <= 33 and mindistance > 3 ): # Low battery - let's check half as often interval = interval * 2 intervals[device.name] = interval return max( int(min(intervals.items(), key=operator.itemgetter(1))[1]), self._max_interval, ) ``` ###Assistant : Calculate new interval between two API fetch (in minutes). " 1568,"def prepare_cookies(self, cookies): if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: self.headers[""Cookie""] = cookie_header ","Prepares the given HTTP cookie data. This function eventually generates a ``Cookie`` header from the given cookies using cookielib. Due to cookielib's design, the header will not be regenerated if it already exists, meaning this function can only be called once for the life of the :class:`PreparedRequest ` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the ""Cookie"" header is removed beforehand. ",66,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def prepare_cookies(self, cookies): if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: self.headers[""Cookie""] = cookie_header ``` ###Assistant : Prepares the given HTTP cookie data. 
This function eventually generates a ``Cookie`` header from the given cookies using cookielib. Due to cookielib's design, the header will not be regenerated if it already exists, meaning this function can only be called once for the life of the :class:`PreparedRequest ` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the ""Cookie"" header is removed beforehand. " 1569,"def wait_for_contains_text(self, selector, text, timeout=None): return self._wait_for( method=contains_text, args=(selector, text), timeout=timeout, msg=f""text -> {text} not found inside element within {timeout or self._wait_timeout}s"", ) ","Explicit wait until the element's text contains the expected `text`. timeout if not set, equals to the fixture's `wait_timeout` shortcut to `WebDriverWait` with customized `contains_text` condition. ",26,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wait_for_contains_text(self, selector, text, timeout=None): return self._wait_for( method=contains_text, args=(selector, text), timeout=timeout, msg=f""text -> {text} not found inside element within {timeout or self._wait_timeout}s"", ) ``` ###Assistant : Explicit wait until the element's text contains the expected `text`. timeout if not set, equals to the fixture's `wait_timeout` shortcut to `WebDriverWait` with customized `contains_text` condition. " 1570,"def remove_member(self, label): if label not in list(self._members): raise ValueError(""No such member exists in the Truss"") else: self._nodes_occupied.pop(tuple([self._members[label][0], self._members[label][1]])) self._nodes_occupied.pop(tuple([self._members[label][1], self._members[label][0]])) self._members.pop(label) self._internal_forces.pop(label) "," This method removes a member from the given truss. Parameters ========== label: String or Symbol The label for the member to be removed. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.add_node('C', 2, 2) >>> t.add_member('AB', 'A', 'B') >>> t.add_member('AC', 'A', 'C') >>> t.add_member('BC', 'B', 'C') >>> t.members {'AB': ['A', 'B'], 'AC': ['A', 'C'], 'BC': ['B', 'C']} >>> t.remove_member('AC') >>> t.members {'AB': ['A', 'B'], 'BC': ['B', 'C']} ",79,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def remove_member(self, label): if label not in list(self._members): raise ValueError(""No such member exists in the Truss"") else: self._nodes_occupied.pop(tuple([self._members[label][0], self._members[label][1]])) self._nodes_occupied.pop(tuple([self._members[label][1], self._members[label][0]])) self._members.pop(label) self._internal_forces.pop(label) ``` ###Assistant : This method removes a member from the given truss. Parameters ========== label: String or Symbol The label for the member to be removed. 
Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.add_node('C', 2, 2) >>> t.add_member('AB', 'A', 'B') >>> t.add_member('AC', 'A', 'C') >>> t.add_member('BC', 'B', 'C') >>> t.members {'AB': ['A', 'B'], 'AC': ['A', 'C'], 'BC': ['B', 'C']} >>> t.remove_member('AC') >>> t.members {'AB': ['A', 'B'], 'BC': ['B', 'C']} " 1571,"def _create_mock_app_session(*args, **kwargs): mock_id = mock.PropertyMock( return_value=""mock_id:%s"" % ServerTestCase._next_session_id ) ServerTestCase._next_session_id += 1 mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs) type(mock_session).id = mock_id return mock_session ","Create a mock AppSession. Each mocked instance will have its own unique ID.",13,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_mock_app_session(*args, **kwargs): mock_id = mock.PropertyMock( return_value=""mock_id:%s"" % ServerTestCase._next_session_id ) ServerTestCase._next_session_id += 1 mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs) type(mock_session).id = mock_id return mock_session ``` ###Assistant : Create a mock AppSession. Each mocked instance will have its own unique ID. " 1572,"def taggedsents_to_conll(sentences): for sentence in sentences: yield from taggedsent_to_conll(sentence) yield ""\n\n"" ###################################################################### # { Test Suites ###################################################################### "," A module to convert the a POS tagged document stream (i.e. list of list of tuples, a list of sentences) and yield lines in CONLL format. This module yields one line per word and two newlines for end of sentence. >>> from nltk import word_tokenize, sent_tokenize, pos_tag >>> text = ""This is a foobar sentence. Is that right?"" >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)] >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE ... if line: ... print(line, end="""") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ 1 Is _ VBZ VBZ _ 0 a _ _ 2 that _ IN IN _ 0 a _ _ 3 right _ NN NN _ 0 a _ _ 4 ? _ . . _ 0 a _ _ :param sentences: Input sentences to parse :type sentence: list(list(tuple(str, str))) :rtype: iter(str) :return: a generator yielding sentences in CONLL format. ",204,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def taggedsents_to_conll(sentences): for sentence in sentences: yield from taggedsent_to_conll(sentence) yield ""\n\n"" ###################################################################### # { Test Suites ###################################################################### ``` ###Assistant : A module to convert the a POS tagged document stream (i.e. list of list of tuples, a list of sentences) and yield lines in CONLL format. This module yields one line per word and two newlines for end of sentence. >>> from nltk import word_tokenize, sent_tokenize, pos_tag >>> text = ""This is a foobar sentence. Is that right?"" >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)] >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE ... if line: ... 
print(line, end="""") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ 1 Is _ VBZ VBZ _ 0 a _ _ 2 that _ IN IN _ 0 a _ _ 3 right _ NN NN _ 0 a _ _ 4 ? _ . . _ 0 a _ _ :param sentences: Input sentences to parse :type sentence: list(list(tuple(str, str))) :rtype: iter(str) :return: a generator yielding sentences in CONLL format. " 1573,"def _get_input_shape(self): arch = self.config[""enc_architecture""] enforce_size = _MODEL_MAPPING[arch].get(""enforce_for_weights"", False) default_size = _MODEL_MAPPING[arch][""default_size""] scaling = self.config[""enc_scaling""] / 100 min_size = _MODEL_MAPPING[arch].get(""min_size"", 32) size = int(max(min_size, min(default_size, ((default_size * scaling) // 16) * 16))) if self.config[""enc_load_weights""] and enforce_size and scaling != 1.0: logger.warning(""%s requires input size to be %spx when loading imagenet weights. "" ""Adjusting input size from %spx to %spx"", arch, default_size, size, default_size) retval = (default_size, default_size, 3) else: retval = (size, size, 3) logger.debug(""Encoder input set to: %s"", retval) return retval "," Obtain the input shape for the model. Input shape is calculated from the selected Encoder's input size, scaled to the user selected Input Scaling, rounded down to the nearest 16 pixels. Notes ----- Some models (NasNet) require the input size to be of a certain dimension if loading imagenet weights. In these instances resize inputs and raise warning message Returns ------- tuple The shape tuple for the input size to the Phaze-A model ",73,82,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_input_shape(self): arch = self.config[""enc_architecture""] enforce_size = _MODEL_MAPPING[arch].get(""enforce_for_weights"", False) default_size = _MODEL_MAPPING[arch][""default_size""] scaling = self.config[""enc_scaling""] / 100 min_size = _MODEL_MAPPING[arch].get(""min_size"", 32) size = int(max(min_size, min(default_size, ((default_size * scaling) // 16) * 16))) if self.config[""enc_load_weights""] and enforce_size and scaling != 1.0: logger.warning(""%s requires input size to be %spx when loading imagenet weights. "" ""Adjusting input size from %spx to %spx"", arch, default_size, size, default_size) retval = (default_size, default_size, 3) else: retval = (size, size, 3) logger.debug(""Encoder input set to: %s"", retval) return retval ``` ###Assistant : Obtain the input shape for the model. Input shape is calculated from the selected Encoder's input size, scaled to the user selected Input Scaling, rounded down to the nearest 16 pixels. Notes ----- Some models (NasNet) require the input size to be of a certain dimension if loading imagenet weights. 
In these instances resize inputs and raise warning message Returns ------- tuple The shape tuple for the input size to the Phaze-A model " 1574,"def get_form_options(self): options = {} if not getattr(self.widget_overrides, ""is_original_method"", False): warn( ""The `widget_overrides` method (on %r) is deprecated; "" ""these should be returned from `get_form_options` as a "" ""`widgets` item instead."" % type(self), category=RemovedInWagtail219Warning, ) options[""widgets""] = self.widget_overrides() if not getattr(self.required_fields, ""is_original_method"", False): warn( ""The `required_fields` method (on %r) is deprecated; "" ""these should be returned from `get_form_options` as a "" ""`fields` item instead."" % type(self), category=RemovedInWagtail219Warning, ) options[""fields""] = self.required_fields() if not getattr(self.required_formsets, ""is_original_method"", False): warn( ""The `required_formsets` method (on %r) is deprecated; "" ""these should be returned from `get_form_options` as a "" ""`formsets` item instead."" % type(self), category=RemovedInWagtail219Warning, ) options[""formsets""] = self.required_formsets() if not getattr(self.field_permissions, ""is_original_method"", False): warn( ""The `field_permissions` method (on %r) is deprecated; "" ""these should be returned from `get_form_options` as a "" ""`field_permissions` item instead."" % type(self), category=RemovedInWagtail219Warning, ) options[""field_permissions""] = self.field_permissions() return options # RemovedInWagtail219Warning - edit handlers should override get_form_options instead"," Return a dictionary of attributes such as 'fields', 'formsets' and 'widgets' which should be incorporated into the form class definition to generate a form that this EditHandler can use. This will only be called after binding to a model (i.e. self.model is available). ",43,148,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_form_options(self): options = {} if not getattr(self.widget_overrides, ""is_original_method"", False): warn( ""The `widget_overrides` method (on %r) is deprecated; "" ""these should be returned from `get_form_options` as a "" ""`widgets` item instead."" % type(self), category=RemovedInWagtail219Warning, ) options[""widgets""] = self.widget_overrides() if not getattr(self.required_fields, ""is_original_method"", False): warn( ""The `required_fields` method (on %r) is deprecated; "" ""these should be returned from `get_form_options` as a "" ""`fields` item instead."" % type(self), category=RemovedInWagtail219Warning, ) options[""fields""] = self.required_fields() if not getattr(self.required_formsets, ""is_original_method"", False): warn( ""The `required_formsets` method (on %r) is deprecated; "" ""these should be returned from `get_form_options` as a "" ""`formsets` item instead."" % type(self), category=RemovedInWagtail219Warning, ) options[""formsets""] = self.required_formsets() if not getattr(self.field_permissions, ""is_original_method"", False): warn( ""The `field_permissions` method (on %r) is deprecated; "" ""these should be returned from `get_form_options` as a "" ""`field_permissions` item instead."" % type(self), category=RemovedInWagtail219Warning, ) options[""field_permissions""] = self.field_permissions() return options # RemovedInWagtail219Warning - edit handlers should override get_form_options instead ``` ###Assistant : Return a dictionary of attributes such as 'fields', 'formsets' and 'widgets' which should be incorporated into the form class definition to generate a form that this EditHandler can use. This will only be called after binding to a model (i.e. self.model is available). " 1575,"def evaluate_links(self, link_evaluator, links): # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate] candidates = [] for link in self._sort_links(links): candidate = self.get_install_candidate(link_evaluator, link) if candidate is not None: candidates.append(candidate) return candidates "," Convert links that are candidates to InstallationCandidate objects. ",8,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def evaluate_links(self, link_evaluator, links): # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate] candidates = [] for link in self._sort_links(links): candidate = self.get_install_candidate(link_evaluator, link) if candidate is not None: candidates.append(candidate) return candidates ``` ###Assistant : Convert links that are candidates to InstallationCandidate objects. " 1576,"def columnarize(self): if len(self.columns) != 1 or ( len(self.index) == 1 and self.index[0] == MODIN_UNNAMED_SERIES_LABEL ): return self.transpose() return self "," Transpose this QueryCompiler if it has a single row but multiple columns. This method should be called for QueryCompilers representing a Series object, i.e. ``self.is_series_like()`` should be True. Returns ------- BaseQueryCompiler Transposed new QueryCompiler or self. ",36,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def columnarize(self): if len(self.columns) != 1 or ( len(self.index) == 1 and self.index[0] == MODIN_UNNAMED_SERIES_LABEL ): return self.transpose() return self ``` ###Assistant : Transpose this QueryCompiler if it has a single row but multiple columns. 
This method should be called for QueryCompilers representing a Series object, i.e. ``self.is_series_like()`` should be True. Returns ------- BaseQueryCompiler Transposed new QueryCompiler or self. " 1577,"def get_formatted_file_tags(self): # type: () -> List[str] return sorted(str(tag) for tag in self.file_tags) ",Return the wheel's tags as a sorted list of strings.,10,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_formatted_file_tags(self): # type: () -> List[str] return sorted(str(tag) for tag in self.file_tags) ``` ###Assistant : Return the wheel's tags as a sorted list of strings. " 1578,"def test_python_render(): syntax = Panel.fit( Syntax( CODE, lexer=""python"", line_numbers=True, line_range=(2, 10), theme=""monokai"", code_width=60, word_wrap=True, ), padding=0, ) rendered_syntax = render(syntax) print(repr(rendered_syntax)) expected = '╭─────────────────────────────────────────────────────────────────╮\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m 
\x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n╰─────────────────────────────────────────────────────────────────╯\n' assert rendered_syntax == expected ",Iterate and generate a tuple with a flag for first \x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[48;2;39;40;34m \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34mand last value.,15,85,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_python_render(): syntax = Panel.fit( Syntax( CODE, lexer=""python"", line_numbers=True, line_range=(2, 10), theme=""monokai"", code_width=60, word_wrap=True, ), padding=0, ) rendered_syntax = render(syntax) print(repr(rendered_syntax)) expected = '╭─────────────────────────────────────────────────────────────────╮\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m 
\x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n╰─────────────────────────────────────────────────────────────────╯\n' assert rendered_syntax == expected ``` ###Assistant : Iterate and generate a tuple with a flag for first \x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[48;2;39;40;34m \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34mand last value. " 1579,"def test_state_changes_during_period_multiple_entities_single_test(hass_recorder): hass = hass_recorder() start = dt_util.utcnow() test_entites = {f""sensor.{i}"": str(i) for i in range(30)} for entity_id, value in test_entites.items(): hass.states.set(entity_id, value) wait_recording_done(hass) end = dt_util.utcnow() hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value for entity_id, value in test_entites.items(): hist = history.state_changes_during_period(hass, start, end, entity_id) assert len(hist) == 1 hist[entity_id][0].state == value hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value ","Test state change during period with multiple entities in the same test. This test ensures the sqlalchemy query cache does not generate incorrect results. ",24,73,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_state_changes_during_period_multiple_entities_single_test(hass_recorder): hass = hass_recorder() start = dt_util.utcnow() test_entites = {f""sensor.{i}"": str(i) for i in range(30)} for entity_id, value in test_entites.items(): hass.states.set(entity_id, value) wait_recording_done(hass) end = dt_util.utcnow() hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value for entity_id, value in test_entites.items(): hist = history.state_changes_during_period(hass, start, end, entity_id) assert len(hist) == 1 hist[entity_id][0].state == value hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value ``` ###Assistant : Test state change during period with multiple entities in the same test. This test ensures the sqlalchemy query cache does not generate incorrect results. " 1580,"def O(self): # NOQA: E743, E741 if self._no_timezone_or_datetime_is_ambiguous_or_imaginary: return """" seconds = self.Z() sign = ""-"" if seconds < 0 else ""+"" seconds = abs(seconds) return ""%s%02d%02d"" % (sign, seconds // 3600, (seconds // 60) % 60) "," Difference to Greenwich time in hours; e.g. '+0200', '-0430'. If timezone information is not available, return an empty string. ",19,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def O(self): # NOQA: E743, E741 if self._no_timezone_or_datetime_is_ambiguous_or_imaginary: return """" seconds = self.Z() sign = ""-"" if seconds < 0 else ""+"" seconds = abs(seconds) return ""%s%02d%02d"" % (sign, seconds // 3600, (seconds // 60) % 60) ``` ###Assistant : Difference to Greenwich time in hours; e.g. '+0200', '-0430'. If timezone information is not available, return an empty string. " 1581,"def convert_mem_str_to_bytes(mem_str): # If there is no suffix, the memory sourced from the request is in bytes if mem_str.isdigit(): return int(mem_str) conversions = { 'Ei': lambda x: x * 2**60, 'E': lambda x: x * 10**18, 'Pi': lambda x: x * 2**50, 'P': lambda x: x * 10**15, 'Ti': lambda x: x * 2**40, 'T': lambda x: x * 10**12, 'Gi': lambda x: x * 2**30, 'G': lambda x: x * 10**9, 'Mi': lambda x: x * 2**20, 'M': lambda x: x * 10**6, 'Ki': lambda x: x * 2**10, 'K': lambda x: x * 10**3, } mem = 0 mem_unit = None for i, char in enumerate(mem_str): if not char.isdigit(): mem_unit = mem_str[i:] mem = int(mem_str[:i]) break if not mem_unit or mem_unit not in conversions.keys(): error = f""Unsupported value for SYSTEM_TASK_ABS_MEM: {mem_str}, memory must be expressed in bytes or with known suffix: {conversions.keys()}. Falling back to 1 byte"" logger.warning(error) return 1 return max(1, conversions[mem_unit](mem)) ","Convert string with suffix indicating units to memory in bytes (base 2) Useful for dealing with memory setting that may be expressed in units compatible with kubernetes. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory ",29,155,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def convert_mem_str_to_bytes(mem_str): # If there is no suffix, the memory sourced from the request is in bytes if mem_str.isdigit(): return int(mem_str) conversions = { 'Ei': lambda x: x * 2**60, 'E': lambda x: x * 10**18, 'Pi': lambda x: x * 2**50, 'P': lambda x: x * 10**15, 'Ti': lambda x: x * 2**40, 'T': lambda x: x * 10**12, 'Gi': lambda x: x * 2**30, 'G': lambda x: x * 10**9, 'Mi': lambda x: x * 2**20, 'M': lambda x: x * 10**6, 'Ki': lambda x: x * 2**10, 'K': lambda x: x * 10**3, } mem = 0 mem_unit = None for i, char in enumerate(mem_str): if not char.isdigit(): mem_unit = mem_str[i:] mem = int(mem_str[:i]) break if not mem_unit or mem_unit not in conversions.keys(): error = f""Unsupported value for SYSTEM_TASK_ABS_MEM: {mem_str}, memory must be expressed in bytes or with known suffix: {conversions.keys()}. Falling back to 1 byte"" logger.warning(error) return 1 return max(1, conversions[mem_unit](mem)) ``` ###Assistant : Convert string with suffix indicating units to memory in bytes (base 2) Useful for dealing with memory setting that may be expressed in units compatible with kubernetes. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory " 1582,"def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs): client = self.get_client() jira_issue = client.get_issue(external_issue.key) jira_project = jira_issue[""fields""][""project""] try: external_project = IntegrationExternalProject.objects.get( external_id=jira_project[""id""], organization_integration_id__in=OrganizationIntegration.objects.filter( organization_id=external_issue.organization_id, integration_id=external_issue.integration_id, ), ) except IntegrationExternalProject.DoesNotExist: return jira_status = ( external_project.resolved_status if is_resolved else external_project.unresolved_status ) # don't bother updating if it's already the status we'd change it to if jira_issue[""fields""][""status""][""id""] == jira_status: return try: transitions = client.get_transitions(external_issue.key) except ApiHostError: raise IntegrationError(""Could not reach host to get transitions."") try: transition = [t for t in transitions if t.get(""to"", {}).get(""id"") == jira_status][0] except IndexError: # TODO(jess): Email for failure logger.warning( ""jira.status-sync-fail"", extra={ ""organization_id"": external_issue.organization_id, ""integration_id"": external_issue.integration_id, ""issue_key"": external_issue.key, }, ) return client.transition_issue(external_issue.key, transition[""id""]) "," Propagate a sentry issue's status to a linked issue's status. ",10,103,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs): client = self.get_client() jira_issue = client.get_issue(external_issue.key) jira_project = jira_issue[""fields""][""project""] try: external_project = IntegrationExternalProject.objects.get( external_id=jira_project[""id""], organization_integration_id__in=OrganizationIntegration.objects.filter( organization_id=external_issue.organization_id, integration_id=external_issue.integration_id, ), ) except IntegrationExternalProject.DoesNotExist: return jira_status = ( external_project.resolved_status if is_resolved else external_project.unresolved_status ) # don't bother updating if it's already the status we'd change it to if jira_issue[""fields""][""status""][""id""] == jira_status: return try: transitions = client.get_transitions(external_issue.key) except ApiHostError: raise IntegrationError(""Could not reach host to get transitions."") try: transition = [t for t in transitions if t.get(""to"", {}).get(""id"") == jira_status][0] except IndexError: # TODO(jess): Email for failure logger.warning( ""jira.status-sync-fail"", extra={ ""organization_id"": external_issue.organization_id, ""integration_id"": external_issue.integration_id, ""issue_key"": external_issue.key, }, ) return client.transition_issue(external_issue.key, transition[""id""]) ``` ###Assistant : Propagate a sentry issue's status to a linked issue's status. " 1583,"def _sci(self, im): _api.check_isinstance( (mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage), im=im) if isinstance(im, mpl.contour.ContourSet): if im.collections[0] not in self._children: raise ValueError(""ContourSet must be in current Axes"") elif im not in self._children: raise ValueError(""Argument must be an image, collection, or "" ""ContourSet in this Axes"") self._current_image = im "," Set the current image. This image will be the target of colormap functions like ``pyplot.viridis``, and other functions such as `~.pyplot.clim`. The current image is an attribute of the current Axes. ",31,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _sci(self, im): _api.check_isinstance( (mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage), im=im) if isinstance(im, mpl.contour.ContourSet): if im.collections[0] not in self._children: raise ValueError(""ContourSet must be in current Axes"") elif im not in self._children: raise ValueError(""Argument must be an image, collection, or "" ""ContourSet in this Axes"") self._current_image = im ``` ###Assistant : Set the current image. This image will be the target of colormap functions like ``pyplot.viridis``, and other functions such as `~.pyplot.clim`. The current image is an attribute of the current Axes. " 1584,"def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]: raise NotImplementedError(""Inheriting class must implement this method."") ","Rebuilder function and optional arguments to contruct a persisted collection. Returns ------- PostPersistCallable Callable that rebuilds the collection. The signature should be ``rebuild(dsk: Mapping, *args: Any, rename: Mapping[str, str] | None)``. The callable should return an equivalent Dask collection with the same keys as `self`, but with results that are computed through a different graph. In the case of :py:func:`dask.persist`, the new graph will have just the output keys and the values already computed. tuple[Any, ...] 
Optional arguments passed to the rebuild callable. If no additional arguments are to be passed then this must be an empty tuple. ",98,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]: raise NotImplementedError(""Inheriting class must implement this method."") ``` ###Assistant : Rebuilder function and optional arguments to construct a persisted collection. Returns ------- PostPersistCallable Callable that rebuilds the collection. The signature should be ``rebuild(dsk: Mapping, *args: Any, rename: Mapping[str, str] | None)``. The callable should return an equivalent Dask collection with the same keys as `self`, but with results that are computed through a different graph. In the case of :py:func:`dask.persist`, the new graph will have just the output keys and the values already computed. tuple[Any, ...] Optional arguments passed to the rebuild callable. If no additional arguments are to be passed then this must be an empty tuple. " 1585,"def execute(): for doctype in (""Sales Order Item"", ""Bin""): frappe.reload_doctype(doctype) repost_for = frappe.db.sql() for item_code, warehouse in repost_for: if not (item_code and warehouse): continue update_bin_qty(item_code, warehouse, { ""reserved_qty"": get_reserved_qty(item_code, warehouse) }) frappe.db.sql() "," select distinct item_code, warehouse from ( ( select distinct item_code, warehouse from `tabSales Order Item` where docstatus=1 ) UNION ( select distinct item_code, warehouse from `tabPacked Item` where docstatus=1 and parenttype='Sales Order' ) ) so_item where exists(select name from tabItem where name=so_item.item_code and ifnull(is_stock_item, 0)=1) delete from tabBin where exists( select name from tabItem where name=tabBin.item_code and ifnull(is_stock_item, 0) = 0 ) ",62,32,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def execute(): for doctype in (""Sales Order Item"", ""Bin""): frappe.reload_doctype(doctype) repost_for = frappe.db.sql() for item_code, warehouse in repost_for: if not (item_code and warehouse): continue update_bin_qty(item_code, warehouse, { ""reserved_qty"": get_reserved_qty(item_code, warehouse) }) frappe.db.sql() ``` ###Assistant : select distinct item_code, warehouse from ( ( select distinct item_code, warehouse from `tabSales Order Item` where docstatus=1 ) UNION ( select distinct item_code, warehouse from `tabPacked Item` where docstatus=1 and parenttype='Sales Order' ) ) so_item where exists(select name from tabItem where name=so_item.item_code and ifnull(is_stock_item, 0)=1) delete from tabBin where exists( select name from tabItem where name=tabBin.item_code and ifnull(is_stock_item, 0) = 0 ) " 1586,"def rename_group_tables_reverse(apps, schema_editor): Group = apps.get_model(""auth"", ""Group"") schema_editor.alter_db_table( Group, ""account_group"", ""auth_group"", ) PermissionGroup = Group.permissions.through schema_editor.alter_db_table( PermissionGroup, ""account_group_permissions"", ""auth_group_permissions"", ) RENAME_CONSTRAINTS_AND_INDEX_REVERSE = DROP_OLD_CONSTRAINTS_REVERSE_FROM_0072 = DROP_OLD_CONSTRAINTS_REVERSE_FROM_APP_0018 = "," ALTER TABLE account_group RENAME CONSTRAINT account_group_pkey TO auth_group_pkey; ALTER TABLE account_group RENAME CONSTRAINT account_group_name_key TO auth_group_name_key; ALTER INDEX IF EXISTS account_group_name_034e9f3f_like RENAME TO auth_group_name_a6ea08ec_like; ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissions_group_id_permission_id_0cd325b0_uniq UNIQUE (group_id, permission_id); ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissions_group_id_b120cbf9_fk_auth_group_id FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissio_permission_id_84c5c92e_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE account_user_groups ADD CONSTRAINT userprofile_user_groups_group_id_c7eec74e_fk_auth_group_id FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE account_user_user_permissions ADD CONSTRAINT userprofile_user_use_permission_id_1caa8a71_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_app_permissions ADD CONSTRAINT account_serviceaccou_permission_id_449791f0_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_appextension_permissions ADD CONSTRAINT app_appextension_per_permission_id_cb6c3ce0_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_appinstallation_permissions ADD CONSTRAINT app_appinstallation__permission_id_4ee9f6c8_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ",138,26,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def rename_group_tables_reverse(apps, schema_editor): Group = apps.get_model(""auth"", ""Group"") schema_editor.alter_db_table( Group, ""account_group"", ""auth_group"", ) PermissionGroup = Group.permissions.through schema_editor.alter_db_table( PermissionGroup, ""account_group_permissions"", ""auth_group_permissions"", ) RENAME_CONSTRAINTS_AND_INDEX_REVERSE = DROP_OLD_CONSTRAINTS_REVERSE_FROM_0072 = DROP_OLD_CONSTRAINTS_REVERSE_FROM_APP_0018 = ``` ###Assistant : ALTER TABLE account_group RENAME CONSTRAINT account_group_pkey TO auth_group_pkey; ALTER TABLE account_group RENAME CONSTRAINT account_group_name_key TO auth_group_name_key; ALTER INDEX IF EXISTS account_group_name_034e9f3f_like RENAME TO auth_group_name_a6ea08ec_like; ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissions_group_id_permission_id_0cd325b0_uniq UNIQUE (group_id, permission_id); ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissions_group_id_b120cbf9_fk_auth_group_id FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissio_permission_id_84c5c92e_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE account_user_groups ADD CONSTRAINT userprofile_user_groups_group_id_c7eec74e_fk_auth_group_id FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE account_user_user_permissions ADD CONSTRAINT userprofile_user_use_permission_id_1caa8a71_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_app_permissions ADD CONSTRAINT account_serviceaccou_permission_id_449791f0_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_appextension_permissions ADD CONSTRAINT app_appextension_per_permission_id_cb6c3ce0_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_appinstallation_permissions ADD CONSTRAINT app_appinstallation__permission_id_4ee9f6c8_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; " 1587,"def test_session_is_accessed(self): response = self.client.get(""/auth_processor_attr_access/"") self.assertContains(response, ""Session accessed"") "," The session is accessed if the auth context processor is used and relevant attributes accessed. ",15,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_session_is_accessed(self): response = self.client.get(""/auth_processor_attr_access/"") self.assertContains(response, ""Session accessed"") ``` ###Assistant : The session is accessed if the auth context processor is used and relevant attributes accessed. 
" 1588,"def _find_vc2017(): root = os.environ.get(""ProgramFiles(x86)"") or os.environ.get(""ProgramFiles"") if not root: return None, None try: path = subprocess.check_output([ os.path.join(root, ""Microsoft Visual Studio"", ""Installer"", ""vswhere.exe""), ""-latest"", ""-prerelease"", ""-requires"", ""Microsoft.VisualStudio.Component.VC.Tools.x86.x64"", ""-property"", ""installationPath"", ""-products"", ""*"", ], encoding=""mbcs"", errors=""strict"").strip() except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): return None, None path = os.path.join(path, ""VC"", ""Auxiliary"", ""Build"") if os.path.isdir(path): return 15, path return None, None PLAT_SPEC_TO_RUNTIME = { 'x86' : 'x86', 'x86_amd64' : 'x64', 'x86_arm' : 'arm', 'x86_arm64' : 'arm64' } ","Returns ""15, path"" based on the result of invoking vswhere.exe If no install is found, returns ""None, None"" The version is returned to avoid unnecessarily changing the function result. It may be ignored when the path is not None. If vswhere.exe is not available, by definition, VS 2017 is not installed. ",51,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _find_vc2017(): root = os.environ.get(""ProgramFiles(x86)"") or os.environ.get(""ProgramFiles"") if not root: return None, None try: path = subprocess.check_output([ os.path.join(root, ""Microsoft Visual Studio"", ""Installer"", ""vswhere.exe""), ""-latest"", ""-prerelease"", ""-requires"", ""Microsoft.VisualStudio.Component.VC.Tools.x86.x64"", ""-property"", ""installationPath"", ""-products"", ""*"", ], encoding=""mbcs"", errors=""strict"").strip() except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): return None, None path = os.path.join(path, ""VC"", ""Auxiliary"", ""Build"") if os.path.isdir(path): return 15, path return None, None PLAT_SPEC_TO_RUNTIME = { 'x86' : 'x86', 'x86_amd64' : 'x64', 'x86_arm' : 'arm', 'x86_arm64' : 'arm64' } ``` ###Assistant : Returns ""15, path"" based on the result of invoking vswhere.exe If no install is found, returns ""None, None"" The version is returned to avoid unnecessarily changing the function result. It may be ignored when the path is not None. If vswhere.exe is not available, by definition, VS 2017 is not installed. " 1589,"def save_attributes_to_hdf5_group(group, name, data): # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( ""The following attributes cannot be saved to HDF5 file because they "" f""are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}"" ) data_npy = np.asarray(data) num_chunks = 1 chunked_data = np.array_split(data_npy, num_chunks) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): num_chunks += 1 chunked_data = np.array_split(data_npy, num_chunks) if num_chunks > 1: for chunk_id, chunk_data in enumerate(chunked_data): group.attrs[""%s%d"" % (name, chunk_id)] = chunk_data else: group.attrs[name] = data ","Saves attributes (data) of the specified name into the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to save. 
data: Attributes data to store. Raises: RuntimeError: If any single attribute is too large to be saved. ",65,127,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_attributes_to_hdf5_group(group, name, data): # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( ""The following attributes cannot be saved to HDF5 file because they "" f""are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}"" ) data_npy = np.asarray(data) num_chunks = 1 chunked_data = np.array_split(data_npy, num_chunks) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): num_chunks += 1 chunked_data = np.array_split(data_npy, num_chunks) if num_chunks > 1: for chunk_id, chunk_data in enumerate(chunked_data): group.attrs[""%s%d"" % (name, chunk_id)] = chunk_data else: group.attrs[name] = data ``` ###Assistant : Saves attributes (data) of the specified name into the HDF5 group. This method deals with an inherent problem of HDF5 file which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to save. data: Attributes data to store. Raises: RuntimeError: If any single attribute is too large to be saved. " 1590,"def dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt is not None: plen = len(_pkt) if plen >= 2: byte0, byte1 = struct.unpack(""BB"", _pkt[:2]) s = kargs.get(""tls_session"", None) if byte0 not in _tls_type or byte1 != 3: # Unknown type # Check SSLv2: either the session is already SSLv2, # either the packet looks like one. As said above, this # isn't 100% reliable, but Wireshark does the same if s and (s.tls_version == 0x0002 or s.advertised_tls_version == 0x0002) or \ (_ssl_looks_like_sslv2(_pkt) and (not s or s.tls_version is None)): from scapy.layers.tls.record_sslv2 import SSLv2 return SSLv2 # Not SSLv2: continuation return _TLSEncryptedContent # Check TLS 1.3 if s and _tls_version_check(s.tls_version, 0x0304): _has_cipher = lambda x: ( x and not isinstance(x.cipher, Cipher_NULL) ) if (_has_cipher(s.rcs) or _has_cipher(s.prcs)) and \ byte0 == 0x17: from scapy.layers.tls.record_tls13 import TLS13 return TLS13 if plen < 5: # Layer detected as TLS but too small to be a # parsed. Scapy should not try to decode them return _TLSEncryptedContent return TLS # Parsing methods "," If the TLS class was called on raw SSLv2 data, we want to return an SSLv2 record instance. We acknowledge the risk of SSLv2 packets with a msglen of 0x1403, 0x1503, 0x1603 or 0x1703 which will never be casted as SSLv2 records but TLS ones instead, but hey, we can't be held responsible for low-minded extensibility choices. ",57,165,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt is not None: plen = len(_pkt) if plen >= 2: byte0, byte1 = struct.unpack(""BB"", _pkt[:2]) s = kargs.get(""tls_session"", None) if byte0 not in _tls_type or byte1 != 3: # Unknown type # Check SSLv2: either the session is already SSLv2, # either the packet looks like one. 
As said above, this # isn't 100% reliable, but Wireshark does the same if s and (s.tls_version == 0x0002 or s.advertised_tls_version == 0x0002) or \ (_ssl_looks_like_sslv2(_pkt) and (not s or s.tls_version is None)): from scapy.layers.tls.record_sslv2 import SSLv2 return SSLv2 # Not SSLv2: continuation return _TLSEncryptedContent # Check TLS 1.3 if s and _tls_version_check(s.tls_version, 0x0304): _has_cipher = lambda x: ( x and not isinstance(x.cipher, Cipher_NULL) ) if (_has_cipher(s.rcs) or _has_cipher(s.prcs)) and \ byte0 == 0x17: from scapy.layers.tls.record_tls13 import TLS13 return TLS13 if plen < 5: # Layer detected as TLS but too small to be a # parsed. Scapy should not try to decode them return _TLSEncryptedContent return TLS # Parsing methods ``` ###Assistant : If the TLS class was called on raw SSLv2 data, we want to return an SSLv2 record instance. We acknowledge the risk of SSLv2 packets with a msglen of 0x1403, 0x1503, 0x1603 or 0x1703 which will never be casted as SSLv2 records but TLS ones instead, but hey, we can't be held responsible for low-minded extensibility choices. " 1591,"def get_leave_period(from_date, to_date, company): leave_period = frappe.db.sql( , {""from_date"": from_date, ""to_date"": to_date, ""company"": company}, as_dict=1, ) if leave_period: return leave_period "," select name, from_date, to_date from `tabLeave Period` where company=%(company)s and is_active=1 and (from_date between %(from_date)s and %(to_date)s or to_date between %(from_date)s and %(to_date)s or (from_date < %(from_date)s and to_date > %(to_date)s)) ",31,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_leave_period(from_date, to_date, company): leave_period = frappe.db.sql( , {""from_date"": from_date, ""to_date"": to_date, ""company"": company}, as_dict=1, ) if leave_period: return leave_period ``` ###Assistant : select name, from_date, to_date from `tabLeave Period` where company=%(company)s and is_active=1 and (from_date between %(from_date)s and %(to_date)s or to_date between %(from_date)s and %(to_date)s or (from_date < %(from_date)s and to_date > %(to_date)s)) " 1592,"def rows(self) -> Iterator[Dict[str, TensorType]]: # Do we add seq_lens=[1] to each row? seq_lens = None if self.get(SampleBatch.SEQ_LENS) is None else np.array([1]) self_as_dict = {k: v for k, v in self.items()} for i in range(self.count): yield tree.map_structure_with_path( lambda p, v: v[i] if p[0] != self.SEQ_LENS else seq_lens, self_as_dict, ) ","Returns an iterator over data rows, i.e. dicts with column values. Note that if `seq_lens` is set in self, we set it to [1] in the rows. Yields: The column values of the row in this iteration. Examples: >>> batch = SampleBatch({ ... ""a"": [1, 2, 3], ... ""b"": [4, 5, 6], ... ""seq_lens"": [1, 2] ... }) >>> for row in batch.rows(): print(row) {""a"": 1, ""b"": 4, ""seq_lens"": [1]} {""a"": 2, ""b"": 5, ""seq_lens"": [1]} {""a"": 3, ""b"": 6, ""seq_lens"": [1]} ",82,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rows(self) -> Iterator[Dict[str, TensorType]]: # Do we add seq_lens=[1] to each row? seq_lens = None if self.get(SampleBatch.SEQ_LENS) is None else np.array([1]) self_as_dict = {k: v for k, v in self.items()} for i in range(self.count): yield tree.map_structure_with_path( lambda p, v: v[i] if p[0] != self.SEQ_LENS else seq_lens, self_as_dict, ) ``` ###Assistant : Returns an iterator over data rows, i.e. 
dicts with column values. Note that if `seq_lens` is set in self, we set it to [1] in the rows. Yields: The column values of the row in this iteration. Examples: >>> batch = SampleBatch({ ... ""a"": [1, 2, 3], ... ""b"": [4, 5, 6], ... ""seq_lens"": [1, 2] ... }) >>> for row in batch.rows(): print(row) {""a"": 1, ""b"": 4, ""seq_lens"": [1]} {""a"": 2, ""b"": 5, ""seq_lens"": [1]} {""a"": 3, ""b"": 6, ""seq_lens"": [1]} " 1593,"def _dedup_weights(self, weights): output, seen_ids = [], set() for w in weights: if id(w) not in seen_ids: output.append(w) # Track the Variable's identity to avoid __eq__ issues. seen_ids.add(id(w)) return output # SavedModel properties. Please see keras/saving/saved_model for details. ",Dedupe weights while maintaining order as much as possible.,9,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _dedup_weights(self, weights): output, seen_ids = [], set() for w in weights: if id(w) not in seen_ids: output.append(w) # Track the Variable's identity to avoid __eq__ issues. seen_ids.add(id(w)) return output # SavedModel properties. Please see keras/saving/saved_model for details. ``` ###Assistant : Dedupe weights while maintaining order as much as possible. " 1594,"def ancestors_with_self(self) -> list[DOMNode]: nodes: list[MessagePump | None] = [] add_node = nodes.append node: MessagePump | None = self while node is not None: add_node(node) node = node._parent return cast(""list[DOMNode]"", nodes) ","list[DOMNode]: A list of Nodes by tracing a path all the way back to App. Note: This is inclusive of ``self``. ",21,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ancestors_with_self(self) -> list[DOMNode]: nodes: list[MessagePump | None] = [] add_node = nodes.append node: MessagePump | None = self while node is not None: add_node(node) node = node._parent return cast(""list[DOMNode]"", nodes) ``` ###Assistant : list[DOMNode]: A list of Nodes by tracing a path all the way back to App. Note: This is inclusive of ``self``. 
" 1595,"def function_converter(self) -> Mapping[str, fields.MetricsFunction]: resolve_metric_id = { ""name"": ""metric_id"", ""fn"": lambda args: self.resolve_metric(args[""column""]), } function_converter = { function.name: function for function in [ # Note while the discover version of apdex, count_miserable, user_misery # accepts arguments, because this is precomputed with tags no parameters # are available fields.MetricsFunction( ""apdex"", optional_args=[fields.NullableNumberRange(""satisfaction"", 0, None)], snql_distribution=self._resolve_apdex_function, default_result_type=""number"", ), fields.MetricsFunction( ""avg"", required_args=[ fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS, ) ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( ""avgIf"", [ Column(""value""), Function( ""equals"", [ Column(""metric_id""), args[""metric_id""], ], ), ], alias, ), result_type_fn=self.reflective_result_type(), default_result_type=""integer"", ), fields.MetricsFunction( ""count_miserable"", required_args=[ fields.MetricArg( ""column"", allowed_columns=[""user""], allow_custom_measurements=False ) ], optional_args=[fields.NullableNumberRange(""satisfaction"", 0, None)], calculated_args=[resolve_metric_id], snql_set=self._resolve_count_miserable_function, default_result_type=""integer"", ), fields.MetricsFunction( ""count_unparameterized_transactions"", snql_distribution=lambda args, alias: Function( ""countIf"", [ Column(""value""), Function( ""and"", [ Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), Function( ""equals"", [ self.builder.column(""transaction""), self.builder.resolve_tag_value(""<< unparameterized >>""), ], ), ], ), ], alias, ), # Not yet exposed, need to add far more validation around tag&value private=True, default_result_type=""integer"", ), fields.MetricsFunction( ""count_null_transactions"", snql_distribution=lambda args, alias: Function( ""countIf"", [ Column(""value""), Function( ""and"", [ Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), Function( ""equals"", [ self.builder.column(""transaction""), """" if self.builder.tag_values_are_strings else 0, ], ), ], ), ], alias, ), private=True, ), fields.MetricsFunction( ""count_has_transaction_name"", snql_distribution=lambda args, alias: Function( ""countIf"", [ Column(""value""), Function( ""and"", [ Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), Function( ""and"", [ Function( ""notEquals"", [ self.builder.column(""transaction""), """" if self.builder.tag_values_are_strings else 0, ], ), Function( ""notEquals"", [ self.builder.column(""transaction""), self.builder.resolve_tag_value( ""<< unparameterized >>"" ), ], ), ], ), ], ), ], alias, ), private=True, default_result_type=""integer"", ), fields.MetricsFunction( ""user_misery"", optional_args=[ fields.NullableNumberRange(""satisfaction"", 0, None), fields.with_default( constants.MISERY_ALPHA, fields.NumberRange(""alpha"", 0, None) ), fields.with_default( constants.MISERY_BETA, fields.NumberRange(""beta"", 0, None) ), ], calculated_args=[], snql_set=self._resolve_user_misery_function, default_result_type=""number"", ), fields.MetricsFunction( ""p50"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], 
snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.5 ), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""p75"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.75 ), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""p90"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.90 ), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""p95"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.95 ), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""p99"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.99 ), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""p100"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""max"", required_args=[ fields.MetricArg(""column""), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( ""maxIf"", [ Column(""value""), Function(""equals"", [Column(""metric_id""), args[""metric_id""]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( ""min"", required_args=[ fields.MetricArg(""column""), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( ""minIf"", [ Column(""value""), Function(""equals"", [Column(""metric_id""), args[""metric_id""]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( ""sum"", required_args=[ fields.MetricArg(""column""), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( ""sumIf"", [ Column(""value""), Function(""equals"", [Column(""metric_id""), args[""metric_id""]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( ""sumIf"", required_args=[ fields.ColumnTagArg(""if_col""), fields.FunctionArg(""if_val""), ], calculated_args=[ { ""name"": ""resolved_val"", ""fn"": lambda args: self.builder.resolve_tag_value(args[""if_val""]), } ], snql_counter=lambda args, alias: Function( ""sumIf"", [ Column(""value""), Function(""equals"", [args[""if_col""], args[""resolved_val""]]), ], alias, ), default_result_type=""integer"", ), 
fields.MetricsFunction( ""percentile"", required_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), fields.NumberRange(""percentile"", 0, 1), ], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_percentile, result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""count_unique"", required_args=[ fields.MetricArg( ""column"", allowed_columns=[""user""], allow_custom_measurements=False ) ], calculated_args=[resolve_metric_id], snql_set=lambda args, alias: Function( ""uniqIf"", [ Column(""value""), Function(""equals"", [Column(""metric_id""), args[""metric_id""]]), ], alias, ), default_result_type=""integer"", ), fields.MetricsFunction( ""uniq"", snql_set=lambda args, alias: Function( ""uniq"", [Column(""value"")], alias, ), ), fields.MetricsFunction( ""uniqIf"", required_args=[ fields.ColumnTagArg(""if_col""), fields.FunctionArg(""if_val""), ], calculated_args=[ { ""name"": ""resolved_val"", ""fn"": lambda args: self.builder.resolve_tag_value(args[""if_val""]), } ], snql_set=lambda args, alias: Function( ""uniqIf"", [ Column(""value""), Function(""equals"", [args[""if_col""], args[""resolved_val""]]), ], alias, ), default_result_type=""integer"", ), fields.MetricsFunction( ""count"", snql_distribution=lambda args, alias: Function( ""countIf"", [ Column(""value""), Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), ], alias, ), default_result_type=""integer"", ), fields.MetricsFunction( ""count_web_vitals"", required_args=[ fields.MetricArg( ""column"", allowed_columns=[ ""measurements.fp"", ""measurements.fcp"", ""measurements.lcp"", ""measurements.fid"", ""measurements.cls"", ], allow_custom_measurements=False, ), fields.SnQLStringArg( ""quality"", allowed_strings=[""good"", ""meh"", ""poor"", ""any""] ), ], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_web_vital_function, default_result_type=""integer"", ), fields.MetricsFunction( ""epm"", snql_distribution=lambda args, alias: Function( ""divide"", [ Function( ""countIf"", [ Column(""value""), Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), ], ), Function(""divide"", [args[""interval""], 60]), ], alias, ), optional_args=[fields.IntervalDefault(""interval"", 1, None)], default_result_type=""number"", ), fields.MetricsFunction( ""eps"", snql_distribution=lambda args, alias: Function( ""divide"", [ Function( ""countIf"", [ Column(""value""), Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), ], ), args[""interval""], ], alias, ), optional_args=[fields.IntervalDefault(""interval"", 1, None)], default_result_type=""number"", ), fields.MetricsFunction( ""failure_count"", snql_distribution=self._resolve_failure_count, default_result_type=""integer"", ), fields.MetricsFunction( ""failure_rate"", snql_distribution=lambda args, alias: Function( ""divide"", [ self._resolve_failure_count(args), Function( ""countIf"", [ Column(""value""), Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), ], ), ], alias, ), default_result_type=""percentage"", ), fields.MetricsFunction( ""histogram"", required_args=[fields.MetricArg(""column"")], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_histogram_function, default_result_type=""number"", private=True, ), ] } for alias, 
name in constants.FUNCTION_ALIASES.items(): if name in function_converter: function_converter[alias] = function_converter[name].alias_as(alias) return function_converter # Field Aliases","While the final functions in clickhouse must have their -Merge combinators in order to function, we don't need to add them here since snuba has a FunctionMapper that will add it for us. Basically it turns expressions like quantiles(0.9)(value) into quantilesMerge(0.9)(percentiles) Make sure to update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here, can't be a dynamic list since the Metric Layer will actually handle which dataset each function goes to ",68,747,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def function_converter(self) -> Mapping[str, fields.MetricsFunction]: resolve_metric_id = { ""name"": ""metric_id"", ""fn"": lambda args: self.resolve_metric(args[""column""]), } function_converter = { function.name: function for function in [ # Note while the discover version of apdex, count_miserable, user_misery # accepts arguments, because this is precomputed with tags no parameters # are available fields.MetricsFunction( ""apdex"", optional_args=[fields.NullableNumberRange(""satisfaction"", 0, None)], snql_distribution=self._resolve_apdex_function, default_result_type=""number"", ), fields.MetricsFunction( ""avg"", required_args=[ fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS, ) ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( ""avgIf"", [ Column(""value""), Function( ""equals"", [ Column(""metric_id""), args[""metric_id""], ], ), ], alias, ), result_type_fn=self.reflective_result_type(), default_result_type=""integer"", ), fields.MetricsFunction( ""count_miserable"", required_args=[ fields.MetricArg( ""column"", allowed_columns=[""user""], allow_custom_measurements=False ) ], optional_args=[fields.NullableNumberRange(""satisfaction"", 0, None)], calculated_args=[resolve_metric_id], snql_set=self._resolve_count_miserable_function, default_result_type=""integer"", ), fields.MetricsFunction( ""count_unparameterized_transactions"", snql_distribution=lambda args, alias: Function( ""countIf"", [ Column(""value""), Function( ""and"", [ Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), Function( ""equals"", [ self.builder.column(""transaction""), self.builder.resolve_tag_value(""<< unparameterized >>""), ], ), ], ), ], alias, ), # Not yet exposed, need to add far more validation around tag&value private=True, default_result_type=""integer"", ), fields.MetricsFunction( ""count_null_transactions"", snql_distribution=lambda args, alias: Function( ""countIf"", [ Column(""value""), Function( ""and"", [ Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), Function( ""equals"", [ self.builder.column(""transaction""), """" if self.builder.tag_values_are_strings else 0, ], ), ], ), ], alias, ), private=True, ), fields.MetricsFunction( ""count_has_transaction_name"", snql_distribution=lambda args, alias: Function( ""countIf"", [ Column(""value""), Function( ""and"", [ Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), Function( ""and"", [ Function( ""notEquals"", [ self.builder.column(""transaction""), """" if self.builder.tag_values_are_strings else 0, ], ), Function( ""notEquals"", [ self.builder.column(""transaction""), 
self.builder.resolve_tag_value( ""<< unparameterized >>"" ), ], ), ], ), ], ), ], alias, ), private=True, default_result_type=""integer"", ), fields.MetricsFunction( ""user_misery"", optional_args=[ fields.NullableNumberRange(""satisfaction"", 0, None), fields.with_default( constants.MISERY_ALPHA, fields.NumberRange(""alpha"", 0, None) ), fields.with_default( constants.MISERY_BETA, fields.NumberRange(""beta"", 0, None) ), ], calculated_args=[], snql_set=self._resolve_user_misery_function, default_result_type=""number"", ), fields.MetricsFunction( ""p50"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.5 ), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""p75"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.75 ), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""p90"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.90 ), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""p95"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.95 ), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""p99"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.99 ), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""p100"", optional_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1), result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""max"", required_args=[ fields.MetricArg(""column""), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( ""maxIf"", [ Column(""value""), Function(""equals"", [Column(""metric_id""), args[""metric_id""]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( ""min"", required_args=[ fields.MetricArg(""column""), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( ""minIf"", [ Column(""value""), Function(""equals"", [Column(""metric_id""), args[""metric_id""]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( ""sum"", 
required_args=[ fields.MetricArg(""column""), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( ""sumIf"", [ Column(""value""), Function(""equals"", [Column(""metric_id""), args[""metric_id""]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( ""sumIf"", required_args=[ fields.ColumnTagArg(""if_col""), fields.FunctionArg(""if_val""), ], calculated_args=[ { ""name"": ""resolved_val"", ""fn"": lambda args: self.builder.resolve_tag_value(args[""if_val""]), } ], snql_counter=lambda args, alias: Function( ""sumIf"", [ Column(""value""), Function(""equals"", [args[""if_col""], args[""resolved_val""]]), ], alias, ), default_result_type=""integer"", ), fields.MetricsFunction( ""percentile"", required_args=[ fields.with_default( ""transaction.duration"", fields.MetricArg( ""column"", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), fields.NumberRange(""percentile"", 0, 1), ], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_percentile, result_type_fn=self.reflective_result_type(), default_result_type=""duration"", ), fields.MetricsFunction( ""count_unique"", required_args=[ fields.MetricArg( ""column"", allowed_columns=[""user""], allow_custom_measurements=False ) ], calculated_args=[resolve_metric_id], snql_set=lambda args, alias: Function( ""uniqIf"", [ Column(""value""), Function(""equals"", [Column(""metric_id""), args[""metric_id""]]), ], alias, ), default_result_type=""integer"", ), fields.MetricsFunction( ""uniq"", snql_set=lambda args, alias: Function( ""uniq"", [Column(""value"")], alias, ), ), fields.MetricsFunction( ""uniqIf"", required_args=[ fields.ColumnTagArg(""if_col""), fields.FunctionArg(""if_val""), ], calculated_args=[ { ""name"": ""resolved_val"", ""fn"": lambda args: self.builder.resolve_tag_value(args[""if_val""]), } ], snql_set=lambda args, alias: Function( ""uniqIf"", [ Column(""value""), Function(""equals"", [args[""if_col""], args[""resolved_val""]]), ], alias, ), default_result_type=""integer"", ), fields.MetricsFunction( ""count"", snql_distribution=lambda args, alias: Function( ""countIf"", [ Column(""value""), Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), ], alias, ), default_result_type=""integer"", ), fields.MetricsFunction( ""count_web_vitals"", required_args=[ fields.MetricArg( ""column"", allowed_columns=[ ""measurements.fp"", ""measurements.fcp"", ""measurements.lcp"", ""measurements.fid"", ""measurements.cls"", ], allow_custom_measurements=False, ), fields.SnQLStringArg( ""quality"", allowed_strings=[""good"", ""meh"", ""poor"", ""any""] ), ], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_web_vital_function, default_result_type=""integer"", ), fields.MetricsFunction( ""epm"", snql_distribution=lambda args, alias: Function( ""divide"", [ Function( ""countIf"", [ Column(""value""), Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), ], ), Function(""divide"", [args[""interval""], 60]), ], alias, ), optional_args=[fields.IntervalDefault(""interval"", 1, None)], default_result_type=""number"", ), fields.MetricsFunction( ""eps"", snql_distribution=lambda args, alias: Function( ""divide"", [ Function( ""countIf"", [ Column(""value""), Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), ], ), args[""interval""], ], alias, ), optional_args=[fields.IntervalDefault(""interval"", 1, None)], 
default_result_type=""number"", ), fields.MetricsFunction( ""failure_count"", snql_distribution=self._resolve_failure_count, default_result_type=""integer"", ), fields.MetricsFunction( ""failure_rate"", snql_distribution=lambda args, alias: Function( ""divide"", [ self._resolve_failure_count(args), Function( ""countIf"", [ Column(""value""), Function( ""equals"", [ Column(""metric_id""), self.resolve_metric(""transaction.duration""), ], ), ], ), ], alias, ), default_result_type=""percentage"", ), fields.MetricsFunction( ""histogram"", required_args=[fields.MetricArg(""column"")], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_histogram_function, default_result_type=""number"", private=True, ), ] } for alias, name in constants.FUNCTION_ALIASES.items(): if name in function_converter: function_converter[alias] = function_converter[name].alias_as(alias) return function_converter # Field Aliases ``` ###Assistant : While the final functions in clickhouse must have their -Merge combinators in order to function, we don't need to add them here since snuba has a FunctionMapper that will add it for us. Basically it turns expressions like quantiles(0.9)(value) into quantilesMerge(0.9)(percentiles) Make sure to update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here, can't be a dynamic list since the Metric Layer will actually handle which dataset each function goes to " 1596,"def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points): # This is terribly complex, because we check the list of used DLLs # trying to avoid duplicates, and detecting errors with them not # being binary identical, so we can report them. And then of course # we also need to handle OS specifics. # pylint: disable=too-many-branches,too-many-locals,too-many-statements used_dlls = detectUsedDLLs( source_dir=source_dir, standalone_entry_points=standalone_entry_points, use_cache=not Options.shallNotUseDependsExeCachedResults() and not Options.getWindowsDependencyTool() == ""depends.exe"", update_cache=not Options.shallNotStoreDependsExeCachedResults() and not Options.getWindowsDependencyTool() == ""depends.exe"", ) removed_dlls = set() warned_about = set() # Fist make checks and remove some. for dll_filename1, sources1 in tuple(iterItems(used_dlls)): if dll_filename1 in removed_dlls: continue for dll_filename2, sources2 in tuple(iterItems(used_dlls)): if dll_filename1 == dll_filename2: continue if dll_filename2 in removed_dlls: continue # Colliding basenames are an issue to us. if os.path.basename(dll_filename1) != os.path.basename(dll_filename2): continue # May already have been removed earlier if dll_filename1 not in used_dlls: continue if dll_filename2 not in used_dlls: continue dll_name = os.path.basename(dll_filename1) if Options.isShowInclusion(): inclusion_logger.info( % (dll_name, dll_filename1, dll_filename2) ) # Check that if a DLL has the same name, if it's identical, then it's easy. if haveSameFileContents(dll_filename1, dll_filename2): del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) continue # For Win32 we can check out file versions. 
if Utils.isWin32Windows(): dll_version1 = getWindowsDLLVersion(dll_filename1) dll_version2 = getWindowsDLLVersion(dll_filename2) if dll_version2 < dll_version1: del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) solved = True elif dll_version1 < dll_version2: del used_dlls[dll_filename1] removed_dlls.add(dll_filename1) solved = True else: solved = False if solved: if dll_name not in warned_about and dll_name not in ms_runtime_dlls: warned_about.add(dll_name) inclusion_logger.warning( ""Conflicting DLLs for '%s' in your installation, newest file version used, hoping for the best."" % dll_name ) continue # So we have conflicting DLLs, in which case we do report the fact. inclusion_logger.warning( % ( dll_name, dll_filename1, ""\n "".join(sources1), dll_filename2, ""\n "".join(sources2), ) ) del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) dll_map = [] for dll_filename, sources in iterItems(used_dlls): dll_name = os.path.basename(dll_filename) target_path = os.path.join(dist_dir, dll_name) # Sometimes DLL dependencies were copied there already. if not os.path.exists(target_path): copyFile(dll_filename, target_path) dll_map.append((dll_filename, dll_name)) if Options.isShowInclusion(): inclusion_logger.info( ""Included used shared library '%s' (used by %s)."" % (dll_filename, "", "".join(sources)) ) if Utils.isMacOS(): # For macOS, the binary and the DLLs needs to be changed to reflect # the relative DLL location in the "".dist"" folder. for standalone_entry_point in standalone_entry_points: fixupBinaryDLLPathsMacOS( binary_filename=standalone_entry_point.dest_path, dll_map=dll_map, original_location=standalone_entry_point.source_path, ) for original_path, dll_filename in dll_map: fixupBinaryDLLPathsMacOS( binary_filename=os.path.join(dist_dir, dll_filename), dll_map=dll_map, original_location=original_path, ) # Remove code signature from CPython installed library candidate = os.path.join( dist_dir, ""Python"", ) if os.path.exists(candidate): removeMacOSCodeSignature(candidate) # Remove or update rpath settings. if Utils.getOS() in (""Linux"", ""Darwin""): # For Linux, the ""rpath"" of libraries may be an issue and must be # removed. if Utils.isMacOS(): start = 0 else: start = 1 for standalone_entry_point in standalone_entry_points[start:]: count = relpath( path=standalone_entry_point.dest_path, start=dist_dir ).count(os.path.sep) rpath = os.path.join(""$ORIGIN"", *([""..""] * count)) setSharedLibraryRPATH(standalone_entry_point.dest_path, rpath) for _original_path, dll_filename in dll_map: setSharedLibraryRPATH(os.path.join(dist_dir, dll_filename), ""$ORIGIN"") if Utils.isWin32Windows(): if python_version < 0x300: # For Win32, we might have to remove SXS paths for standalone_entry_point in standalone_entry_points[1:]: removeSxsFromDLL(standalone_entry_point.dest_path) for _original_path, dll_filename in dll_map: removeSxsFromDLL(os.path.join(dist_dir, dll_filename)) ","Colliding DLL names for %s, checking identity of \ '%s' <-> '%s'.\ Ignoring non-identical DLLs for '%s'. %s used by: %s different from %s used by %s",27,477,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points): # This is terribly complex, because we check the list of used DLLs # trying to avoid duplicates, and detecting errors with them not # being binary identical, so we can report them. And then of course # we also need to handle OS specifics. 
# pylint: disable=too-many-branches,too-many-locals,too-many-statements used_dlls = detectUsedDLLs( source_dir=source_dir, standalone_entry_points=standalone_entry_points, use_cache=not Options.shallNotUseDependsExeCachedResults() and not Options.getWindowsDependencyTool() == ""depends.exe"", update_cache=not Options.shallNotStoreDependsExeCachedResults() and not Options.getWindowsDependencyTool() == ""depends.exe"", ) removed_dlls = set() warned_about = set() # Fist make checks and remove some. for dll_filename1, sources1 in tuple(iterItems(used_dlls)): if dll_filename1 in removed_dlls: continue for dll_filename2, sources2 in tuple(iterItems(used_dlls)): if dll_filename1 == dll_filename2: continue if dll_filename2 in removed_dlls: continue # Colliding basenames are an issue to us. if os.path.basename(dll_filename1) != os.path.basename(dll_filename2): continue # May already have been removed earlier if dll_filename1 not in used_dlls: continue if dll_filename2 not in used_dlls: continue dll_name = os.path.basename(dll_filename1) if Options.isShowInclusion(): inclusion_logger.info( % (dll_name, dll_filename1, dll_filename2) ) # Check that if a DLL has the same name, if it's identical, then it's easy. if haveSameFileContents(dll_filename1, dll_filename2): del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) continue # For Win32 we can check out file versions. if Utils.isWin32Windows(): dll_version1 = getWindowsDLLVersion(dll_filename1) dll_version2 = getWindowsDLLVersion(dll_filename2) if dll_version2 < dll_version1: del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) solved = True elif dll_version1 < dll_version2: del used_dlls[dll_filename1] removed_dlls.add(dll_filename1) solved = True else: solved = False if solved: if dll_name not in warned_about and dll_name not in ms_runtime_dlls: warned_about.add(dll_name) inclusion_logger.warning( ""Conflicting DLLs for '%s' in your installation, newest file version used, hoping for the best."" % dll_name ) continue # So we have conflicting DLLs, in which case we do report the fact. inclusion_logger.warning( % ( dll_name, dll_filename1, ""\n "".join(sources1), dll_filename2, ""\n "".join(sources2), ) ) del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) dll_map = [] for dll_filename, sources in iterItems(used_dlls): dll_name = os.path.basename(dll_filename) target_path = os.path.join(dist_dir, dll_name) # Sometimes DLL dependencies were copied there already. if not os.path.exists(target_path): copyFile(dll_filename, target_path) dll_map.append((dll_filename, dll_name)) if Options.isShowInclusion(): inclusion_logger.info( ""Included used shared library '%s' (used by %s)."" % (dll_filename, "", "".join(sources)) ) if Utils.isMacOS(): # For macOS, the binary and the DLLs needs to be changed to reflect # the relative DLL location in the "".dist"" folder. for standalone_entry_point in standalone_entry_points: fixupBinaryDLLPathsMacOS( binary_filename=standalone_entry_point.dest_path, dll_map=dll_map, original_location=standalone_entry_point.source_path, ) for original_path, dll_filename in dll_map: fixupBinaryDLLPathsMacOS( binary_filename=os.path.join(dist_dir, dll_filename), dll_map=dll_map, original_location=original_path, ) # Remove code signature from CPython installed library candidate = os.path.join( dist_dir, ""Python"", ) if os.path.exists(candidate): removeMacOSCodeSignature(candidate) # Remove or update rpath settings. 
if Utils.getOS() in (""Linux"", ""Darwin""): # For Linux, the ""rpath"" of libraries may be an issue and must be # removed. if Utils.isMacOS(): start = 0 else: start = 1 for standalone_entry_point in standalone_entry_points[start:]: count = relpath( path=standalone_entry_point.dest_path, start=dist_dir ).count(os.path.sep) rpath = os.path.join(""$ORIGIN"", *([""..""] * count)) setSharedLibraryRPATH(standalone_entry_point.dest_path, rpath) for _original_path, dll_filename in dll_map: setSharedLibraryRPATH(os.path.join(dist_dir, dll_filename), ""$ORIGIN"") if Utils.isWin32Windows(): if python_version < 0x300: # For Win32, we might have to remove SXS paths for standalone_entry_point in standalone_entry_points[1:]: removeSxsFromDLL(standalone_entry_point.dest_path) for _original_path, dll_filename in dll_map: removeSxsFromDLL(os.path.join(dist_dir, dll_filename)) ``` ###Assistant : Colliding DLL names for %s, checking identity of \ '%s' <-> '%s'.\ Ignoring non-identical DLLs for '%s'. %s used by: %s different from %s used by %s " 1597,"def _laplace_rule_diff(f, t, s, doit=True, **hints): hints.pop('simplify', True) a = Wild('a', exclude=[t]) y = Wild('y') n = Wild('n', exclude=[t]) g = WildFunction('g', nargs=1) ma1 = f.match(a*Derivative(g, (t, n))) if ma1 and ma1[g].args[0] == t and ma1[n].is_integer: debug('_laplace_apply_rules match:') debug(' f: %s'%(f,)) debug(' rule: time derivative (1.11, 1.12)') d = [] for k in range(ma1[n]): if k==0: y = ma1[g].func(t).subs(t, 0) else: y = Derivative(ma1[g].func(t), (t, k)).subs(t, 0) d.append(s**(ma1[n]-k-1)*y) r = s**ma1[n]*_laplace_apply_rules(ma1[g].func(t), t, s, doit=doit, **hints) return ma1[a]*(r - Add(*d)) return None "," This internal helper function tries to transform an expression containing a derivative of an undefined function and returns `None` if it cannot do it. ",24,81,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _laplace_rule_diff(f, t, s, doit=True, **hints): hints.pop('simplify', True) a = Wild('a', exclude=[t]) y = Wild('y') n = Wild('n', exclude=[t]) g = WildFunction('g', nargs=1) ma1 = f.match(a*Derivative(g, (t, n))) if ma1 and ma1[g].args[0] == t and ma1[n].is_integer: debug('_laplace_apply_rules match:') debug(' f: %s'%(f,)) debug(' rule: time derivative (1.11, 1.12)') d = [] for k in range(ma1[n]): if k==0: y = ma1[g].func(t).subs(t, 0) else: y = Derivative(ma1[g].func(t), (t, k)).subs(t, 0) d.append(s**(ma1[n]-k-1)*y) r = s**ma1[n]*_laplace_apply_rules(ma1[g].func(t), t, s, doit=doit, **hints) return ma1[a]*(r - Add(*d)) return None ``` ###Assistant : This internal helper function tries to transform an expression containing a derivative of an undefined function and returns `None` if it cannot do it. " 1598,"def _check_flag(user, flag, attributes, user_flags_settings): new_flag = False is_role_key = ""is_%s_role"" % (flag) is_attr_key = ""is_%s_attr"" % (flag) is_value_key = ""is_%s_value"" % (flag) remove_setting = ""remove_%ss"" % (flag) # Check to see if we are respecting a role and, if so, does our user have that role? 
required_roles = user_flags_settings.get(is_role_key, None) if required_roles: matching_roles = _get_matches(required_roles, attributes.get('Role', [])) # We do a 2 layer check here so that we don't spit out the else message if there is no role defined if matching_roles: logger.debug(""User %s has %s role(s) %s"" % (user.username, flag, ', '.join(matching_roles))) new_flag = True else: logger.debug(""User %s is missing the %s role(s) %s"" % (user.username, flag, ', '.join(required_roles))) # Next, check to see if we are respecting an attribute; this will take priority over the role if its defined attr_setting = user_flags_settings.get(is_attr_key, None) if attr_setting and attributes.get(attr_setting, None): # Do we have a required value for the attribute required_value = user_flags_settings.get(is_value_key, None) if required_value: # If so, check and see if the value of the attr matches the required value saml_user_attribute_value = attributes.get(attr_setting, None) matching_values = _get_matches(required_value, saml_user_attribute_value) if matching_values: logger.debug(""Giving %s %s from attribute %s with matching values %s"" % (user.username, flag, attr_setting, ', '.join(matching_values))) new_flag = True # if they don't match make sure that new_flag is false else: logger.debug( ""Refusing %s for %s because attr %s (%s) did not match value(s) %s"" % (flag, user.username, attr_setting, "", "".join(saml_user_attribute_value), ', '.join(required_value)) ) new_flag = False # If there was no required value then we can just allow them in because of the attribute else: logger.debug(""Giving %s %s from attribute %s"" % (user.username, flag, attr_setting)) new_flag = True # Get the users old flag old_value = getattr(user, ""is_%s"" % (flag)) # If we are not removing the flag and they were a system admin and now we don't want them to be just return remove_flag = user_flags_settings.get(remove_setting, True) if not remove_flag and (old_value and not new_flag): logger.debug(""Remove flag %s preventing removal of %s for %s"" % (remove_flag, flag, user.username)) return old_value, False # If the user was flagged and we are going to make them not flagged make sure there is a message if old_value and not new_flag: logger.debug(""Revoking %s from %s"" % (flag, user.username)) return new_flag, old_value != new_flag "," Helper function to set the is_superuser is_system_auditor flags for the SAML adapter Returns the new flag and whether or not it changed the flag ",24,374,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_flag(user, flag, attributes, user_flags_settings): new_flag = False is_role_key = ""is_%s_role"" % (flag) is_attr_key = ""is_%s_attr"" % (flag) is_value_key = ""is_%s_value"" % (flag) remove_setting = ""remove_%ss"" % (flag) # Check to see if we are respecting a role and, if so, does our user have that role? 
required_roles = user_flags_settings.get(is_role_key, None) if required_roles: matching_roles = _get_matches(required_roles, attributes.get('Role', [])) # We do a 2 layer check here so that we don't spit out the else message if there is no role defined if matching_roles: logger.debug(""User %s has %s role(s) %s"" % (user.username, flag, ', '.join(matching_roles))) new_flag = True else: logger.debug(""User %s is missing the %s role(s) %s"" % (user.username, flag, ', '.join(required_roles))) # Next, check to see if we are respecting an attribute; this will take priority over the role if its defined attr_setting = user_flags_settings.get(is_attr_key, None) if attr_setting and attributes.get(attr_setting, None): # Do we have a required value for the attribute required_value = user_flags_settings.get(is_value_key, None) if required_value: # If so, check and see if the value of the attr matches the required value saml_user_attribute_value = attributes.get(attr_setting, None) matching_values = _get_matches(required_value, saml_user_attribute_value) if matching_values: logger.debug(""Giving %s %s from attribute %s with matching values %s"" % (user.username, flag, attr_setting, ', '.join(matching_values))) new_flag = True # if they don't match make sure that new_flag is false else: logger.debug( ""Refusing %s for %s because attr %s (%s) did not match value(s) %s"" % (flag, user.username, attr_setting, "", "".join(saml_user_attribute_value), ', '.join(required_value)) ) new_flag = False # If there was no required value then we can just allow them in because of the attribute else: logger.debug(""Giving %s %s from attribute %s"" % (user.username, flag, attr_setting)) new_flag = True # Get the users old flag old_value = getattr(user, ""is_%s"" % (flag)) # If we are not removing the flag and they were a system admin and now we don't want them to be just return remove_flag = user_flags_settings.get(remove_setting, True) if not remove_flag and (old_value and not new_flag): logger.debug(""Remove flag %s preventing removal of %s for %s"" % (remove_flag, flag, user.username)) return old_value, False # If the user was flagged and we are going to make them not flagged make sure there is a message if old_value and not new_flag: logger.debug(""Revoking %s from %s"" % (flag, user.username)) return new_flag, old_value != new_flag ``` ###Assistant : Helper function to set the is_superuser is_system_auditor flags for the SAML adapter Returns the new flag and whether or not it changed the flag " 1599,"def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, [""b6"", ""b5"", ""b4"", ""2"", ""b3"", ""b2"", ""b1""] ) "," Test to make sure we get some backfill points ",9,30,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, [""b6"", ""b5"", ""b4"", ""2"", ""b3"", ""b2"", ""b1""] ) ``` ###Assistant : Test to make sure we get some backfill points " 1600,"def find_asteroidal_triple(G): r V = set(G.nodes) if len(V) < 6: # An asteroidal triple cannot exist in a graph with 5 or less vertices. return None component_structure = create_component_structure(G) E_complement = set(nx.complement(G).edges) for e in E_complement: u = e[0] v = e[1] u_neighborhood = set(G[u]).union([u]) v_neighborhood = set(G[v]).union([v]) union_of_neighborhoods = u_neighborhood.union(v_neighborhood) for w in V - union_of_neighborhoods: # Check for each pair of vertices whether they belong to the # same connected component when the closed neighborhood of the # third is removed. if ( component_structure[u][v] == component_structure[u][w] and component_structure[v][u] == component_structure[v][w] and component_structure[w][u] == component_structure[w][v] ): return [u, v, w] return None @not_implemented_for(""directed"") @not_implemented_for(""multigraph"")","Find an asteroidal triple in the given graph. An asteroidal triple is a triple of non-adjacent vertices such that there exists a path between any two of them which avoids the closed neighborhood of the third. It checks all independent triples of vertices and whether they are an asteroidal triple or not. This is done with the help of a data structure called a component structure. A component structure encodes information about which vertices belongs to the same connected component when the closed neighborhood of a given vertex is removed from the graph. The algorithm used to check is the trivial one, outlined in [1]_, which has a runtime of :math:`O(|V||\overline{E} + |V||E|)`, where the second term is the creation of the component structure. Parameters ---------- G : NetworkX Graph The graph to check whether is AT-free or not Returns ------- list or None An asteroidal triple is returned as a list of nodes. If no asteroidal triple exists, i.e. the graph is AT-free, then None is returned. The returned value depends on the certificate parameter. The default option is a bool which is True if the graph is AT-free, i.e. the given graph contains no asteroidal triples, and False otherwise, i.e. if the graph contains at least one asteroidal triple. Notes ----- The component structure and the algorithm is described in [1]_. The current implementation implements the trivial algorithm for simple graphs. References ---------- .. [1] Ekkehard Köhler, ""Recognizing Graphs without asteroidal triples"", Journal of Discrete Algorithms 2, pages 439-452, 2004. https://www.sciencedirect.com/science/article/pii/S157086670400019X ",253,105,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_asteroidal_triple(G): r V = set(G.nodes) if len(V) < 6: # An asteroidal triple cannot exist in a graph with 5 or less vertices. 
return None component_structure = create_component_structure(G) E_complement = set(nx.complement(G).edges) for e in E_complement: u = e[0] v = e[1] u_neighborhood = set(G[u]).union([u]) v_neighborhood = set(G[v]).union([v]) union_of_neighborhoods = u_neighborhood.union(v_neighborhood) for w in V - union_of_neighborhoods: # Check for each pair of vertices whether they belong to the # same connected component when the closed neighborhood of the # third is removed. if ( component_structure[u][v] == component_structure[u][w] and component_structure[v][u] == component_structure[v][w] and component_structure[w][u] == component_structure[w][v] ): return [u, v, w] return None @not_implemented_for(""directed"") @not_implemented_for(""multigraph"") ``` ###Assistant : Find an asteroidal triple in the given graph. An asteroidal triple is a triple of non-adjacent vertices such that there exists a path between any two of them which avoids the closed neighborhood of the third. It checks all independent triples of vertices and whether they are an asteroidal triple or not. This is done with the help of a data structure called a component structure. A component structure encodes information about which vertices belongs to the same connected component when the closed neighborhood of a given vertex is removed from the graph. The algorithm used to check is the trivial one, outlined in [1]_, which has a runtime of :math:`O(|V||\overline{E} + |V||E|)`, where the second term is the creation of the component structure. Parameters ---------- G : NetworkX Graph The graph to check whether is AT-free or not Returns ------- list or None An asteroidal triple is returned as a list of nodes. If no asteroidal triple exists, i.e. the graph is AT-free, then None is returned. The returned value depends on the certificate parameter. The default option is a bool which is True if the graph is AT-free, i.e. the given graph contains no asteroidal triples, and False otherwise, i.e. if the graph contains at least one asteroidal triple. Notes ----- The component structure and the algorithm is described in [1]_. The current implementation implements the trivial algorithm for simple graphs. References ---------- .. [1] Ekkehard Köhler, ""Recognizing Graphs without asteroidal triples"", Journal of Discrete Algorithms 2, pages 439-452, 2004. https://www.sciencedirect.com/science/article/pii/S157086670400019X " 1601,"def get_leave_entries(employee, leave_type, from_date, to_date): return frappe.db.sql( , {""from_date"": from_date, ""to_date"": to_date, ""employee"": employee, ""leave_type"": leave_type}, as_dict=1, ) @frappe.whitelist()","Returns leave entries between from_date and to_date. SELECT employee, leave_type, from_date, to_date, leaves, transaction_name, transaction_type, holiday_list, is_carry_forward, is_expired FROM `tabLeave Ledger Entry` WHERE employee=%(employee)s AND leave_type=%(leave_type)s AND docstatus=1 AND (leaves<0 OR is_expired=1) AND (from_date between %(from_date)s AND %(to_date)s OR to_date between %(from_date)s AND %(to_date)s OR (from_date < %(from_date)s AND to_date > %(to_date)s)) ",52,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_leave_entries(employee, leave_type, from_date, to_date): return frappe.db.sql( , {""from_date"": from_date, ""to_date"": to_date, ""employee"": employee, ""leave_type"": leave_type}, as_dict=1, ) @frappe.whitelist() ``` ###Assistant : Returns leave entries between from_date and to_date. 
SELECT employee, leave_type, from_date, to_date, leaves, transaction_name, transaction_type, holiday_list, is_carry_forward, is_expired FROM `tabLeave Ledger Entry` WHERE employee=%(employee)s AND leave_type=%(leave_type)s AND docstatus=1 AND (leaves<0 OR is_expired=1) AND (from_date between %(from_date)s AND %(to_date)s OR to_date between %(from_date)s AND %(to_date)s OR (from_date < %(from_date)s AND to_date > %(to_date)s)) " 1602,"def addslashes(value): return value.replace(""\\"", ""\\\\"").replace('""', '\\""').replace(""'"", ""\\'"") @register.filter(is_safe=True) @stringfilter"," Add slashes before quotes. Useful for escaping strings in CSV, for example. Less useful for escaping JavaScript; use the ``escapejs`` filter instead. ",22,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def addslashes(value): return value.replace(""\\"", ""\\\\"").replace('""', '\\""').replace(""'"", ""\\'"") @register.filter(is_safe=True) @stringfilter ``` ###Assistant : Add slashes before quotes. Useful for escaping strings in CSV, for example. Less useful for escaping JavaScript; use the ``escapejs`` filter instead. " 1603,"def _get_raw_from_config(cls) -> str: if cls.varname is None: raise TypeError(""varname should not be None"") return os.environ[cls.varname] "," Read the value from environment variable. Returns ------- str Config raw value. Raises ------ TypeError If `varname` is None. KeyError If value is absent. ",24,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_raw_from_config(cls) -> str: if cls.varname is None: raise TypeError(""varname should not be None"") return os.environ[cls.varname] ``` ###Assistant : Read the value from environment variable. Returns ------- str Config raw value. Raises ------ TypeError If `varname` is None. KeyError If value is absent. " 1604,"def is_monotonic_decreasing(self): return SeriesDefault.register(pandas.Series.is_monotonic_decreasing)(self) "," Return boolean if values in the object are monotonically decreasing. Returns ------- bool ",13,4,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_monotonic_decreasing(self): return SeriesDefault.register(pandas.Series.is_monotonic_decreasing)(self) ``` ###Assistant : Return boolean if values in the object are monotonically decreasing. Returns ------- bool " 1605,"def get(self, model, columns, filters): key = self._make_key(model, filters) conn = self.cluster.get_local_client_for_key(key) pipe = conn.pipeline() for col in columns: pipe.hget(key, f""i+{col}"") results = pipe.execute() return { col: (int(results[i]) if results[i] is not None else 0) for i, col in enumerate(columns) } "," Fetches buffered values for a model/filter. Passed columns must be integer columns. ",12,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get(self, model, columns, filters): key = self._make_key(model, filters) conn = self.cluster.get_local_client_for_key(key) pipe = conn.pipeline() for col in columns: pipe.hget(key, f""i+{col}"") results = pipe.execute() return { col: (int(results[i]) if results[i] is not None else 0) for i, col in enumerate(columns) } ``` ###Assistant : Fetches buffered values for a model/filter. Passed columns must be integer columns. 
" 1606,"def test_non_ascii_subscription_for_principal(self) -> None: iago = self.example_user(""iago"") self.assert_adding_subscriptions_for_principal( iago.id, get_realm(""zulip""), [""hümbüǵ""], policy_name=""Public"" ) "," You can subscribe other people to streams even if they containing non-ASCII characters. ",13,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_non_ascii_subscription_for_principal(self) -> None: iago = self.example_user(""iago"") self.assert_adding_subscriptions_for_principal( iago.id, get_realm(""zulip""), [""hümbüǵ""], policy_name=""Public"" ) ``` ###Assistant : You can subscribe other people to streams even if they containing non-ASCII characters. " 1607,"def add_provs(self, reader): fileids = reader.fileids() for fileid in fileids: prov, langfile = os.path.split(fileid) file_name, file_extension = os.path.splitext(langfile) if file_extension == "".tab"": lang = file_name.split(""-"")[-1] if lang in self.provenances.keys(): # We already have another resource for this lang, # so we need to further specify the lang id: lang = f""{lang}_{prov}"" self.provenances[lang] = prov ",Add languages from Multilingual Wordnet to the provenance dictionary,9,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_provs(self, reader): fileids = reader.fileids() for fileid in fileids: prov, langfile = os.path.split(fileid) file_name, file_extension = os.path.splitext(langfile) if file_extension == "".tab"": lang = file_name.split(""-"")[-1] if lang in self.provenances.keys(): # We already have another resource for this lang, # so we need to further specify the lang id: lang = f""{lang}_{prov}"" self.provenances[lang] = prov ``` ###Assistant : Add languages from Multilingual Wordnet to the provenance dictionary " 1608,"def test_thread_with_bundled_aggregations_for_latest(self) -> None: self._send_relation(RelationTypes.THREAD, ""m.room.test"") channel = self._send_relation(RelationTypes.THREAD, ""m.room.test"") thread_2 = channel.json_body[""event_id""] self._send_relation( RelationTypes.ANNOTATION, ""m.reaction"", ""a"", parent_id=thread_2 ) "," Bundled aggregations should get applied to the latest thread event. ",10,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_thread_with_bundled_aggregations_for_latest(self) -> None: self._send_relation(RelationTypes.THREAD, ""m.room.test"") channel = self._send_relation(RelationTypes.THREAD, ""m.room.test"") thread_2 = channel.json_body[""event_id""] self._send_relation( RelationTypes.ANNOTATION, ""m.reaction"", ""a"", parent_id=thread_2 ) ``` ###Assistant : Bundled aggregations should get applied to the latest thread event. " 1609,"def from_environment(cls): return cls.from_file(path=KUBE_CONFIG_DEFAULT_LOCATION) "," Factory method to produce an instance of this class using the default kube config location ",15,4,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def from_environment(cls): return cls.from_file(path=KUBE_CONFIG_DEFAULT_LOCATION) ``` ###Assistant : Factory method to produce an instance of this class using the default kube config location " 1610,"def test_thumbnail_repeated_thumbnail(self) -> None: self._test_thumbnail( ""scale"", self.test_image.expected_scaled, self.test_image.expected_found ) if not self.test_image.expected_found: return # Fetching again should work, without re-requesting the image from the # remote. params = ""?width=32&height=32&method=scale"" channel = make_request( self.reactor, FakeSite(self.thumbnail_resource, self.reactor), ""GET"", self.media_id + params, shorthand=False, await_result=False, ) self.pump() self.assertEqual(channel.code, 200) if self.test_image.expected_scaled: self.assertEqual( channel.result[""body""], self.test_image.expected_scaled, channel.result[""body""], ) # Deleting the thumbnail on disk then re-requesting it should work as # Synapse should regenerate missing thumbnails. origin, media_id = self.media_id.split(""/"") info = self.get_success(self.store.get_cached_remote_media(origin, media_id)) file_id = info[""filesystem_id""] thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir( origin, file_id ) shutil.rmtree(thumbnail_dir, ignore_errors=True) channel = make_request( self.reactor, FakeSite(self.thumbnail_resource, self.reactor), ""GET"", self.media_id + params, shorthand=False, await_result=False, ) self.pump() self.assertEqual(channel.code, 200) if self.test_image.expected_scaled: self.assertEqual( channel.result[""body""], self.test_image.expected_scaled, channel.result[""body""], ) ","Test that fetching the same thumbnail works, and deleting the on disk thumbnail regenerates it. ",15,112,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_thumbnail_repeated_thumbnail(self) -> None: self._test_thumbnail( ""scale"", self.test_image.expected_scaled, self.test_image.expected_found ) if not self.test_image.expected_found: return # Fetching again should work, without re-requesting the image from the # remote. params = ""?width=32&height=32&method=scale"" channel = make_request( self.reactor, FakeSite(self.thumbnail_resource, self.reactor), ""GET"", self.media_id + params, shorthand=False, await_result=False, ) self.pump() self.assertEqual(channel.code, 200) if self.test_image.expected_scaled: self.assertEqual( channel.result[""body""], self.test_image.expected_scaled, channel.result[""body""], ) # Deleting the thumbnail on disk then re-requesting it should work as # Synapse should regenerate missing thumbnails. origin, media_id = self.media_id.split(""/"") info = self.get_success(self.store.get_cached_remote_media(origin, media_id)) file_id = info[""filesystem_id""] thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir( origin, file_id ) shutil.rmtree(thumbnail_dir, ignore_errors=True) channel = make_request( self.reactor, FakeSite(self.thumbnail_resource, self.reactor), ""GET"", self.media_id + params, shorthand=False, await_result=False, ) self.pump() self.assertEqual(channel.code, 200) if self.test_image.expected_scaled: self.assertEqual( channel.result[""body""], self.test_image.expected_scaled, channel.result[""body""], ) ``` ###Assistant : Test that fetching the same thumbnail works, and deleting the on disk thumbnail regenerates it. 
" 1611,"def update(self) -> None: ping_cmd = [ ""ping"", ""-c"", ""1"", ""-W"", str(DEFAULT_PING_TIMEOUT), str(self._host), ] status = sp.call(ping_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL) self._state = not bool(status) ",Check if device is on and update the state. Only called if assumed state is false.,16,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self) -> None: ping_cmd = [ ""ping"", ""-c"", ""1"", ""-W"", str(DEFAULT_PING_TIMEOUT), str(self._host), ] status = sp.call(ping_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL) self._state = not bool(status) ``` ###Assistant : Check if device is on and update the state. Only called if assumed state is false. " 1612,"def get_transactions(self): df = self.__transactions[ [ ""Date"", ""Type"", ""Ticker"", ""Side"", ""Price"", ""Quantity"", ""Fees"", ""Investment"", ""Currency"", ""Sector"", ""Industry"", ""Country"", ""Region"", ] ] df = df.replace(np.nan, ""-"") df[""Date""] = df[""Date""].dt.strftime(""%Y-%m-%d"") df.sort_values(by=""Date"", ascending=False, inplace=True) return df ","Get formatted transactions Returns ------- pd.DataFrame: formatted transactions ",8,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_transactions(self): df = self.__transactions[ [ ""Date"", ""Type"", ""Ticker"", ""Side"", ""Price"", ""Quantity"", ""Fees"", ""Investment"", ""Currency"", ""Sector"", ""Industry"", ""Country"", ""Region"", ] ] df = df.replace(np.nan, ""-"") df[""Date""] = df[""Date""].dt.strftime(""%Y-%m-%d"") df.sort_values(by=""Date"", ascending=False, inplace=True) return df ``` ###Assistant : Get formatted transactions Returns ------- pd.DataFrame: formatted transactions " 1613,"def test_pick_colors(self) -> None: used_colors: Set[str] = set() color_map: Dict[int, str] = {} recipient_ids = list(range(30)) user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, { 0: ""#76ce90"", 1: ""#fae589"", 2: ""#a6c7e5"", 3: ""#e79ab5"", 4: ""#bfd56f"", 5: ""#f4ae55"", 6: ""#b0a5fd"", 7: ""#addfe5"", 8: ""#f5ce6e"", 9: ""#c2726a"", 10: ""#94c849"", 11: ""#bd86e5"", 12: ""#ee7e4a"", 13: ""#a6dcbf"", 14: ""#95a5fd"", 15: ""#53a063"", 16: ""#9987e1"", 17: ""#e4523d"", 18: ""#c2c2c2"", 19: ""#4f8de4"", 20: ""#c6a8ad"", 21: ""#e7cc4d"", 22: ""#c8bebf"", 23: ""#a47462"", # start repeating 24: ""#76ce90"", 25: ""#fae589"", 26: ""#a6c7e5"", 27: ""#e79ab5"", 28: ""#bfd56f"", 29: ""#f4ae55"", }, ) color_map = {98: ""color98"", 99: ""color99""} used_colors = set(STREAM_ASSIGNMENT_COLORS) - {""#c6a8ad"", ""#9987e1""} recipient_ids = [99, 98, 1, 2, 3, 4] user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, {98: ""color98"", 99: ""color99"", 1: ""#9987e1"", 2: ""#c6a8ad"", 3: ""#e79ab5"", 4: ""#bfd56f""}, ) used_colors = set(STREAM_ASSIGNMENT_COLORS) color_map = {} recipient_ids = [2, 26, 50, 74] user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, {2: ""#a6c7e5"", 26: ""#a6c7e5"", 50: ""#a6c7e5"", 74: ""#a6c7e5""}, ) "," If we are assigning colors to a user with 24+ streams, we have to start re-using old colors. Our algorithm basically uses recipient_id % 24, so the following code reflects the worse case scenario that our new streams have recipient ids spaced out by exact multiples of 24. 
We don't try to work around this edge case, since users who really depend on the stream colors can always just assign themselves custom colors for the streams that they really want to stand out. Even if recipient_ids were completely random, the odds of collisions are low, but it's often the case that bulk-adds are done for streams that either were or are being created at roughly the same time, so the recipient_ids tend to have even fewer collisions. ",127,157,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pick_colors(self) -> None: used_colors: Set[str] = set() color_map: Dict[int, str] = {} recipient_ids = list(range(30)) user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, { 0: ""#76ce90"", 1: ""#fae589"", 2: ""#a6c7e5"", 3: ""#e79ab5"", 4: ""#bfd56f"", 5: ""#f4ae55"", 6: ""#b0a5fd"", 7: ""#addfe5"", 8: ""#f5ce6e"", 9: ""#c2726a"", 10: ""#94c849"", 11: ""#bd86e5"", 12: ""#ee7e4a"", 13: ""#a6dcbf"", 14: ""#95a5fd"", 15: ""#53a063"", 16: ""#9987e1"", 17: ""#e4523d"", 18: ""#c2c2c2"", 19: ""#4f8de4"", 20: ""#c6a8ad"", 21: ""#e7cc4d"", 22: ""#c8bebf"", 23: ""#a47462"", # start repeating 24: ""#76ce90"", 25: ""#fae589"", 26: ""#a6c7e5"", 27: ""#e79ab5"", 28: ""#bfd56f"", 29: ""#f4ae55"", }, ) color_map = {98: ""color98"", 99: ""color99""} used_colors = set(STREAM_ASSIGNMENT_COLORS) - {""#c6a8ad"", ""#9987e1""} recipient_ids = [99, 98, 1, 2, 3, 4] user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, {98: ""color98"", 99: ""color99"", 1: ""#9987e1"", 2: ""#c6a8ad"", 3: ""#e79ab5"", 4: ""#bfd56f""}, ) used_colors = set(STREAM_ASSIGNMENT_COLORS) color_map = {} recipient_ids = [2, 26, 50, 74] user_color_map = pick_colors(used_colors, color_map, recipient_ids) self.assertEqual( user_color_map, {2: ""#a6c7e5"", 26: ""#a6c7e5"", 50: ""#a6c7e5"", 74: ""#a6c7e5""}, ) ``` ###Assistant : If we are assigning colors to a user with 24+ streams, we have to start re-using old colors. Our algorithm basically uses recipient_id % 24, so the following code reflects the worse case scenario that our new streams have recipient ids spaced out by exact multiples of 24. We don't try to work around this edge case, since users who really depend on the stream colors can always just assign themselves custom colors for the streams that they really want to stand out. Even if recipient_ids were completely random, the odds of collisions are low, but it's often the case that bulk-adds are done for streams that either were or are being created at roughly the same time, so the recipient_ids tend to have even fewer collisions. 
" 1614,"def test_import(self): data = ( ('name', 'slug', 'status', 'cf_text', 'cf_longtext', 'cf_integer', 'cf_boolean', 'cf_date', 'cf_url', 'cf_json', 'cf_select', 'cf_multiselect'), ('Site 1', 'site-1', 'active', 'ABC', 'Foo', '123', 'True', '2020-01-01', 'http://example.com/1', '{""foo"": 123}', 'Choice A', '""Choice A,Choice B""'), ('Site 2', 'site-2', 'active', 'DEF', 'Bar', '456', 'False', '2020-01-02', 'http://example.com/2', '{""bar"": 456}', 'Choice B', '""Choice B,Choice C""'), ('Site 3', 'site-3', 'active', '', '', '', '', '', '', '', '', ''), ) csv_data = '\n'.join(','.join(row) for row in data) response = self.client.post(reverse('dcim:site_import'), {'csv': csv_data}) self.assertEqual(response.status_code, 200) self.assertEqual(Site.objects.count(), 3) # Validate data for site 1 site1 = Site.objects.get(name='Site 1') self.assertEqual(len(site1.custom_field_data), 9) self.assertEqual(site1.custom_field_data['text'], 'ABC') self.assertEqual(site1.custom_field_data['longtext'], 'Foo') self.assertEqual(site1.custom_field_data['integer'], 123) self.assertEqual(site1.custom_field_data['boolean'], True) self.assertEqual(site1.custom_field_data['date'], '2020-01-01') self.assertEqual(site1.custom_field_data['url'], 'http://example.com/1') self.assertEqual(site1.custom_field_data['json'], {""foo"": 123}) self.assertEqual(site1.custom_field_data['select'], 'Choice A') self.assertEqual(site1.custom_field_data['multiselect'], ['Choice A', 'Choice B']) # Validate data for site 2 site2 = Site.objects.get(name='Site 2') self.assertEqual(len(site2.custom_field_data), 9) self.assertEqual(site2.custom_field_data['text'], 'DEF') self.assertEqual(site2.custom_field_data['longtext'], 'Bar') self.assertEqual(site2.custom_field_data['integer'], 456) self.assertEqual(site2.custom_field_data['boolean'], False) self.assertEqual(site2.custom_field_data['date'], '2020-01-02') self.assertEqual(site2.custom_field_data['url'], 'http://example.com/2') self.assertEqual(site2.custom_field_data['json'], {""bar"": 456}) self.assertEqual(site2.custom_field_data['select'], 'Choice B') self.assertEqual(site2.custom_field_data['multiselect'], ['Choice B', 'Choice C']) # No custom field data should be set for site 3 site3 = Site.objects.get(name='Site 3') self.assertFalse(any(site3.custom_field_data.values())) "," Import a Site in CSV format, including a value for each CustomField. ",12,167,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_import(self): data = ( ('name', 'slug', 'status', 'cf_text', 'cf_longtext', 'cf_integer', 'cf_boolean', 'cf_date', 'cf_url', 'cf_json', 'cf_select', 'cf_multiselect'), ('Site 1', 'site-1', 'active', 'ABC', 'Foo', '123', 'True', '2020-01-01', 'http://example.com/1', '{""foo"": 123}', 'Choice A', '""Choice A,Choice B""'), ('Site 2', 'site-2', 'active', 'DEF', 'Bar', '456', 'False', '2020-01-02', 'http://example.com/2', '{""bar"": 456}', 'Choice B', '""Choice B,Choice C""'), ('Site 3', 'site-3', 'active', '', '', '', '', '', '', '', '', ''), ) csv_data = '\n'.join(','.join(row) for row in data) response = self.client.post(reverse('dcim:site_import'), {'csv': csv_data}) self.assertEqual(response.status_code, 200) self.assertEqual(Site.objects.count(), 3) # Validate data for site 1 site1 = Site.objects.get(name='Site 1') self.assertEqual(len(site1.custom_field_data), 9) self.assertEqual(site1.custom_field_data['text'], 'ABC') self.assertEqual(site1.custom_field_data['longtext'], 'Foo') self.assertEqual(site1.custom_field_data['integer'], 123) self.assertEqual(site1.custom_field_data['boolean'], True) self.assertEqual(site1.custom_field_data['date'], '2020-01-01') self.assertEqual(site1.custom_field_data['url'], 'http://example.com/1') self.assertEqual(site1.custom_field_data['json'], {""foo"": 123}) self.assertEqual(site1.custom_field_data['select'], 'Choice A') self.assertEqual(site1.custom_field_data['multiselect'], ['Choice A', 'Choice B']) # Validate data for site 2 site2 = Site.objects.get(name='Site 2') self.assertEqual(len(site2.custom_field_data), 9) self.assertEqual(site2.custom_field_data['text'], 'DEF') self.assertEqual(site2.custom_field_data['longtext'], 'Bar') self.assertEqual(site2.custom_field_data['integer'], 456) self.assertEqual(site2.custom_field_data['boolean'], False) self.assertEqual(site2.custom_field_data['date'], '2020-01-02') self.assertEqual(site2.custom_field_data['url'], 'http://example.com/2') self.assertEqual(site2.custom_field_data['json'], {""bar"": 456}) self.assertEqual(site2.custom_field_data['select'], 'Choice B') self.assertEqual(site2.custom_field_data['multiselect'], ['Choice B', 'Choice C']) # No custom field data should be set for site 3 site3 = Site.objects.get(name='Site 3') self.assertFalse(any(site3.custom_field_data.values())) ``` ###Assistant : Import a Site in CSV format, including a value for each CustomField. " 1615,"def build_pattern(): #bare = set() for module, replace in list(MAPPING.items()): for old_attr, new_attr in list(replace.items()): LOOKUP[(module, old_attr)] = new_attr #bare.add(module) #bare.add(old_attr) #yield % (module, module) yield % (module, old_attr, old_attr) yield % (module, old_attr) #yield % alternates(bare) "," # import_name< 'import' (module=%r # | dotted_as_names< any* module=%r any* >) > # import_from< 'from' module_name=%r 'import' ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) > power< module_name=%r trailer< '.' attr_name=%r > any* > bare_name=%s",35,37,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def build_pattern(): #bare = set() for module, replace in list(MAPPING.items()): for old_attr, new_attr in list(replace.items()): LOOKUP[(module, old_attr)] = new_attr #bare.add(module) #bare.add(old_attr) #yield % (module, module) yield % (module, old_attr, old_attr) yield % (module, old_attr) #yield % alternates(bare) ``` ###Assistant : # import_name< 'import' (module=%r # | dotted_as_names< any* module=%r any* >) > # import_from< 'from' module_name=%r 'import' ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) > power< module_name=%r trailer< '.' attr_name=%r > any* > bare_name=%s " 1616,"async def async_refresh_sensor(self) -> None: _LOGGER.debug(""Refreshing library sensor for '%s'"", self.name) try: await self.hass.async_add_executor_job(self._update_state_and_attrs) self._attr_available = True except NotFound: self._attr_available = False except requests.exceptions.RequestException as err: _LOGGER.error( ""Could not update library sensor for '%s': %s"", self.library_section.title, err, ) self._attr_available = False self.async_write_ha_state() ",Update state and attributes for the library sensor.,8,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_refresh_sensor(self) -> None: _LOGGER.debug(""Refreshing library sensor for '%s'"", self.name) try: await self.hass.async_add_executor_job(self._update_state_and_attrs) self._attr_available = True except NotFound: self._attr_available = False except requests.exceptions.RequestException as err: _LOGGER.error( ""Could not update library sensor for '%s': %s"", self.library_section.title, err, ) self._attr_available = False self.async_write_ha_state() ``` ###Assistant : Update state and attributes for the library sensor. " 1617,"def __hash__(self): return hash(self.name) + hash(self.version) + hash(self.source_url) "," Compute hash in a way which matches the equality test. ",10,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __hash__(self): return hash(self.name) + hash(self.version) + hash(self.source_url) ``` ###Assistant : Compute hash in a way which matches the equality test. " 1618,"def _min_nodes(self) -> int: if self._side == ""gblock"": return self._config[""fc_gblock_min_nodes""] retval = self._scale_filters(self._config[""fc_min_filters""]) retval = int(retval * self._config[""fc_dimensions""] ** 2) return retval "," int: The number of nodes for the first Dense. For non g-block layers this will be the given minimum filters multiplied by the dimensions squared. For g-block layers, this is the given value ",33,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _min_nodes(self) -> int: if self._side == ""gblock"": return self._config[""fc_gblock_min_nodes""] retval = self._scale_filters(self._config[""fc_min_filters""]) retval = int(retval * self._config[""fc_dimensions""] ** 2) return retval ``` ###Assistant : int: The number of nodes for the first Dense. For non g-block layers this will be the given minimum filters multiplied by the dimensions squared. For g-block layers, this is the given value " 1619,"def _get(cls) -> dict: custom_parameters = super().get() result = cls.default.copy() result.update( {key.replace(""-"", ""_""): value for key, value in custom_parameters.items()} ) return result "," Get the resulted command-line options. 
Returns ------- dict Decoded and verified config value. ",13,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get(cls) -> dict: custom_parameters = super().get() result = cls.default.copy() result.update( {key.replace(""-"", ""_""): value for key, value in custom_parameters.items()} ) return result ``` ###Assistant : Get the resulted command-line options. Returns ------- dict Decoded and verified config value. " 1620,"def add(self, node, *predecessors): if self._ready_nodes is not None: raise ValueError(""Nodes cannot be added after a call to prepare()"") # Create the node -> predecessor edges nodeinfo = self._get_nodeinfo(node) nodeinfo.npredecessors += len(predecessors) # Create the predecessor -> node edges for pred in predecessors: pred_info = self._get_nodeinfo(pred) pred_info.successors.append(node) ","Add a new node and its predecessors to the graph. Both the *node* and all elements in *predecessors* must be hashable. If called multiple times with the same node argument, the set of dependencies will be the union of all dependencies passed in. It is possible to add a node with no dependencies (*predecessors* is not provided) as well as provide a dependency twice. If a node that has not been provided before is included among *predecessors* it will be automatically added to the graph with no predecessors of its own. Raises ValueError if called after ""prepare"". ",97,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add(self, node, *predecessors): if self._ready_nodes is not None: raise ValueError(""Nodes cannot be added after a call to prepare()"") # Create the node -> predecessor edges nodeinfo = self._get_nodeinfo(node) nodeinfo.npredecessors += len(predecessors) # Create the predecessor -> node edges for pred in predecessors: pred_info = self._get_nodeinfo(pred) pred_info.successors.append(node) ``` ###Assistant : Add a new node and its predecessors to the graph. Both the *node* and all elements in *predecessors* must be hashable. If called multiple times with the same node argument, the set of dependencies will be the union of all dependencies passed in. It is possible to add a node with no dependencies (*predecessors* is not provided) as well as provide a dependency twice. If a node that has not been provided before is included among *predecessors* it will be automatically added to the graph with no predecessors of its own. Raises ValueError if called after ""prepare"". 
" 1621,"def test_stream_slices_with_state_and_slices(self, api, async_manager_mock, start_date): end_date = start_date + duration(days=10) cursor_value = start_date + duration(days=5) state = { AdsInsights.cursor_field: cursor_value.date().isoformat(), ""slices"": [(cursor_value + duration(days=1)).date().isoformat(), (cursor_value + duration(days=3)).date().isoformat()], } stream = AdsInsights(api=api, start_date=start_date, end_date=end_date) async_manager_mock.completed_jobs.return_value = [1, 2, 3] slices = list(stream.stream_slices(stream_state=state, sync_mode=SyncMode.incremental)) assert slices == [{""insight_job"": 1}, {""insight_job"": 2}, {""insight_job"": 3}] async_manager_mock.assert_called_once() args, kwargs = async_manager_mock.call_args generated_jobs = list(kwargs[""jobs""]) assert len(generated_jobs) == (end_date - cursor_value).days - 2, ""should be 2 slices short because of state"" assert generated_jobs[0].interval.start == cursor_value.date() + duration(days=2) assert generated_jobs[1].interval.start == cursor_value.date() + duration(days=4) ","Stream will use cursor_value from state, but will skip saved slices",11,87,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stream_slices_with_state_and_slices(self, api, async_manager_mock, start_date): end_date = start_date + duration(days=10) cursor_value = start_date + duration(days=5) state = { AdsInsights.cursor_field: cursor_value.date().isoformat(), ""slices"": [(cursor_value + duration(days=1)).date().isoformat(), (cursor_value + duration(days=3)).date().isoformat()], } stream = AdsInsights(api=api, start_date=start_date, end_date=end_date) async_manager_mock.completed_jobs.return_value = [1, 2, 3] slices = list(stream.stream_slices(stream_state=state, sync_mode=SyncMode.incremental)) assert slices == [{""insight_job"": 1}, {""insight_job"": 2}, {""insight_job"": 3}] async_manager_mock.assert_called_once() args, kwargs = async_manager_mock.call_args generated_jobs = list(kwargs[""jobs""]) assert len(generated_jobs) == (end_date - cursor_value).days - 2, ""should be 2 slices short because of state"" assert generated_jobs[0].interval.start == cursor_value.date() + duration(days=2) assert generated_jobs[1].interval.start == cursor_value.date() + duration(days=4) ``` ###Assistant : Stream will use cursor_value from state, but will skip saved slices " 1622,"def _test_readonly_foreignkey_links(self, admin_site): chapter = Chapter.objects.create( title=""Chapter 1"", content=""content"", book=Book.objects.create(name=""Book 1""), ) language = Language.objects.create(iso=""_40"", name=""Test"") obj = ReadOnlyRelatedField.objects.create( chapter=chapter, language=language, user=self.superuser, ) response = self.client.get( reverse( f""{admin_site}:admin_views_readonlyrelatedfield_change"", args=(obj.pk,) ), ) # Related ForeignKey object registered in admin. user_url = reverse(f""{admin_site}:auth_user_change"", args=(self.superuser.pk,)) self.assertContains( response, '' % user_url, html=True, ) # Related ForeignKey with the string primary key registered in admin. language_url = reverse( f""{admin_site}:admin_views_language_change"", args=(quote(language.pk),), ) self.assertContains( response, '' % language_url, html=True, ) # Related ForeignKey object not registered in admin. self.assertContains( response, '
    Chapter 1
    ', html=True ) "," ForeignKey readonly fields render as links if the target model is registered in admin. ",14,92,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _test_readonly_foreignkey_links(self, admin_site): chapter = Chapter.objects.create( title=""Chapter 1"", content=""content"", book=Book.objects.create(name=""Book 1""), ) language = Language.objects.create(iso=""_40"", name=""Test"") obj = ReadOnlyRelatedField.objects.create( chapter=chapter, language=language, user=self.superuser, ) response = self.client.get( reverse( f""{admin_site}:admin_views_readonlyrelatedfield_change"", args=(obj.pk,) ), ) # Related ForeignKey object registered in admin. user_url = reverse(f""{admin_site}:auth_user_change"", args=(self.superuser.pk,)) self.assertContains( response, '' % user_url, html=True, ) # Related ForeignKey with the string primary key registered in admin. language_url = reverse( f""{admin_site}:admin_views_language_change"", args=(quote(language.pk),), ) self.assertContains( response, '' % language_url, html=True, ) # Related ForeignKey object not registered in admin. self.assertContains( response, '
    Chapter 1
    ', html=True ) ``` ###Assistant : ForeignKey readonly fields render as links if the target model is registered in admin. " 1623,"def execute(): for project in frappe.get_all(""Project"", fields=[""name"", ""percent_complete_method""]): total = frappe.db.count(""Task"", dict(project=project.name)) if project.percent_complete_method == ""Task Completion"" and total > 0: completed = frappe.db.sql( , project.name, )[0][0] percent_complete = flt(flt(completed) / total * 100, 2) if project.percent_complete != percent_complete: frappe.db.set_value(""Project"", project.name, ""percent_complete"", percent_complete) if percent_complete == 100: frappe.db.set_value(""Project"", project.name, ""status"", ""Completed"") ","select count(name) from tabTask where project=%s and status in ('Cancelled', 'Completed')",11,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute(): for project in frappe.get_all(""Project"", fields=[""name"", ""percent_complete_method""]): total = frappe.db.count(""Task"", dict(project=project.name)) if project.percent_complete_method == ""Task Completion"" and total > 0: completed = frappe.db.sql( , project.name, )[0][0] percent_complete = flt(flt(completed) / total * 100, 2) if project.percent_complete != percent_complete: frappe.db.set_value(""Project"", project.name, ""percent_complete"", percent_complete) if percent_complete == 100: frappe.db.set_value(""Project"", project.name, ""status"", ""Completed"") ``` ###Assistant : select count(name) from tabTask where project=%s and status in ('Cancelled', 'Completed') " 1624,"def mathematica(s, additional_translations=None): parser = MathematicaParser(additional_translations) if additional_translations is not None: SymPyDeprecationWarning( feature=""additional_translations parameter for the Mathematica parser"", last_supported_version=""1.9"", useinstead=""Use SymPy's .replace( ) or .subs( ) methods on the output expression"", issue=""23042"", ).warn() return sympify(parser._parse_old(s)) return parser.parse(s) "," Translate a string containing a Wolfram Mathematica expression to a SymPy expression. If the translator is unable to find a suitable SymPy expression, the ``FullForm`` of the Mathematica expression will be output, using SymPy ``Function`` objects as nodes of the syntax tree. Examples ======== >>> from sympy.parsing.mathematica import mathematica >>> mathematica(""Sin[x]^2 Tan[y]"") sin(x)**2*tan(y) >>> e = mathematica(""F[7,5,3]"") >>> e F(7, 5, 3) >>> from sympy import Function, Max, Min >>> e.replace(Function(""F""), lambda *x: Max(*x)*Min(*x)) 21 Both standard input form and Mathematica full form are supported: >>> mathematica(""x*(a + b)"") x*(a + b) >>> mathematica(""Times[x, Plus[a, b]]"") x*(a + b) To get a matrix from Wolfram's code: >>> m = mathematica(""{{a, b}, {c, d}}"") >>> m ((a, b), (c, d)) >>> from sympy import Matrix >>> Matrix(m) Matrix([ [a, b], [c, d]]) If the translation into equivalent SymPy expressions fails, an SymPy expression equivalent to Wolfram Mathematica's ""FullForm"" will be created: >>> mathematica(""x_."") Optional(Pattern(x, Blank())) >>> mathematica(""Plus @@ {x, y, z}"") Apply(Plus, (x, y, z)) >>> mathematica(""f[x_, 3] := x^3 /; x > 0"") SetDelayed(f(Pattern(x, Blank()), 3), Condition(x**3, x > 0)) ",180,37,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def mathematica(s, additional_translations=None): parser = MathematicaParser(additional_translations) if additional_translations is not None: SymPyDeprecationWarning( feature=""additional_translations parameter for the Mathematica parser"", last_supported_version=""1.9"", useinstead=""Use SymPy's .replace( ) or .subs( ) methods on the output expression"", issue=""23042"", ).warn() return sympify(parser._parse_old(s)) return parser.parse(s) ``` ###Assistant : Translate a string containing a Wolfram Mathematica expression to a SymPy expression. If the translator is unable to find a suitable SymPy expression, the ``FullForm`` of the Mathematica expression will be output, using SymPy ``Function`` objects as nodes of the syntax tree. Examples ======== >>> from sympy.parsing.mathematica import mathematica >>> mathematica(""Sin[x]^2 Tan[y]"") sin(x)**2*tan(y) >>> e = mathematica(""F[7,5,3]"") >>> e F(7, 5, 3) >>> from sympy import Function, Max, Min >>> e.replace(Function(""F""), lambda *x: Max(*x)*Min(*x)) 21 Both standard input form and Mathematica full form are supported: >>> mathematica(""x*(a + b)"") x*(a + b) >>> mathematica(""Times[x, Plus[a, b]]"") x*(a + b) To get a matrix from Wolfram's code: >>> m = mathematica(""{{a, b}, {c, d}}"") >>> m ((a, b), (c, d)) >>> from sympy import Matrix >>> Matrix(m) Matrix([ [a, b], [c, d]]) If the translation into equivalent SymPy expressions fails, an SymPy expression equivalent to Wolfram Mathematica's ""FullForm"" will be created: >>> mathematica(""x_."") Optional(Pattern(x, Blank())) >>> mathematica(""Plus @@ {x, y, z}"") Apply(Plus, (x, y, z)) >>> mathematica(""f[x_, 3] := x^3 /; x > 0"") SetDelayed(f(Pattern(x, Blank()), 3), Condition(x**3, x > 0)) " 1625,"def update_company_current_month_sales(company): current_month_year = formatdate(today(), ""MM-yyyy"") results = frappe.db.sql( .format( current_month_year=current_month_year, company=frappe.db.escape(company) ), as_dict=True, ) monthly_total = results[0][""total""] if len(results) > 0 else 0 frappe.db.set_value(""Company"", company, ""total_monthly_sales"", monthly_total) "," SELECT SUM(base_grand_total) AS total, DATE_FORMAT(`posting_date`, '%m-%Y') AS month_year FROM `tabSales Invoice` WHERE DATE_FORMAT(`posting_date`, '%m-%Y') = '{current_month_year}' AND docstatus = 1 AND company = {company} GROUP BY month_year ",27,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_company_current_month_sales(company): current_month_year = formatdate(today(), ""MM-yyyy"") results = frappe.db.sql( .format( current_month_year=current_month_year, company=frappe.db.escape(company) ), as_dict=True, ) monthly_total = results[0][""total""] if len(results) > 0 else 0 frappe.db.set_value(""Company"", company, ""total_monthly_sales"", monthly_total) ``` ###Assistant : SELECT SUM(base_grand_total) AS total, DATE_FORMAT(`posting_date`, '%m-%Y') AS month_year FROM `tabSales Invoice` WHERE DATE_FORMAT(`posting_date`, '%m-%Y') = '{current_month_year}' AND docstatus = 1 AND company = {company} GROUP BY month_year " 1626,"def metadata_version(self): # type: () -> Optional[str] raise NotImplementedError() ","Value of ""Metadata-Version:"" in the distribution, if available.",8,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def metadata_version(self): # type: () -> Optional[str] raise NotImplementedError() ``` ###Assistant : Value of ""Metadata-Version:"" in the distribution, if available. " 1627,"def call_ca(self, _): if self.screen_tickers: self.queue = ca_controller.ComparisonAnalysisController( self.screen_tickers, self.queue ).menu(custom_path_menu_above=""/stocks/"") else: print(""Some tickers must be screened first through one of the presets!\n"") ",Call the comparison analysis menu with selected tickers,8,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_ca(self, _): if self.screen_tickers: self.queue = ca_controller.ComparisonAnalysisController( self.screen_tickers, self.queue ).menu(custom_path_menu_above=""/stocks/"") else: print(""Some tickers must be screened first through one of the presets!\n"") ``` ###Assistant : Call the comparison analysis menu with selected tickers " 1628,"def delete_tasks_predictions(project, queryset, **kwargs): task_ids = queryset.values_list('id', flat=True) predictions = Prediction.objects.filter(task__id__in=task_ids) count = predictions.count() predictions.delete() queryset.update(updated_at=datetime.now()) return {'processed_items': count, 'detail': 'Deleted ' + str(count) + ' predictions'} actions = [ { 'entry_point': retrieve_tasks_predictions, 'permission': all_permissions.predictions_any, 'title': 'Retrieve Predictions', 'order': 90, 'dialog': { 'text': 'Send the selected tasks to all ML backends connected to the project.' 'This operation might be abruptly interrupted due to a timeout. ' 'The recommended way to get predictions is to update tasks using the Label Studio API.' 'See more in the documentation.' 'Please confirm your action.', 'type': 'confirm' } }, { 'entry_point': delete_tasks, 'permission': all_permissions.tasks_delete, 'title': 'Delete Tasks', 'order': 100, 'reload': True, 'dialog': { 'text': 'You are going to delete the selected tasks. Please confirm your action.', 'type': 'confirm' } }, { 'entry_point': delete_tasks_annotations, 'permission': all_permissions.tasks_delete, 'title': 'Delete Annotations', 'order': 101, 'dialog': { 'text': 'You are going to delete all annotations from the selected tasks. Please confirm your action.', 'type': 'confirm' } }, { 'entry_point': delete_tasks_predictions, 'permission': all_permissions.predictions_any, 'title': 'Delete Predictions', 'order': 102, 'dialog': { 'text': 'You are going to delete all predictions from the selected tasks. Please confirm your action.', 'type': 'confirm' } } ] "," Delete all predictions by tasks ids :param project: project instance :param queryset: filtered tasks db queryset ",16,191,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delete_tasks_predictions(project, queryset, **kwargs): task_ids = queryset.values_list('id', flat=True) predictions = Prediction.objects.filter(task__id__in=task_ids) count = predictions.count() predictions.delete() queryset.update(updated_at=datetime.now()) return {'processed_items': count, 'detail': 'Deleted ' + str(count) + ' predictions'} actions = [ { 'entry_point': retrieve_tasks_predictions, 'permission': all_permissions.predictions_any, 'title': 'Retrieve Predictions', 'order': 90, 'dialog': { 'text': 'Send the selected tasks to all ML backends connected to the project.' 'This operation might be abruptly interrupted due to a timeout. 
' 'The recommended way to get predictions is to update tasks using the Label Studio API.' 'See more in the documentation.' 'Please confirm your action.', 'type': 'confirm' } }, { 'entry_point': delete_tasks, 'permission': all_permissions.tasks_delete, 'title': 'Delete Tasks', 'order': 100, 'reload': True, 'dialog': { 'text': 'You are going to delete the selected tasks. Please confirm your action.', 'type': 'confirm' } }, { 'entry_point': delete_tasks_annotations, 'permission': all_permissions.tasks_delete, 'title': 'Delete Annotations', 'order': 101, 'dialog': { 'text': 'You are going to delete all annotations from the selected tasks. Please confirm your action.', 'type': 'confirm' } }, { 'entry_point': delete_tasks_predictions, 'permission': all_permissions.predictions_any, 'title': 'Delete Predictions', 'order': 102, 'dialog': { 'text': 'You are going to delete all predictions from the selected tasks. Please confirm your action.', 'type': 'confirm' } } ] ``` ###Assistant : Delete all predictions by tasks ids :param project: project instance :param queryset: filtered tasks db queryset " 1629,"def clean_up(self): if not self._cleaned: for ref in self.referenced_paths(): self._reference_counter[ref] -= 1 if self._reference_counter[ref] <= 0: os.remove(ref) if self._reference_counter[ref] < 0: _logger.warning('Referance counter error, the number of %s is %d', ref, self._reference_counter[ref]) self._cleaned = True else: _logger.warning('Already clean up task %d', self.task_id) "," Counter of referenced file paths subtract 1. If the counter reach 0, then delete the file. ",16,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clean_up(self): if not self._cleaned: for ref in self.referenced_paths(): self._reference_counter[ref] -= 1 if self._reference_counter[ref] <= 0: os.remove(ref) if self._reference_counter[ref] < 0: _logger.warning('Referance counter error, the number of %s is %d', ref, self._reference_counter[ref]) self._cleaned = True else: _logger.warning('Already clean up task %d', self.task_id) ``` ###Assistant : Counter of referenced file paths subtract 1. If the counter reach 0, then delete the file. " 1630,"def load_backend(backend_name): # This backend was renamed in Django 1.9. if backend_name == ""django.db.backends.postgresql_psycopg2"": backend_name = ""django.db.backends.postgresql"" try: return import_module(""%s.base"" % backend_name) except ImportError as e_user: # The database backend wasn't found. Display a helpful error message # listing all built-in database backends. import django.db.backends builtin_backends = [ name for _, name, ispkg in pkgutil.iter_modules(django.db.backends.__path__) if ispkg and name not in {""base"", ""dummy""} ] if backend_name not in [""django.db.backends.%s"" % b for b in builtin_backends]: backend_reprs = map(repr, sorted(builtin_backends)) raise ImproperlyConfigured( ""%r isn't an available database backend or couldn't be "" ""imported. Check the above exception. To use one of the "" ""built-in backends, use 'django.db.backends.XXX', where XXX "" ""is one of:\n"" "" %s"" % (backend_name, "", "".join(backend_reprs)) ) from e_user else: # If there's some other error, this must be an error in Django raise "," Return a database backend's ""base"" module given a fully qualified database backend name, or raise an error if it doesn't exist. ",21,136,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def load_backend(backend_name): # This backend was renamed in Django 1.9. if backend_name == ""django.db.backends.postgresql_psycopg2"": backend_name = ""django.db.backends.postgresql"" try: return import_module(""%s.base"" % backend_name) except ImportError as e_user: # The database backend wasn't found. Display a helpful error message # listing all built-in database backends. import django.db.backends builtin_backends = [ name for _, name, ispkg in pkgutil.iter_modules(django.db.backends.__path__) if ispkg and name not in {""base"", ""dummy""} ] if backend_name not in [""django.db.backends.%s"" % b for b in builtin_backends]: backend_reprs = map(repr, sorted(builtin_backends)) raise ImproperlyConfigured( ""%r isn't an available database backend or couldn't be "" ""imported. Check the above exception. To use one of the "" ""built-in backends, use 'django.db.backends.XXX', where XXX "" ""is one of:\n"" "" %s"" % (backend_name, "", "".join(backend_reprs)) ) from e_user else: # If there's some other error, this must be an error in Django raise ``` ###Assistant : Return a database backend's ""base"" module given a fully qualified database backend name, or raise an error if it doesn't exist. " 1631,"def explode_superdims(sizes, dims): strides_to_sizes = {stride: size for size, stride in zip(sizes, strides_for_sizes(sizes))} dims = list(reversed(dims)) final_dims = [] for size, stride in dims: target_size = strides_to_sizes[stride] new_dims = [] while size > target_size: assert target_size > 1 # Ensure progress assert size % target_size == 0 new_dims.append((target_size, stride)) size //= target_size stride *= target_size target_size = strides_to_sizes[stride] assert size == target_size new_dims.append((size, stride)) final_dims += reversed(new_dims) return final_dims ","Explode superdims to fit a known shape. The unflattening process might mistakenly generate too few too large dimensions. For example, ``unflatten_superdims(np.arange(n))`` always returns ``[(n, 1)]``. This function takes a list of such contiguous super-dimensions and splits them into smaller dimensions such that:: set(map(fst, explode_superdims(sizes, dims))) == set(sizes) ",47,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def explode_superdims(sizes, dims): strides_to_sizes = {stride: size for size, stride in zip(sizes, strides_for_sizes(sizes))} dims = list(reversed(dims)) final_dims = [] for size, stride in dims: target_size = strides_to_sizes[stride] new_dims = [] while size > target_size: assert target_size > 1 # Ensure progress assert size % target_size == 0 new_dims.append((target_size, stride)) size //= target_size stride *= target_size target_size = strides_to_sizes[stride] assert size == target_size new_dims.append((size, stride)) final_dims += reversed(new_dims) return final_dims ``` ###Assistant : Explode superdims to fit a known shape. The unflattening process might mistakenly generate too few too large dimensions. For example, ``unflatten_superdims(np.arange(n))`` always returns ``[(n, 1)]``. 
This function takes a list of such contiguous super-dimensions and splits them into smaller dimensions such that:: set(map(fst, explode_superdims(sizes, dims))) == set(sizes) " 1632,"def path_weight(G, path, weight): multigraph = G.is_multigraph() cost = 0 if not nx.is_path(G, path): raise nx.NetworkXNoPath(""path does not exist"") for node, nbr in nx.utils.pairwise(path): if multigraph: cost += min(v[weight] for v in G[node][nbr].values()) else: cost += G[node][nbr][weight] return cost ","Returns total cost associated with specified path and weight Parameters ---------- G : graph A NetworkX graph. path: list A list of node labels which defines the path to traverse weight: string A string indicating which edge attribute to use for path cost Returns ------- cost: int or float An integer or a float representing the total cost with respect to the specified weight of the specified path Raises ------ NetworkXNoPath If the specified edge does not exist. ",78,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def path_weight(G, path, weight): multigraph = G.is_multigraph() cost = 0 if not nx.is_path(G, path): raise nx.NetworkXNoPath(""path does not exist"") for node, nbr in nx.utils.pairwise(path): if multigraph: cost += min(v[weight] for v in G[node][nbr].values()) else: cost += G[node][nbr][weight] return cost ``` ###Assistant : Returns total cost associated with specified path and weight Parameters ---------- G : graph A NetworkX graph. path: list A list of node labels which defines the path to traverse weight: string A string indicating which edge attribute to use for path cost Returns ------- cost: int or float An integer or a float representing the total cost with respect to the specified weight of the specified path Raises ------ NetworkXNoPath If the specified edge does not exist. " 1633,"def _run_sql(self, sql, params, raw=True, output=False, latest=False): toget = 'source_raw' if raw else 'source' sqlfrom = ""history"" if output: sqlfrom = ""history LEFT JOIN output_history USING (session, line)"" toget = ""history.%s, output_history.output"" % toget if latest: toget += "", MAX(session * 128 * 1024 + line)"" this_querry = ""SELECT session, line, %s FROM %s "" % (toget, sqlfrom) + sql cur = self.db.execute(this_querry, params) if latest: cur = (row[:-1] for row in cur) if output: # Regroup into 3-tuples, and parse JSON return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur) return cur ","Prepares and runs an SQL query for the history database. Parameters ---------- sql : str Any filtering expressions to go after SELECT ... FROM ... params : tuple Parameters passed to the SQL query (to replace ""?"") raw, output : bool See :meth:`get_range` latest : bool Select rows with max (session, line) Returns ------- Tuples as :meth:`get_range` ",57,96,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _run_sql(self, sql, params, raw=True, output=False, latest=False): toget = 'source_raw' if raw else 'source' sqlfrom = ""history"" if output: sqlfrom = ""history LEFT JOIN output_history USING (session, line)"" toget = ""history.%s, output_history.output"" % toget if latest: toget += "", MAX(session * 128 * 1024 + line)"" this_querry = ""SELECT session, line, %s FROM %s "" % (toget, sqlfrom) + sql cur = self.db.execute(this_querry, params) if latest: cur = (row[:-1] for row in cur) if output: # Regroup into 3-tuples, and parse JSON return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur) return cur ``` ###Assistant : Prepares and runs an SQL query for the history database. Parameters ---------- sql : str Any filtering expressions to go after SELECT ... FROM ... params : tuple Parameters passed to the SQL query (to replace ""?"") raw, output : bool See :meth:`get_range` latest : bool Select rows with max (session, line) Returns ------- Tuples as :meth:`get_range` " 1634,"def __add__(self, other): if isinstance(other, PathSpec): return PathSpec(self.patterns + other.patterns) else: return NotImplemented "," Combines the :attr:`Pathspec.patterns` patterns from two :class:`PathSpec` instances. ",8,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __add__(self, other): if isinstance(other, PathSpec): return PathSpec(self.patterns + other.patterns) else: return NotImplemented ``` ###Assistant : Combines the :attr:`Pathspec.patterns` patterns from two :class:`PathSpec` instances. " 1635,"def address(self): if use_gcs_for_bootstrap(): return self._gcs_address return self._redis_address ","Get the address for bootstrapping, e.g. the address to pass to `ray start` or `ray.int()` to start worker nodes, that has been converted to ip:port format. ",26,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def address(self): if use_gcs_for_bootstrap(): return self._gcs_address return self._redis_address ``` ###Assistant : Get the address for bootstrapping, e.g. the address to pass to `ray start` or `ray.int()` to start worker nodes, that has been converted to ip:port format. " 1636,"def similarity(self, texts=[], data={}, use_gpu=False, batch_size=1): if use_gpu: try: _places = os.environ[""CUDA_VISIBLE_DEVICES""] int(_places[0]) except: raise RuntimeError( ""Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. 
If you wanna use gpu, please set CUDA_VISIBLE_DEVICES as cuda_device_id."" ) data = self.check_data(texts, data) start_idx = 0 iteration = int(math.ceil(len(data['text_1']) / batch_size)) results = [] for i in range(iteration): batch_data = {'text_1': [], 'text_2': []} if i < (iteration - 1): batch_data['text_1'] = data['text_1'][start_idx:(start_idx + batch_size)] batch_data['text_2'] = data['text_2'][start_idx:(start_idx + batch_size)] else: batch_data['text_1'] = data['text_1'][start_idx:(start_idx + batch_size)] batch_data['text_2'] = data['text_2'][start_idx:(start_idx + batch_size)] start_idx = start_idx + batch_size processed_results = preprocess(self.word_seg_module, self.vocab, batch_data, use_gpu, batch_size) data_1, lod_1, shape_1 = self._texts_process(processed_results[""text_1""]) data_2, lod_2, shape_2 = self._texts_process(processed_results[""text_2""]) predictor = self.gpu_predictor if use_gpu else self.cpu_predictor input_names = predictor.get_input_names() input_handle = predictor.get_input_handle(input_names[0]) input_handle.copy_from_cpu(data_1) input_handle.set_lod(lod_1) input_handle.reshape(shape_1) input_handle = predictor.get_input_handle(input_names[1]) input_handle.copy_from_cpu(data_2) input_handle.set_lod(lod_2) input_handle.reshape(shape_2) predictor.run() output_names = predictor.get_output_names() output_handle = predictor.get_output_handle(output_names[1]) batch_out = output_handle.copy_to_cpu() batch_result = postprocess(batch_out, processed_results) results += batch_result return results "," Get the sentiment prediction results results with the texts as input Args: texts(list): the input texts to be predicted which the first element is text_1(list) and the second element is text_2(list), such as [['这道题很难'], ['这道题不简单']] if texts not data. data(dict): key must be 'text_1' and 'text_2', value is the texts(list) to be predicted use_gpu(bool): whether use gpu to predict or not batch_size(int): the program deals once with one batch Returns: results(list): the word segmentation results ",75,149,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def similarity(self, texts=[], data={}, use_gpu=False, batch_size=1): if use_gpu: try: _places = os.environ[""CUDA_VISIBLE_DEVICES""] int(_places[0]) except: raise RuntimeError( ""Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. 
If you wanna use gpu, please set CUDA_VISIBLE_DEVICES as cuda_device_id."" ) data = self.check_data(texts, data) start_idx = 0 iteration = int(math.ceil(len(data['text_1']) / batch_size)) results = [] for i in range(iteration): batch_data = {'text_1': [], 'text_2': []} if i < (iteration - 1): batch_data['text_1'] = data['text_1'][start_idx:(start_idx + batch_size)] batch_data['text_2'] = data['text_2'][start_idx:(start_idx + batch_size)] else: batch_data['text_1'] = data['text_1'][start_idx:(start_idx + batch_size)] batch_data['text_2'] = data['text_2'][start_idx:(start_idx + batch_size)] start_idx = start_idx + batch_size processed_results = preprocess(self.word_seg_module, self.vocab, batch_data, use_gpu, batch_size) data_1, lod_1, shape_1 = self._texts_process(processed_results[""text_1""]) data_2, lod_2, shape_2 = self._texts_process(processed_results[""text_2""]) predictor = self.gpu_predictor if use_gpu else self.cpu_predictor input_names = predictor.get_input_names() input_handle = predictor.get_input_handle(input_names[0]) input_handle.copy_from_cpu(data_1) input_handle.set_lod(lod_1) input_handle.reshape(shape_1) input_handle = predictor.get_input_handle(input_names[1]) input_handle.copy_from_cpu(data_2) input_handle.set_lod(lod_2) input_handle.reshape(shape_2) predictor.run() output_names = predictor.get_output_names() output_handle = predictor.get_output_handle(output_names[1]) batch_out = output_handle.copy_to_cpu() batch_result = postprocess(batch_out, processed_results) results += batch_result return results ``` ###Assistant : Get the sentiment prediction results results with the texts as input Args: texts(list): the input texts to be predicted which the first element is text_1(list) and the second element is text_2(list), such as [['这道题很难'], ['这道题不简单']] if texts not data. data(dict): key must be 'text_1' and 'text_2', value is the texts(list) to be predicted use_gpu(bool): whether use gpu to predict or not batch_size(int): the program deals once with one batch Returns: results(list): the word segmentation results " 1637,"def cli_collect_weights(sys_argv): parser = argparse.ArgumentParser( description=""This script loads a pretrained model "" ""and uses it collect weights."", prog=""ludwig collect_weights"", usage=""%(prog)s [options]"", ) # ---------------- # Model parameters # ---------------- parser.add_argument(""-m"", ""--model_path"", help=""model to load"", required=True) parser.add_argument(""-t"", ""--tensors"", help=""tensors to collect"", nargs=""+"", required=True) # ------------------------- # Output results parameters # ------------------------- parser.add_argument( ""-od"", ""--output_directory"", type=str, default=""results"", help=""directory that contains the results"" ) # ------------------ # Runtime parameters # ------------------ parser.add_argument( ""-l"", ""--logging_level"", default=""info"", help=""the level of logging to use"", choices=[""critical"", ""error"", ""warning"", ""info"", ""debug"", ""notset""], ) add_contrib_callback_args(parser) args = parser.parse_args(sys_argv) args.callbacks = args.callbacks or [] for callback in args.callbacks: callback.on_cmdline(""collect_weights"", *sys_argv) args.logging_level = get_logging_level_registry()[args.logging_level] logging.getLogger(""ludwig"").setLevel(args.logging_level) global logger logger = logging.getLogger(""ludwig.collect"") print_ludwig(""Collect Weights"", LUDWIG_VERSION) collect_weights(**vars(args)) ","Command Line Interface to collecting the weights for the model. 
--m: Input model that is necessary to collect to the tensors, this is a required *option* --t: Tensors to collect --od: Output directory of the model, defaults to results --v: Verbose: Defines the logging level that the user will be exposed to ",52,113,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cli_collect_weights(sys_argv): parser = argparse.ArgumentParser( description=""This script loads a pretrained model "" ""and uses it collect weights."", prog=""ludwig collect_weights"", usage=""%(prog)s [options]"", ) # ---------------- # Model parameters # ---------------- parser.add_argument(""-m"", ""--model_path"", help=""model to load"", required=True) parser.add_argument(""-t"", ""--tensors"", help=""tensors to collect"", nargs=""+"", required=True) # ------------------------- # Output results parameters # ------------------------- parser.add_argument( ""-od"", ""--output_directory"", type=str, default=""results"", help=""directory that contains the results"" ) # ------------------ # Runtime parameters # ------------------ parser.add_argument( ""-l"", ""--logging_level"", default=""info"", help=""the level of logging to use"", choices=[""critical"", ""error"", ""warning"", ""info"", ""debug"", ""notset""], ) add_contrib_callback_args(parser) args = parser.parse_args(sys_argv) args.callbacks = args.callbacks or [] for callback in args.callbacks: callback.on_cmdline(""collect_weights"", *sys_argv) args.logging_level = get_logging_level_registry()[args.logging_level] logging.getLogger(""ludwig"").setLevel(args.logging_level) global logger logger = logging.getLogger(""ludwig.collect"") print_ludwig(""Collect Weights"", LUDWIG_VERSION) collect_weights(**vars(args)) ``` ###Assistant : Command Line Interface to collecting the weights for the model. --m: Input model that is necessary to collect to the tensors, this is a required *option* --t: Tensors to collect --od: Output directory of the model, defaults to results --v: Verbose: Defines the logging level that the user will be exposed to " 1638,"def get_user_emails_from_group(group): group_doc = group if isinstance(group_doc, str): group_doc = frappe.get_doc(""Daily Work Summary Group"", group) emails = get_users_email(group_doc) return emails ","Returns list of email of enabled users from the given group :param group: Daily Work Summary Group `name`",18,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_user_emails_from_group(group): group_doc = group if isinstance(group_doc, str): group_doc = frappe.get_doc(""Daily Work Summary Group"", group) emails = get_users_email(group_doc) return emails ``` ###Assistant : Returns list of email of enabled users from the given group :param group: Daily Work Summary Group `name` " 1639,"def adapt_unknown_value(self, value): if isinstance(value, datetime.datetime): # must be before date return self.adapt_datetimefield_value(value) elif isinstance(value, datetime.date): return self.adapt_datefield_value(value) elif isinstance(value, datetime.time): return self.adapt_timefield_value(value) elif isinstance(value, decimal.Decimal): return self.adapt_decimalfield_value(value) else: return value "," Transform a value to something compatible with the backend driver. This method only depends on the type of the value. It's designed for cases where the target type isn't known, such as .raw() SQL queries. As a consequence it may not work perfectly in all circumstances. 
",46,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def adapt_unknown_value(self, value): if isinstance(value, datetime.datetime): # must be before date return self.adapt_datetimefield_value(value) elif isinstance(value, datetime.date): return self.adapt_datefield_value(value) elif isinstance(value, datetime.time): return self.adapt_timefield_value(value) elif isinstance(value, decimal.Decimal): return self.adapt_decimalfield_value(value) else: return value ``` ###Assistant : Transform a value to something compatible with the backend driver. This method only depends on the type of the value. It's designed for cases where the target type isn't known, such as .raw() SQL queries. As a consequence it may not work perfectly in all circumstances. " 1640,"def test_get_settings_variable_assignment_request_context(self): request = self.get_request(site=self.other_site) context = Context({""request"": request}) template = Template( ""{% load wagtailsettings_tags %}"" ""{% get_settings as wagtail_settings %}"" ""{{ wagtail_settings.tests.testsetting.title}}"" ) self.assertEqual(template.render(context), self.other_site_settings.title) # Also check that the default 'settings' variable hasn't been set template = Template( ""{% load wagtailsettings_tags %}"" ""{% get_settings as wagtail_settings %}"" ""{{ settings.tests.testsetting.title}}"" ) self.assertEqual(template.render(context), """") "," Check that assigning the setting to a context variable with {% get_settings as wagtail_settings %} works. ",16,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_settings_variable_assignment_request_context(self): request = self.get_request(site=self.other_site) context = Context({""request"": request}) template = Template( ""{% load wagtailsettings_tags %}"" ""{% get_settings as wagtail_settings %}"" ""{{ wagtail_settings.tests.testsetting.title}}"" ) self.assertEqual(template.render(context), self.other_site_settings.title) # Also check that the default 'settings' variable hasn't been set template = Template( ""{% load wagtailsettings_tags %}"" ""{% get_settings as wagtail_settings %}"" ""{{ settings.tests.testsetting.title}}"" ) self.assertEqual(template.render(context), """") ``` ###Assistant : Check that assigning the setting to a context variable with {% get_settings as wagtail_settings %} works. " 1641,"def get_dataset(cls, path, engine, storage_options): if engine == ""auto"": # We follow in concordance with pandas engine_classes = [PyArrowDataset, FastParquetDataset] error_msgs = """" for engine_class in engine_classes: try: return engine_class(path, storage_options) except ImportError as err: error_msgs += ""\n - "" + str(err) raise ImportError( ""Unable to find a usable engine; "" + ""tried using: 'pyarrow', 'fastparquet'.\n"" + ""A suitable version of "" + ""pyarrow or fastparquet is required for parquet "" + ""support.\n"" + ""Trying to import the above resulted in these errors:"" + f""{error_msgs}"" ) elif engine == ""pyarrow"": return PyArrowDataset(path, storage_options) elif engine == ""fastparquet"": return FastParquetDataset(path, storage_options) else: raise ValueError(""engine must be one of 'pyarrow', 'fastparquet'"") "," Retrieve Parquet engine specific Dataset implementation. Parameters ---------- path : str, path object or file-like object The filepath of the parquet file in local filesystem or hdfs. 
engine : str Parquet library to use (only 'PyArrow' is supported for now). storage_options : dict Parameters for specific storage engine. Returns ------- Dataset Either a PyArrowDataset or FastParquetDataset object. ",57,109,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_dataset(cls, path, engine, storage_options): if engine == ""auto"": # We follow in concordance with pandas engine_classes = [PyArrowDataset, FastParquetDataset] error_msgs = """" for engine_class in engine_classes: try: return engine_class(path, storage_options) except ImportError as err: error_msgs += ""\n - "" + str(err) raise ImportError( ""Unable to find a usable engine; "" + ""tried using: 'pyarrow', 'fastparquet'.\n"" + ""A suitable version of "" + ""pyarrow or fastparquet is required for parquet "" + ""support.\n"" + ""Trying to import the above resulted in these errors:"" + f""{error_msgs}"" ) elif engine == ""pyarrow"": return PyArrowDataset(path, storage_options) elif engine == ""fastparquet"": return FastParquetDataset(path, storage_options) else: raise ValueError(""engine must be one of 'pyarrow', 'fastparquet'"") ``` ###Assistant : Retrieve Parquet engine specific Dataset implementation. Parameters ---------- path : str, path object or file-like object The filepath of the parquet file in local filesystem or hdfs. engine : str Parquet library to use (only 'PyArrow' is supported for now). storage_options : dict Parameters for specific storage engine. Returns ------- Dataset Either a PyArrowDataset or FastParquetDataset object. " 1642,"def test_causal_lm_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs) ",Test the causal LM model with `past_key_values` and `attention_mask`,9,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_causal_lm_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs) ``` ###Assistant : Test the causal LM model with `past_key_values` and `attention_mask` " 1643,"def _whatsnd(data): hdr = data[:512] fakefile = BytesIO(hdr) for testfn in sndhdr.tests: res = testfn(hdr, fakefile) if res is not None: return _sndhdr_MIMEmap.get(res[0]) return None","Try to identify a sound file type. sndhdr.what() has a pretty cruddy interface, unfortunately. This is why we re-do it here. It would be easier to reverse engineer the Unix 'file' command and use the standard 'magic' file, as shipped with a modern Unix. ",44,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _whatsnd(data): hdr = data[:512] fakefile = BytesIO(hdr) for testfn in sndhdr.tests: res = testfn(hdr, fakefile) if res is not None: return _sndhdr_MIMEmap.get(res[0]) return None ``` ###Assistant : Try to identify a sound file type. sndhdr.what() has a pretty cruddy interface, unfortunately. This is why we re-do it here. It would be easier to reverse engineer the Unix 'file' command and use the standard 'magic' file, as shipped with a modern Unix. 
" 1644,"async def test_protect_loop_debugger_sleep(caplog): block_async_io.enable() with patch( ""homeassistant.util.async_.extract_stack"", return_value=[ Mock( filename=""/home/paulus/homeassistant/.venv/blah/pydevd.py"", lineno=""23"", line=""do_something()"", ), Mock( filename=""/home/paulus/homeassistant/util/async.py"", lineno=""123"", line=""protected_loop_func"", ), Mock( filename=""/home/paulus/homeassistant/util/async.py"", lineno=""123"", line=""check_loop()"", ), ], ): time.sleep(0) assert ""Detected blocking call inside the event loop"" not in caplog.text ",Test time.sleep injected by the debugger is not reported.,9,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_protect_loop_debugger_sleep(caplog): block_async_io.enable() with patch( ""homeassistant.util.async_.extract_stack"", return_value=[ Mock( filename=""/home/paulus/homeassistant/.venv/blah/pydevd.py"", lineno=""23"", line=""do_something()"", ), Mock( filename=""/home/paulus/homeassistant/util/async.py"", lineno=""123"", line=""protected_loop_func"", ), Mock( filename=""/home/paulus/homeassistant/util/async.py"", lineno=""123"", line=""check_loop()"", ), ], ): time.sleep(0) assert ""Detected blocking call inside the event loop"" not in caplog.text ``` ###Assistant : Test time.sleep injected by the debugger is not reported. " 1645,"def update_worker_pea_args(self): self.peas_args['peas'] = self._set_peas_args(self.args) ", Update args of all its worker peas based on Pod args. Does not touch head and tail,17,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_worker_pea_args(self): self.peas_args['peas'] = self._set_peas_args(self.args) ``` ###Assistant : Update args of all its worker peas based on Pod args. Does not touch head and tail " 1646,"def prevprime(n): n = _as_int_ceiling(n) if n < 3: raise ValueError(""no preceding primes"") if n < 8: return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n] if n <= sieve._list[-1]: l, u = sieve.search(n) if l == u: return sieve[l-1] else: return sieve[l] nn = 6*(n//6) if n - nn <= 1: n = nn - 1 if isprime(n): return n n -= 4 else: n = nn + 1 while 1: if isprime(n): return n n -= 2 if isprime(n): return n n -= 4 "," Return the largest prime smaller than n. Notes ===== Potential primes are located at 6*j +/- 1. This property is used during searching. >>> from sympy import prevprime >>> [(i, prevprime(i)) for i in range(10, 15)] [(10, 7), (11, 7), (12, 11), (13, 11), (14, 13)] See Also ======== nextprime : Return the ith prime greater than n primerange : Generates all primes in a given range ",67,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def prevprime(n): n = _as_int_ceiling(n) if n < 3: raise ValueError(""no preceding primes"") if n < 8: return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n] if n <= sieve._list[-1]: l, u = sieve.search(n) if l == u: return sieve[l-1] else: return sieve[l] nn = 6*(n//6) if n - nn <= 1: n = nn - 1 if isprime(n): return n n -= 4 else: n = nn + 1 while 1: if isprime(n): return n n -= 2 if isprime(n): return n n -= 4 ``` ###Assistant : Return the largest prime smaller than n. Notes ===== Potential primes are located at 6*j +/- 1. This property is used during searching. 
>>> from sympy import prevprime >>> [(i, prevprime(i)) for i in range(10, 15)] [(10, 7), (11, 7), (12, 11), (13, 11), (14, 13)] See Also ======== nextprime : Return the ith prime greater than n primerange : Generates all primes in a given range " 1647,"def test_duplicate_statistics_handle_integrity_error(hass_recorder, caplog): hass = hass_recorder() wait_recording_done(hass) period1 = dt_util.as_utc(dt_util.parse_datetime(""2021-09-01 00:00:00"")) period2 = dt_util.as_utc(dt_util.parse_datetime(""2021-09-30 23:00:00"")) external_energy_metadata_1 = { ""has_mean"": False, ""has_sum"": True, ""name"": ""Total imported energy"", ""source"": ""test"", ""state_unit_of_measurement"": ""kWh"", ""statistic_id"": ""test:total_energy_import_tariff_1"", ""unit_of_measurement"": ""kWh"", } external_energy_statistics_1 = [ { ""start"": period1, ""last_reset"": None, ""state"": 3, ""sum"": 5, }, ] external_energy_statistics_2 = [ { ""start"": period2, ""last_reset"": None, ""state"": 3, ""sum"": 6, } ] with patch.object( statistics, ""_statistics_exists"", return_value=False ), patch.object( statistics, ""_insert_statistics"", wraps=statistics._insert_statistics ) as insert_statistics_mock: async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_2 ) wait_recording_done(hass) assert insert_statistics_mock.call_count == 3 with session_scope(hass=hass) as session: tmp = session.query(recorder.db_schema.Statistics).all() assert len(tmp) == 2 assert ""Blocked attempt to insert duplicated statistic rows"" in caplog.text ",Test the recorder does not blow up if statistics is duplicated.,11,117,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_duplicate_statistics_handle_integrity_error(hass_recorder, caplog): hass = hass_recorder() wait_recording_done(hass) period1 = dt_util.as_utc(dt_util.parse_datetime(""2021-09-01 00:00:00"")) period2 = dt_util.as_utc(dt_util.parse_datetime(""2021-09-30 23:00:00"")) external_energy_metadata_1 = { ""has_mean"": False, ""has_sum"": True, ""name"": ""Total imported energy"", ""source"": ""test"", ""state_unit_of_measurement"": ""kWh"", ""statistic_id"": ""test:total_energy_import_tariff_1"", ""unit_of_measurement"": ""kWh"", } external_energy_statistics_1 = [ { ""start"": period1, ""last_reset"": None, ""state"": 3, ""sum"": 5, }, ] external_energy_statistics_2 = [ { ""start"": period2, ""last_reset"": None, ""state"": 3, ""sum"": 6, } ] with patch.object( statistics, ""_statistics_exists"", return_value=False ), patch.object( statistics, ""_insert_statistics"", wraps=statistics._insert_statistics ) as insert_statistics_mock: async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_2 ) wait_recording_done(hass) assert insert_statistics_mock.call_count == 3 with session_scope(hass=hass) as session: tmp = session.query(recorder.db_schema.Statistics).all() assert len(tmp) == 2 assert ""Blocked attempt to insert duplicated statistic rows"" in caplog.text ``` ###Assistant : Test the recorder does not blow up if statistics is duplicated. 
" 1648,"def testRequestResourcesRaceConditionWithResourceDemands(self): config = copy.deepcopy(MULTI_WORKER_CLUSTER) config[""available_node_types""].update( { ""empty_node"": { ""node_config"": {}, ""resources"": {""CPU"": 2, ""GPU"": 1}, ""max_workers"": 1, }, ""def_worker"": { ""node_config"": {}, ""resources"": {""CPU"": 2, ""GPU"": 1, ""WORKER"": 1}, ""max_workers"": 3, }, } ) config[""idle_timeout_minutes""] = 0 config_path = self.write_config(config) self.provider = MockProvider() self.provider.create_node( {}, { TAG_RAY_NODE_KIND: ""head"", TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, TAG_RAY_USER_NODE_TYPE: ""empty_node"", }, 1, ) runner = MockProcessRunner() runner.respond_to_call(""json .Config.Env"", [""[]"" for i in range(2)]) lm = LoadMetrics() autoscaler = MockAutoscaler( config_path, lm, MockNodeInfoStub(), max_failures=0, process_runner=runner, update_interval_s=0, ) lm.update( ""127.0.0.0"", mock_raylet_id(), {""CPU"": 2, ""GPU"": 1}, {""CPU"": 2}, {}, waiting_bundles=[{""CPU"": 2}], ) autoscaler.load_metrics.set_resource_requests([{""CPU"": 2, ""GPU"": 1}] * 2) autoscaler.update() # 1 head, 1 worker. self.waitForNodes(2) lm.update( ""127.0.0.0"", mock_raylet_id(), {""CPU"": 2, ""GPU"": 1}, {""CPU"": 2}, {}, waiting_bundles=[{""CPU"": 2}], ) # make sure it stays consistent. for _ in range(10): autoscaler.update() self.waitForNodes(2) ","Test request_resources() with resource_demands. Tests when request_resources() is called simultaneously with resource demands in multiple orders. ",16,130,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def testRequestResourcesRaceConditionWithResourceDemands(self): config = copy.deepcopy(MULTI_WORKER_CLUSTER) config[""available_node_types""].update( { ""empty_node"": { ""node_config"": {}, ""resources"": {""CPU"": 2, ""GPU"": 1}, ""max_workers"": 1, }, ""def_worker"": { ""node_config"": {}, ""resources"": {""CPU"": 2, ""GPU"": 1, ""WORKER"": 1}, ""max_workers"": 3, }, } ) config[""idle_timeout_minutes""] = 0 config_path = self.write_config(config) self.provider = MockProvider() self.provider.create_node( {}, { TAG_RAY_NODE_KIND: ""head"", TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, TAG_RAY_USER_NODE_TYPE: ""empty_node"", }, 1, ) runner = MockProcessRunner() runner.respond_to_call(""json .Config.Env"", [""[]"" for i in range(2)]) lm = LoadMetrics() autoscaler = MockAutoscaler( config_path, lm, MockNodeInfoStub(), max_failures=0, process_runner=runner, update_interval_s=0, ) lm.update( ""127.0.0.0"", mock_raylet_id(), {""CPU"": 2, ""GPU"": 1}, {""CPU"": 2}, {}, waiting_bundles=[{""CPU"": 2}], ) autoscaler.load_metrics.set_resource_requests([{""CPU"": 2, ""GPU"": 1}] * 2) autoscaler.update() # 1 head, 1 worker. self.waitForNodes(2) lm.update( ""127.0.0.0"", mock_raylet_id(), {""CPU"": 2, ""GPU"": 1}, {""CPU"": 2}, {}, waiting_bundles=[{""CPU"": 2}], ) # make sure it stays consistent. for _ in range(10): autoscaler.update() self.waitForNodes(2) ``` ###Assistant : Test request_resources() with resource_demands. Tests when request_resources() is called simultaneously with resource demands in multiple orders. 
" 1649,"def test_sitemap_unpublished_titles(self): sitemap = CMSSitemap() locations = [] urlset = sitemap.get_urls() unpublished_titles = set() for item in urlset: locations.append(item['location']) for page in Page.objects.drafts(): if page.get_public_object(): set1 = set(page.get_public_object().title_set.values_list('path', flat=True)) set2 = set(page.title_set.values_list('path', flat=True)) unpublished_titles.update(set2.difference(set1)) else: unpublished_titles.update(page.title_set.values_list('path', flat=True)) for path in unpublished_titles: title = Title.objects.get(path=path) if title.path: url = f'http://example.com/{title.language}/{title.path}/' else: url = f'http://example.com/{title.language}/{title.path}' self.assertFalse(url in locations) "," Check that titles attached to unpublished pages are not in the urlset. As titles are 'published' depending on their attached page, we create a set of unpublished titles by checking titles attached to the draft and public version of each page ",41,56,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_sitemap_unpublished_titles(self): sitemap = CMSSitemap() locations = [] urlset = sitemap.get_urls() unpublished_titles = set() for item in urlset: locations.append(item['location']) for page in Page.objects.drafts(): if page.get_public_object(): set1 = set(page.get_public_object().title_set.values_list('path', flat=True)) set2 = set(page.title_set.values_list('path', flat=True)) unpublished_titles.update(set2.difference(set1)) else: unpublished_titles.update(page.title_set.values_list('path', flat=True)) for path in unpublished_titles: title = Title.objects.get(path=path) if title.path: url = f'http://example.com/{title.language}/{title.path}/' else: url = f'http://example.com/{title.language}/{title.path}' self.assertFalse(url in locations) ``` ###Assistant : Check that titles attached to unpublished pages are not in the urlset. As titles are 'published' depending on their attached page, we create a set of unpublished titles by checking titles attached to the draft and public version of each page " 1650,"def mixin_gateway_parser(parser): gp = add_arg_group(parser, title='Gateway') _add_host(gp) _add_proxy(gp) gp.add_argument( '--uses', type=str, default=None, # TODO: add Jina Hub Gateway help=, ) gp.add_argument( '--uses-with', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help=, ) gp.add_argument( '--py-modules', type=str, nargs='*', metavar='PATH', help=, ) mixin_base_runtime_parser(gp) gp.add_argument( '--port-expose', type=int, dest='port', default=helper.random_port(), help='The port that the gateway exposes for clients for GRPC connections.', ) parser.add_argument( '--graph-description', type=str, help='Routing graph for the gateway', default='{}', ) parser.add_argument( '--graph-conditions', type=str, help='Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents.', default='{}', ) parser.add_argument( '--deployments-addresses', type=str, help='dictionary JSON with the input addresses of each Deployment', default='{}', ) parser.add_argument( '--deployments-disable-reduce', type=str, help='list JSON disabling the built-in merging mechanism for each Deployment listed', default='[]', ) gp.add_argument( '--compression', choices=['NoCompression', 'Deflate', 'Gzip'], help='The compression mechanism used when sending requests from the Head to the WorkerRuntimes. 
For more details, ' 'check https://grpc.github.io/grpc/python/grpc.html#compression.', ) gp.add_argument( '--timeout-send', type=int, default=None, help='The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default', ) ","Add the options for remote expose at the Gateway :param parser: the parser The config of the gateway, it could be one of the followings: * the string literal of an Gateway class name * a Gateway YAML file (.yml, .yaml, .jaml) * a docker image (must start with `docker://`) * the string literal of a YAML config (must start with `!` or `jtype: `) * the string literal of a JSON config When use it under Python, one can use the following values additionally: - a Python dict that represents the config - a text file stream has `.read()` interface Dictionary of keyword arguments that will override the `with` configuration in `uses` The customized python modules need to be imported before loading the gateway Note that the recommended way is to only import a single module - a simple python file, if your gateway can be defined in a single file, or an ``__init__.py`` file if you have multiple files, which should be structured as a python package. ",169,160,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mixin_gateway_parser(parser): gp = add_arg_group(parser, title='Gateway') _add_host(gp) _add_proxy(gp) gp.add_argument( '--uses', type=str, default=None, # TODO: add Jina Hub Gateway help=, ) gp.add_argument( '--uses-with', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help=, ) gp.add_argument( '--py-modules', type=str, nargs='*', metavar='PATH', help=, ) mixin_base_runtime_parser(gp) gp.add_argument( '--port-expose', type=int, dest='port', default=helper.random_port(), help='The port that the gateway exposes for clients for GRPC connections.', ) parser.add_argument( '--graph-description', type=str, help='Routing graph for the gateway', default='{}', ) parser.add_argument( '--graph-conditions', type=str, help='Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents.', default='{}', ) parser.add_argument( '--deployments-addresses', type=str, help='dictionary JSON with the input addresses of each Deployment', default='{}', ) parser.add_argument( '--deployments-disable-reduce', type=str, help='list JSON disabling the built-in merging mechanism for each Deployment listed', default='[]', ) gp.add_argument( '--compression', choices=['NoCompression', 'Deflate', 'Gzip'], help='The compression mechanism used when sending requests from the Head to the WorkerRuntimes. 
For more details, ' 'check https://grpc.github.io/grpc/python/grpc.html#compression.', ) gp.add_argument( '--timeout-send', type=int, default=None, help='The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default', ) ``` ###Assistant : Add the options for remote expose at the Gateway :param parser: the parser The config of the gateway, it could be one of the followings: * the string literal of an Gateway class name * a Gateway YAML file (.yml, .yaml, .jaml) * a docker image (must start with `docker://`) * the string literal of a YAML config (must start with `!` or `jtype: `) * the string literal of a JSON config When use it under Python, one can use the following values additionally: - a Python dict that represents the config - a text file stream has `.read()` interface Dictionary of keyword arguments that will override the `with` configuration in `uses` The customized python modules need to be imported before loading the gateway Note that the recommended way is to only import a single module - a simple python file, if your gateway can be defined in a single file, or an ``__init__.py`` file if you have multiple files, which should be structured as a python package. " 1651,"def _format_changelog(self, changelog): if not changelog: return changelog new_changelog = [] for line in changelog.strip().split('\n'): line = line.strip() if line[0] == '*': new_changelog.extend(['', line]) elif line[0] == '-': new_changelog.append(line) else: new_changelog.append(' ' + line) # strip trailing newline inserted by first changelog entry if not new_changelog[0]: del new_changelog[0] return new_changelog ","Format the changelog correctly and convert it to a list of strings ",12,50,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _format_changelog(self, changelog): if not changelog: return changelog new_changelog = [] for line in changelog.strip().split('\n'): line = line.strip() if line[0] == '*': new_changelog.extend(['', line]) elif line[0] == '-': new_changelog.append(line) else: new_changelog.append(' ' + line) # strip trailing newline inserted by first changelog entry if not new_changelog[0]: del new_changelog[0] return new_changelog ``` ###Assistant : Format the changelog correctly and convert it to a list of strings " 1652,"def _get_time(self) -> float: # N.B. We could remove this method and always call `self._timer.get_time()` internally, # but it's handy to have in mocking situations return self._timer.get_time() ","Get the current wall clock time, via the internal Timer.",10,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_time(self) -> float: # N.B. We could remove this method and always call `self._timer.get_time()` internally, # but it's handy to have in mocking situations return self._timer.get_time() ``` ###Assistant : Get the current wall clock time, via the internal Timer. 
" 1653,"def _generate(self, pset, min_, max_, condition, type_=None): if type_ is None: type_ = pset.ret expr = [] height = np.random.randint(min_, max_) stack = [(0, type_)] while len(stack) != 0: depth, type_ = stack.pop() # We've added a type_ parameter to the condition function if condition(height, depth, type_): try: term = np.random.choice(pset.terminals[type_]) except IndexError: _, _, traceback = sys.exc_info() raise IndexError( ""The gp.generate function tried to add "" ""a terminal of type {}, but there is"" ""none available. {}"".format(type_, traceback) ) if inspect.isclass(term): term = term() expr.append(term) else: try: prim = np.random.choice(pset.primitives[type_]) except IndexError: _, _, traceback = sys.exc_info() raise IndexError( ""The gp.generate function tried to add "" ""a primitive of type {}, but there is"" ""none available. {}"".format(type_, traceback) ) expr.append(prim) for arg in reversed(prim.args): stack.append((depth + 1, arg)) return expr ","Generate a Tree as a list of lists. The tree is build from the root to the leaves, and it stop growing when the condition is fulfilled. Parameters ---------- pset: PrimitiveSetTyped Primitive set from which primitives are selected. min_: int Minimum height of the produced trees. max_: int Maximum height of the produced trees. condition: function The condition is a function that takes two arguments, the height of the tree to build and the current depth in the tree. type_: class The type that should return the tree when called, when :obj:None (default) no return type is enforced. Returns ------- individual: list A grown tree with leaves at possibly different depths depending on the condition function. ",116,131,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _generate(self, pset, min_, max_, condition, type_=None): if type_ is None: type_ = pset.ret expr = [] height = np.random.randint(min_, max_) stack = [(0, type_)] while len(stack) != 0: depth, type_ = stack.pop() # We've added a type_ parameter to the condition function if condition(height, depth, type_): try: term = np.random.choice(pset.terminals[type_]) except IndexError: _, _, traceback = sys.exc_info() raise IndexError( ""The gp.generate function tried to add "" ""a terminal of type {}, but there is"" ""none available. {}"".format(type_, traceback) ) if inspect.isclass(term): term = term() expr.append(term) else: try: prim = np.random.choice(pset.primitives[type_]) except IndexError: _, _, traceback = sys.exc_info() raise IndexError( ""The gp.generate function tried to add "" ""a primitive of type {}, but there is"" ""none available. {}"".format(type_, traceback) ) expr.append(prim) for arg in reversed(prim.args): stack.append((depth + 1, arg)) return expr ``` ###Assistant : Generate a Tree as a list of lists. The tree is build from the root to the leaves, and it stop growing when the condition is fulfilled. Parameters ---------- pset: PrimitiveSetTyped Primitive set from which primitives are selected. min_: int Minimum height of the produced trees. max_: int Maximum height of the produced trees. condition: function The condition is a function that takes two arguments, the height of the tree to build and the current depth in the tree. type_: class The type that should return the tree when called, when :obj:None (default) no return type is enforced. Returns ------- individual: list A grown tree with leaves at possibly different depths depending on the condition function. 
" 1654,"def test_ohe_infrequent_multiple_categories_dtypes(): pd = pytest.importorskip(""pandas"") X = pd.DataFrame( { ""str"": [""a"", ""f"", ""c"", ""f"", ""f"", ""a"", ""c"", ""b"", ""b""], ""int"": [5, 3, 0, 10, 10, 12, 0, 3, 5], }, columns=[""str"", ""int""], ) ohe = OneHotEncoder( categories=""auto"", max_categories=3, handle_unknown=""infrequent_if_exist"" ) # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be # considered infrequent because they are greater # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1. # 0, 3, 12 will be considered infrequent X_trans = ohe.fit_transform(X).toarray() assert_array_equal(ohe.infrequent_categories_[0], [""a"", ""b""]) assert_array_equal(ohe.infrequent_categories_[1], [0, 3, 12]) expected = [ [0, 0, 1, 1, 0, 0], [0, 1, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 1], [0, 0, 1, 1, 0, 0], ] assert_allclose(expected, X_trans) X_test = pd.DataFrame({""str"": [""b"", ""f""], ""int"": [14, 12]}, columns=[""str"", ""int""]) expected = [[0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1]] X_test_trans = ohe.transform(X_test) assert_allclose(expected, X_test_trans.toarray()) X_inv = ohe.inverse_transform(X_test_trans) expected_inv = np.array( [[""infrequent_sklearn"", ""infrequent_sklearn""], [""f"", ""infrequent_sklearn""]], dtype=object, ) assert_array_equal(expected_inv, X_inv) # only infrequent or known categories X_test = pd.DataFrame({""str"": [""c"", ""b""], ""int"": [12, 5]}, columns=[""str"", ""int""]) X_test_trans = ohe.transform(X_test).toarray() expected = [[1, 0, 0, 0, 0, 1], [0, 0, 1, 1, 0, 0]] assert_allclose(expected, X_test_trans) X_inv = ohe.inverse_transform(X_test_trans) expected_inv = np.array( [[""c"", ""infrequent_sklearn""], [""infrequent_sklearn"", 5]], dtype=object ) assert_array_equal(expected_inv, X_inv) @pytest.mark.parametrize(""kwargs"", [{""min_frequency"": 21, ""max_categories"": 1}])",Test infrequent categories with a pandas dataframe with multiple dtypes.,10,252,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_ohe_infrequent_multiple_categories_dtypes(): pd = pytest.importorskip(""pandas"") X = pd.DataFrame( { ""str"": [""a"", ""f"", ""c"", ""f"", ""f"", ""a"", ""c"", ""b"", ""b""], ""int"": [5, 3, 0, 10, 10, 12, 0, 3, 5], }, columns=[""str"", ""int""], ) ohe = OneHotEncoder( categories=""auto"", max_categories=3, handle_unknown=""infrequent_if_exist"" ) # X[:, 0] 'a', 'b', 'c' have the same frequency. 'a' and 'b' will be # considered infrequent because they are greater # X[:, 1] 0, 3, 5, 10 has frequency 2 and 12 has frequency 1. 
# 0, 3, 12 will be considered infrequent X_trans = ohe.fit_transform(X).toarray() assert_array_equal(ohe.infrequent_categories_[0], [""a"", ""b""]) assert_array_equal(ohe.infrequent_categories_[1], [0, 3, 12]) expected = [ [0, 0, 1, 1, 0, 0], [0, 1, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 1], [0, 0, 1, 1, 0, 0], ] assert_allclose(expected, X_trans) X_test = pd.DataFrame({""str"": [""b"", ""f""], ""int"": [14, 12]}, columns=[""str"", ""int""]) expected = [[0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1]] X_test_trans = ohe.transform(X_test) assert_allclose(expected, X_test_trans.toarray()) X_inv = ohe.inverse_transform(X_test_trans) expected_inv = np.array( [[""infrequent_sklearn"", ""infrequent_sklearn""], [""f"", ""infrequent_sklearn""]], dtype=object, ) assert_array_equal(expected_inv, X_inv) # only infrequent or known categories X_test = pd.DataFrame({""str"": [""c"", ""b""], ""int"": [12, 5]}, columns=[""str"", ""int""]) X_test_trans = ohe.transform(X_test).toarray() expected = [[1, 0, 0, 0, 0, 1], [0, 0, 1, 1, 0, 0]] assert_allclose(expected, X_test_trans) X_inv = ohe.inverse_transform(X_test_trans) expected_inv = np.array( [[""c"", ""infrequent_sklearn""], [""infrequent_sklearn"", 5]], dtype=object ) assert_array_equal(expected_inv, X_inv) @pytest.mark.parametrize(""kwargs"", [{""min_frequency"": 21, ""max_categories"": 1}]) ``` ###Assistant : Test infrequent categories with a pandas dataframe with multiple dtypes. " 1655,"def _set_random_id(self): if getattr(self, ""persistence"", False): raise RuntimeError( ) if ""dash_snapshots"" in sys.modules: raise RuntimeError( ) if not hasattr(self, ""id""): v = str(uuid.UUID(int=rd.randint(0, 2 ** 128))) setattr(self, ""id"", v) return getattr(self, ""id"") "," Attempting to use an auto-generated ID with the `persistence` prop. This is prohibited because persistence is tied to component IDs and auto-generated IDs can easily change. Please assign an explicit ID to this component. Attempting to use an auto-generated ID in an app with `dash_snapshots`. This is prohibited because snapshots saves the whole app layout, including component IDs, and auto-generated IDs can easily change. Callbacks referencing the new IDs will not work old snapshots. Please assign an explicit ID to this component. ",82,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_random_id(self): if getattr(self, ""persistence"", False): raise RuntimeError( ) if ""dash_snapshots"" in sys.modules: raise RuntimeError( ) if not hasattr(self, ""id""): v = str(uuid.UUID(int=rd.randint(0, 2 ** 128))) setattr(self, ""id"", v) return getattr(self, ""id"") ``` ###Assistant : Attempting to use an auto-generated ID with the `persistence` prop. This is prohibited because persistence is tied to component IDs and auto-generated IDs can easily change. Please assign an explicit ID to this component. Attempting to use an auto-generated ID in an app with `dash_snapshots`. This is prohibited because snapshots saves the whole app layout, including component IDs, and auto-generated IDs can easily change. Callbacks referencing the new IDs will not work old snapshots. Please assign an explicit ID to this component. 
" 1656,"def final(): head = [] head.append((""layernorm.weight"", ""norm.weight"")) head.append((""layernorm.bias"", ""norm.bias"")) head.append((""classifier.weight"", ""head.weight"")) head.append((""classifier.bias"", ""head.bias"")) return head "," Function helps in renaming final classification layer ",7,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def final(): head = [] head.append((""layernorm.weight"", ""norm.weight"")) head.append((""layernorm.bias"", ""norm.bias"")) head.append((""classifier.weight"", ""head.weight"")) head.append((""classifier.bias"", ""head.bias"")) return head ``` ###Assistant : Function helps in renaming final classification layer " 1657,"def test_delete_index(self, ds, documents): ds.write_documents(documents, index=""custom_index"") assert ds.get_document_count(index=""custom_index"") == len(documents) ds.delete_index(index=""custom_index"") assert ds.get_document_count(index=""custom_index"") == 0 ","Contrary to other Document Stores, SQLDocumentStore doesn't raise if the index is empty",13,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_delete_index(self, ds, documents): ds.write_documents(documents, index=""custom_index"") assert ds.get_document_count(index=""custom_index"") == len(documents) ds.delete_index(index=""custom_index"") assert ds.get_document_count(index=""custom_index"") == 0 ``` ###Assistant : Contrary to other Document Stores, SQLDocumentStore doesn't raise if the index is empty " 1658,"def add_holidays(events, start, end, employee, company): applicable_holiday_list = get_holiday_list_for_employee(employee, company) if not applicable_holiday_list: return for holiday in frappe.db.sql( , (applicable_holiday_list, start, end), as_dict=True, ): events.append( { ""doctype"": ""Holiday"", ""from_date"": holiday.holiday_date, ""to_date"": holiday.holiday_date, ""title"": _(""Holiday"") + "": "" + cstr(holiday.description), ""name"": holiday.name, } ) @frappe.whitelist()","select name, holiday_date, description from `tabHoliday` where parent=%s and holiday_date between %s and %s",14,44,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def add_holidays(events, start, end, employee, company): applicable_holiday_list = get_holiday_list_for_employee(employee, company) if not applicable_holiday_list: return for holiday in frappe.db.sql( , (applicable_holiday_list, start, end), as_dict=True, ): events.append( { ""doctype"": ""Holiday"", ""from_date"": holiday.holiday_date, ""to_date"": holiday.holiday_date, ""title"": _(""Holiday"") + "": "" + cstr(holiday.description), ""name"": holiday.name, } ) @frappe.whitelist() ``` ###Assistant : select name, holiday_date, description from `tabHoliday` where parent=%s and holiday_date between %s and %s " 1659,"def get_video_input_devices_names() -> List[str]: # based on https://docs.microsoft.com/ru-ru/windows/win32/directshow/selecting-a-capture-device names = [] sys_dev_enum = strmif.ICreateDevEnum() if ole32.CoCreateInstance(uuids.CLSID_SystemDeviceEnum, None, ole32.CLSCTX.CLSCTX_INPROC_SERVER, strmif.ICreateDevEnum.IID, sys_dev_enum) == wintypes.ERROR.SUCCESS: pEnumCat = objidl.IEnumMoniker() if sys_dev_enum.CreateClassEnumerator(uuids.CLSID_VideoInputDeviceCategory, pEnumCat, 0) == wintypes.ERROR.SUCCESS: moniker = objidl.IMoniker() while pEnumCat.Next(1, moniker, None) == wintypes.ERROR.SUCCESS: prop_bag = oaidl.IPropertyBag() if moniker.BindToStorage(None, None, oaidl.IPropertyBag.IID, prop_bag) == wintypes.ERROR.SUCCESS: var = wintypes.VARIANT() hr = prop_bag.Read(wintypes.LPCOLESTR('Description'), var, None ) if hr != wintypes.ERROR.SUCCESS: hr = prop_bag.Read(wintypes.LPCOLESTR('FriendlyName'), var, None ) names.append(var.value.bstrVal.value if hr == wintypes.ERROR.SUCCESS else 'unnamed') prop_bag.Release() moniker.Release() pEnumCat.Release() sys_dev_enum.Release() return names"," returns a list of available names of VideoInputDevice's ole32 should be initialized before use ",14,82,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_video_input_devices_names() -> List[str]: # based on https://docs.microsoft.com/ru-ru/windows/win32/directshow/selecting-a-capture-device names = [] sys_dev_enum = strmif.ICreateDevEnum() if ole32.CoCreateInstance(uuids.CLSID_SystemDeviceEnum, None, ole32.CLSCTX.CLSCTX_INPROC_SERVER, strmif.ICreateDevEnum.IID, sys_dev_enum) == wintypes.ERROR.SUCCESS: pEnumCat = objidl.IEnumMoniker() if sys_dev_enum.CreateClassEnumerator(uuids.CLSID_VideoInputDeviceCategory, pEnumCat, 0) == wintypes.ERROR.SUCCESS: moniker = objidl.IMoniker() while pEnumCat.Next(1, moniker, None) == wintypes.ERROR.SUCCESS: prop_bag = oaidl.IPropertyBag() if moniker.BindToStorage(None, None, oaidl.IPropertyBag.IID, prop_bag) == wintypes.ERROR.SUCCESS: var = wintypes.VARIANT() hr = prop_bag.Read(wintypes.LPCOLESTR('Description'), var, None ) if hr != wintypes.ERROR.SUCCESS: hr = prop_bag.Read(wintypes.LPCOLESTR('FriendlyName'), var, None ) names.append(var.value.bstrVal.value if hr == wintypes.ERROR.SUCCESS else 'unnamed') prop_bag.Release() moniker.Release() pEnumCat.Release() sys_dev_enum.Release() return names ``` ###Assistant : returns a list of available names of VideoInputDevice's ole32 should be initialized before use " 1660,"def check_for_updates(): version_message = get_update_status() print(version_message) "," Check for updates to the current version. ",7,6,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def check_for_updates(): version_message = get_update_status() print(version_message) ``` ###Assistant : Check for updates to the current version. " 1661,"def _get_base_knot_positions(X, n_knots=10, knots=""uniform"", sample_weight=None): if knots == ""quantile"": percentiles = 100 * np.linspace( start=0, stop=1, num=n_knots, dtype=np.float64 ) if sample_weight is None: knots = np.percentile(X, percentiles, axis=0) else: knots = np.array( [ _weighted_percentile(X, sample_weight, percentile) for percentile in percentiles ] ) else: # knots == 'uniform': # Note that the variable `knots` has already been validated and # `else` is therefore safe. # Disregard observations with zero weight. mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0 x_min = np.amin(X[mask], axis=0) x_max = np.amax(X[mask], axis=0) knots = np.linspace( start=x_min, stop=x_max, num=n_knots, endpoint=True, dtype=np.float64, ) return knots ","Calculate base knot positions. Base knots such that first knot <= feature <= last knot. For the B-spline construction with scipy.interpolate.BSpline, 2*degree knots beyond the base interval are added. Returns ------- knots : ndarray of shape (n_knots, n_features), dtype=np.float64 Knot positions (points) of base interval. ",45,101,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_base_knot_positions(X, n_knots=10, knots=""uniform"", sample_weight=None): if knots == ""quantile"": percentiles = 100 * np.linspace( start=0, stop=1, num=n_knots, dtype=np.float64 ) if sample_weight is None: knots = np.percentile(X, percentiles, axis=0) else: knots = np.array( [ _weighted_percentile(X, sample_weight, percentile) for percentile in percentiles ] ) else: # knots == 'uniform': # Note that the variable `knots` has already been validated and # `else` is therefore safe. # Disregard observations with zero weight. mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0 x_min = np.amin(X[mask], axis=0) x_max = np.amax(X[mask], axis=0) knots = np.linspace( start=x_min, stop=x_max, num=n_knots, endpoint=True, dtype=np.float64, ) return knots ``` ###Assistant : Calculate base knot positions. Base knots such that first knot <= feature <= last knot. For the B-spline construction with scipy.interpolate.BSpline, 2*degree knots beyond the base interval are added. Returns ------- knots : ndarray of shape (n_knots, n_features), dtype=np.float64 Knot positions (points) of base interval. " 1662,"def _num_tokens(self, data): if tf_utils.is_sparse(data): flat_values = data.values elif tf_utils.is_ragged(data): flat_values = data.flat_values else: flat_values = tf.reshape(data, [-1]) tokens, _, counts = tf.unique_with_counts(flat_values, out_idx=tf.int64) return tokens, counts ","Count the number of tokens in a ragged, sparse or dense tensor.",12,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _num_tokens(self, data): if tf_utils.is_sparse(data): flat_values = data.values elif tf_utils.is_ragged(data): flat_values = data.flat_values else: flat_values = tf.reshape(data, [-1]) tokens, _, counts = tf.unique_with_counts(flat_values, out_idx=tf.int64) return tokens, counts ``` ###Assistant : Count the number of tokens in a ragged, sparse or dense tensor. 
" 1663,"def no_devicess_fixture(): return json.loads(load_fixture(""awair/no_devices.json"")) @pytest.fixture(name=""awair_offline"", scope=""session"")",Fixture representing when no devices are found in Awair's cloud API.,11,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def no_devicess_fixture(): return json.loads(load_fixture(""awair/no_devices.json"")) @pytest.fixture(name=""awair_offline"", scope=""session"") ``` ###Assistant : Fixture representing when no devices are found in Awair's cloud API. " 1664,"def data(): return pd.array( [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False], dtype=""boolean"", ) @pytest.mark.parametrize( ""values, exp_any, exp_all, exp_any_noskip, exp_all_noskip"", [ ([True, pd.NA], True, True, True, pd.NA), ([False, pd.NA], False, False, pd.NA, False), ([pd.NA], False, True, pd.NA, pd.NA), ([], False, True, False, True), # GH-33253: all True / all False values buggy with skipna=False ([True, True], True, True, True, True), ([False, False], False, False, False, False), ], )","Fixture returning boolean array, with valid and missing values.",9,76,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def data(): return pd.array( [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False], dtype=""boolean"", ) @pytest.mark.parametrize( ""values, exp_any, exp_all, exp_any_noskip, exp_all_noskip"", [ ([True, pd.NA], True, True, True, pd.NA), ([False, pd.NA], False, False, pd.NA, False), ([pd.NA], False, True, pd.NA, pd.NA), ([], False, True, False, True), # GH-33253: all True / all False values buggy with skipna=False ([True, True], True, True, True, True), ([False, False], False, False, False, False), ], ) ``` ###Assistant : Fixture returning boolean array, with valid and missing values. " 1665,"def should_toggle_mask(self) -> bool: with self._lock: retval = self._toggle_mask if retval: logger.debug(""Sending toggle mask"") self._toggle_mask = False return retval "," Check whether the mask should be toggled and return the value. If ``True`` is returned then resets :attr:`_toggle_mask` back to ``False`` Returns ------- bool ``True`` if the mask should be toggled otherwise ``False``. ",33,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def should_toggle_mask(self) -> bool: with self._lock: retval = self._toggle_mask if retval: logger.debug(""Sending toggle mask"") self._toggle_mask = False return retval ``` ###Assistant : Check whether the mask should be toggled and return the value. If ``True`` is returned then resets :attr:`_toggle_mask` back to ``False`` Returns ------- bool ``True`` if the mask should be toggled otherwise ``False``. 
" 1666,"def load_mtpl2(n_samples=100000): # freMTPL2freq dataset from https://www.openml.org/d/41214 df_freq = fetch_openml(data_id=41214, as_frame=True, parser=""pandas"").data df_freq[""IDpol""] = df_freq[""IDpol""].astype(int) df_freq.set_index(""IDpol"", inplace=True) # freMTPL2sev dataset from https://www.openml.org/d/41215 df_sev = fetch_openml(data_id=41215, as_frame=True, parser=""pandas"").data # sum ClaimAmount over identical IDs df_sev = df_sev.groupby(""IDpol"").sum() df = df_freq.join(df_sev, how=""left"") df[""ClaimAmount""].fillna(0, inplace=True) # unquote string fields for column_name in df.columns[df.dtypes.values == object]: df[column_name] = df[column_name].str.strip(""'"") return df.iloc[:n_samples] ","Fetch the French Motor Third-Party Liability Claims dataset. Parameters ---------- n_samples: int, default=100000 number of samples to select (for faster run time). Full dataset has 678013 samples. ",27,57,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_mtpl2(n_samples=100000): # freMTPL2freq dataset from https://www.openml.org/d/41214 df_freq = fetch_openml(data_id=41214, as_frame=True, parser=""pandas"").data df_freq[""IDpol""] = df_freq[""IDpol""].astype(int) df_freq.set_index(""IDpol"", inplace=True) # freMTPL2sev dataset from https://www.openml.org/d/41215 df_sev = fetch_openml(data_id=41215, as_frame=True, parser=""pandas"").data # sum ClaimAmount over identical IDs df_sev = df_sev.groupby(""IDpol"").sum() df = df_freq.join(df_sev, how=""left"") df[""ClaimAmount""].fillna(0, inplace=True) # unquote string fields for column_name in df.columns[df.dtypes.values == object]: df[column_name] = df[column_name].str.strip(""'"") return df.iloc[:n_samples] ``` ###Assistant : Fetch the French Motor Third-Party Liability Claims dataset. Parameters ---------- n_samples: int, default=100000 number of samples to select (for faster run time). Full dataset has 678013 samples. " 1667,"def _get_driver(self) -> str: try: driver = pynvml.nvmlSystemGetDriverVersion().decode(""utf-8"") except pynvml.NVMLError as err: self._log(""debug"", f""Unable to obtain driver. Original error: {str(err)}"") driver = ""No Nvidia driver found"" self._log(""debug"", f""GPU Driver: {driver}"") return driver "," Obtain the Nvidia driver version currently in use. Returns ------- str The current GPU driver version ",16,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_driver(self) -> str: try: driver = pynvml.nvmlSystemGetDriverVersion().decode(""utf-8"") except pynvml.NVMLError as err: self._log(""debug"", f""Unable to obtain driver. Original error: {str(err)}"") driver = ""No Nvidia driver found"" self._log(""debug"", f""GPU Driver: {driver}"") return driver ``` ###Assistant : Obtain the Nvidia driver version currently in use. Returns ------- str The current GPU driver version " 1668,"def _load_serializers(): global _serializers serializers = {} for format in BUILTIN_SERIALIZERS: register_serializer(format, BUILTIN_SERIALIZERS[format], serializers) if hasattr(settings, ""SERIALIZATION_MODULES""): for format in settings.SERIALIZATION_MODULES: register_serializer( format, settings.SERIALIZATION_MODULES[format], serializers ) _serializers = serializers "," Register built-in and settings-defined serializers. This is done lazily so that user code has a chance to (e.g.) set up custom settings without needing to be careful of import order. 
",30,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _load_serializers(): global _serializers serializers = {} for format in BUILTIN_SERIALIZERS: register_serializer(format, BUILTIN_SERIALIZERS[format], serializers) if hasattr(settings, ""SERIALIZATION_MODULES""): for format in settings.SERIALIZATION_MODULES: register_serializer( format, settings.SERIALIZATION_MODULES[format], serializers ) _serializers = serializers ``` ###Assistant : Register built-in and settings-defined serializers. This is done lazily so that user code has a chance to (e.g.) set up custom settings without needing to be careful of import order. " 1669,"def pop(self): if self.keyorder: value = self.keys()[0] self.remove(value) return value return None "," Pops the top element from the sorted keys if it exists. Returns None otherwise. ",14,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pop(self): if self.keyorder: value = self.keys()[0] self.remove(value) return value return None ``` ###Assistant : Pops the top element from the sorted keys if it exists. Returns None otherwise. " 1670,"def probs_to_pianoroll_viterbi(frame_probs, onset_probs, alpha=0.5): n, d = onset_probs.shape loss_matrix = np.zeros([n, d, 2], dtype=float) path_matrix = np.zeros([n, d, 2], dtype=bool) frame_losses = (1 - alpha) * -np.log(np.stack([1 - frame_probs, frame_probs], axis=-1)) onset_losses = alpha * -np.log(np.stack([1 - onset_probs, onset_probs], axis=-1)) loss_matrix[0, :, :] = frame_losses[0, :, :] + onset_losses[0, :, :] for i in range(1, n): transition_loss = np.tile(loss_matrix[i - 1, :, :][:, :, np.newaxis], [1, 1, 2]) transition_loss[:, 0, 0] += onset_losses[i, :, 0] transition_loss[:, 0, 1] += onset_losses[i, :, 1] transition_loss[:, 1, 0] += onset_losses[i, :, 0] transition_loss[:, 1, 1] += onset_losses[i, :, 0] path_matrix[i, :, :] = np.argmin(transition_loss, axis=1) loss_matrix[i, :, 0] = transition_loss[ np.arange(d), path_matrix[i, :, 0].astype(int), 0] loss_matrix[i, :, 1] = transition_loss[ np.arange(d), path_matrix[i, :, 1].astype(int), 1] loss_matrix[i, :, :] += frame_losses[i, :, :] pianoroll = np.zeros([n, d], dtype=bool) pianoroll[n - 1, :] = np.argmin(loss_matrix[n - 1, :, :], axis=-1) for i in range(n - 2, -1, -1): pianoroll[i, :] = path_matrix[ i + 1, np.arange(d), pianoroll[i + 1, :].astype(int)] return pianoroll ","Viterbi decoding of frame & onset probabilities to pianoroll. Args: frame_probs: A numpy array (num-frames-by-num-pitches) of frame probabilities. onset_probs: A numpy array (num-frames-by-num-pitches) of onset probabilities. alpha: Relative weight of onset and frame loss, a float between 0 and 1. With alpha = 0, onset probabilities will be ignored. With alpha = 1, frame probabilities will be ignored. Returns: A numpy array (num-frames-by-num-pitches) representing the boolean-valued pianoroll. ",67,167,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def probs_to_pianoroll_viterbi(frame_probs, onset_probs, alpha=0.5): n, d = onset_probs.shape loss_matrix = np.zeros([n, d, 2], dtype=float) path_matrix = np.zeros([n, d, 2], dtype=bool) frame_losses = (1 - alpha) * -np.log(np.stack([1 - frame_probs, frame_probs], axis=-1)) onset_losses = alpha * -np.log(np.stack([1 - onset_probs, onset_probs], axis=-1)) loss_matrix[0, :, :] = frame_losses[0, :, :] + onset_losses[0, :, :] for i in range(1, n): transition_loss = np.tile(loss_matrix[i - 1, :, :][:, :, np.newaxis], [1, 1, 2]) transition_loss[:, 0, 0] += onset_losses[i, :, 0] transition_loss[:, 0, 1] += onset_losses[i, :, 1] transition_loss[:, 1, 0] += onset_losses[i, :, 0] transition_loss[:, 1, 1] += onset_losses[i, :, 0] path_matrix[i, :, :] = np.argmin(transition_loss, axis=1) loss_matrix[i, :, 0] = transition_loss[ np.arange(d), path_matrix[i, :, 0].astype(int), 0] loss_matrix[i, :, 1] = transition_loss[ np.arange(d), path_matrix[i, :, 1].astype(int), 1] loss_matrix[i, :, :] += frame_losses[i, :, :] pianoroll = np.zeros([n, d], dtype=bool) pianoroll[n - 1, :] = np.argmin(loss_matrix[n - 1, :, :], axis=-1) for i in range(n - 2, -1, -1): pianoroll[i, :] = path_matrix[ i + 1, np.arange(d), pianoroll[i + 1, :].astype(int)] return pianoroll ``` ###Assistant : Viterbi decoding of frame & onset probabilities to pianoroll. Args: frame_probs: A numpy array (num-frames-by-num-pitches) of frame probabilities. onset_probs: A numpy array (num-frames-by-num-pitches) of onset probabilities. alpha: Relative weight of onset and frame loss, a float between 0 and 1. With alpha = 0, onset probabilities will be ignored. With alpha = 1, frame probabilities will be ignored. Returns: A numpy array (num-frames-by-num-pitches) representing the boolean-valued pianoroll. " 1671,"def to_native_string(string, encoding=""ascii""): if isinstance(string, builtin_str): out = string else: out = string.decode(encoding) return out ","Given a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise. ",29,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_native_string(string, encoding=""ascii""): if isinstance(string, builtin_str): out = string else: out = string.decode(encoding) return out ``` ###Assistant : Given a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise. " 1672,"def mathematica(s, additional_translations=None): parser = MathematicaParser(additional_translations) if additional_translations is not None: sympy_deprecation_warning( , deprecated_since_version=""1.11"", active_deprecations_target=""mathematica-parser-additional-translations"", ) return sympify(parser._parse_old(s)) return parser.parse(s) "," Translate a string containing a Wolfram Mathematica expression to a SymPy expression. If the translator is unable to find a suitable SymPy expression, the ``FullForm`` of the Mathematica expression will be output, using SymPy ``Function`` objects as nodes of the syntax tree. 
Examples ======== >>> from sympy.parsing.mathematica import mathematica >>> mathematica(""Sin[x]^2 Tan[y]"") sin(x)**2*tan(y) >>> e = mathematica(""F[7,5,3]"") >>> e F(7, 5, 3) >>> from sympy import Function, Max, Min >>> e.replace(Function(""F""), lambda *x: Max(*x)*Min(*x)) 21 Both standard input form and Mathematica full form are supported: >>> mathematica(""x*(a + b)"") x*(a + b) >>> mathematica(""Times[x, Plus[a, b]]"") x*(a + b) To get a matrix from Wolfram's code: >>> m = mathematica(""{{a, b}, {c, d}}"") >>> m ((a, b), (c, d)) >>> from sympy import Matrix >>> Matrix(m) Matrix([ [a, b], [c, d]]) If the translation into equivalent SymPy expressions fails, an SymPy expression equivalent to Wolfram Mathematica's ""FullForm"" will be created: >>> mathematica(""x_."") Optional(Pattern(x, Blank())) >>> mathematica(""Plus @@ {x, y, z}"") Apply(Plus, (x, y, z)) >>> mathematica(""f[x_, 3] := x^3 /; x > 0"") SetDelayed(f(Pattern(x, Blank()), 3), Condition(x**3, x > 0)) The ``additional_translations`` parameter for the Mathematica parser is now deprecated. Use SymPy's .replace( ) or .subs( ) methods on the output expression instead.",203,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mathematica(s, additional_translations=None): parser = MathematicaParser(additional_translations) if additional_translations is not None: sympy_deprecation_warning( , deprecated_since_version=""1.11"", active_deprecations_target=""mathematica-parser-additional-translations"", ) return sympify(parser._parse_old(s)) return parser.parse(s) ``` ###Assistant : Translate a string containing a Wolfram Mathematica expression to a SymPy expression. If the translator is unable to find a suitable SymPy expression, the ``FullForm`` of the Mathematica expression will be output, using SymPy ``Function`` objects as nodes of the syntax tree. Examples ======== >>> from sympy.parsing.mathematica import mathematica >>> mathematica(""Sin[x]^2 Tan[y]"") sin(x)**2*tan(y) >>> e = mathematica(""F[7,5,3]"") >>> e F(7, 5, 3) >>> from sympy import Function, Max, Min >>> e.replace(Function(""F""), lambda *x: Max(*x)*Min(*x)) 21 Both standard input form and Mathematica full form are supported: >>> mathematica(""x*(a + b)"") x*(a + b) >>> mathematica(""Times[x, Plus[a, b]]"") x*(a + b) To get a matrix from Wolfram's code: >>> m = mathematica(""{{a, b}, {c, d}}"") >>> m ((a, b), (c, d)) >>> from sympy import Matrix >>> Matrix(m) Matrix([ [a, b], [c, d]]) If the translation into equivalent SymPy expressions fails, an SymPy expression equivalent to Wolfram Mathematica's ""FullForm"" will be created: >>> mathematica(""x_."") Optional(Pattern(x, Blank())) >>> mathematica(""Plus @@ {x, y, z}"") Apply(Plus, (x, y, z)) >>> mathematica(""f[x_, 3] := x^3 /; x > 0"") SetDelayed(f(Pattern(x, Blank()), 3), Condition(x**3, x > 0)) The ``additional_translations`` parameter for the Mathematica parser is now deprecated. Use SymPy's .replace( ) or .subs( ) methods on the output expression instead. " 1673,"def evaluation(self): # adding info about the eval tasks if self.eval_tasks == self.train_tasks: msg = ""For evalution, we used the same training datasets; check the [Datasets Used](#datasets-used) section for more information"" eval_list = '' else: msg = f""This model was evaluated on the datasets below (use the `parlai display_data` commands to show data). 
Visit the {make_link('task (dataset) list', task_site)} for more details about the datasets.\n"" eval_list = get_dataset_info(self.eval_tasks) eval_list = '\n' + '\n'.join(eval_list) content = [msg + eval_list] # validation metric info: getting metric name and description splitted = re.sub(r'_+', ' ', self.valid_metric).split() key = splitted[-1] if extra_metric_info.get(key): mname, description = extra_metric_info[key] elif METRICS_DISPLAY_DATA.get(key): mname = METRICS_DISPLAY_DATA[key].title description = METRICS_DISPLAY_DATA[key].description else: description, mname = (None, None) # adding description for validation metric and re-wording it: msg = f""\n\nWe used the metric {metric_format(self.valid_metric)}"" if len(splitted) == 3 and splitted[0] == 'class' and mname: msg += f"", the {mname.lower()} scores for the class {splitted[1]}"" content.append(msg + ' as the validation metric. ') if description: description = description[0].lower() + description[1:] content[-1] += f""Recall that `{self.valid_metric}` is {description}."" # evaluation table # getting list of subtasks and making columns eval_tasks = self.eval_tasks if len(self.eval_tasks) > 1: eval_tasks.insert(0, 'All') columns = [' '] + [taskname(subtask) for subtask in eval_tasks] # only one row: validation row = [metric_format(self.valid_metric)] for subtask in eval_tasks: # creating the key to get metric and formatting pre = '' if subtask == 'All' or len(eval_tasks) == 1 else subtask + '/' key = pre + self.valid_metric fmt = '{:.4f}' if self.valid_metric in not_percent else '{:.2%}' row.append(fmt.format(self.eval_results[key])) return '\n'.join(content) + '\n\n' + '\n'.join(make_md_table([row], columns)) "," returns a section with dataset info about the eval tasks if they exist, information about the validation metric if it exists, and create a table with the validation metric. ",29,262,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def evaluation(self): # adding info about the eval tasks if self.eval_tasks == self.train_tasks: msg = ""For evalution, we used the same training datasets; check the [Datasets Used](#datasets-used) section for more information"" eval_list = '' else: msg = f""This model was evaluated on the datasets below (use the `parlai display_data` commands to show data). Visit the {make_link('task (dataset) list', task_site)} for more details about the datasets.\n"" eval_list = get_dataset_info(self.eval_tasks) eval_list = '\n' + '\n'.join(eval_list) content = [msg + eval_list] # validation metric info: getting metric name and description splitted = re.sub(r'_+', ' ', self.valid_metric).split() key = splitted[-1] if extra_metric_info.get(key): mname, description = extra_metric_info[key] elif METRICS_DISPLAY_DATA.get(key): mname = METRICS_DISPLAY_DATA[key].title description = METRICS_DISPLAY_DATA[key].description else: description, mname = (None, None) # adding description for validation metric and re-wording it: msg = f""\n\nWe used the metric {metric_format(self.valid_metric)}"" if len(splitted) == 3 and splitted[0] == 'class' and mname: msg += f"", the {mname.lower()} scores for the class {splitted[1]}"" content.append(msg + ' as the validation metric. 
') if description: description = description[0].lower() + description[1:] content[-1] += f""Recall that `{self.valid_metric}` is {description}."" # evaluation table # getting list of subtasks and making columns eval_tasks = self.eval_tasks if len(self.eval_tasks) > 1: eval_tasks.insert(0, 'All') columns = [' '] + [taskname(subtask) for subtask in eval_tasks] # only one row: validation row = [metric_format(self.valid_metric)] for subtask in eval_tasks: # creating the key to get metric and formatting pre = '' if subtask == 'All' or len(eval_tasks) == 1 else subtask + '/' key = pre + self.valid_metric fmt = '{:.4f}' if self.valid_metric in not_percent else '{:.2%}' row.append(fmt.format(self.eval_results[key])) return '\n'.join(content) + '\n\n' + '\n'.join(make_md_table([row], columns)) ``` ###Assistant : returns a section with dataset info about the eval tasks if they exist, information about the validation metric if it exists, and create a table with the validation metric. " 1674,"def _train_with_recompute(n_steps): img_dim, n_channels, batch_size = 256, 1, 4 x, y = _get_dummy_data(img_dim, n_channels, batch_size) # This model is the same model as _get_big_cnn_model but split into 3 parts. models = _get_split_cnn_model( img_dim, n_channels, num_partitions=3, blocks_per_partition=2 ) model1, model2, model3 = models # Apply gradient checkpointing to the submodels using tf.recompute_grad. model1_re = tf.recompute_grad(model1) model2_re = tf.recompute_grad(model2) model3_re = tf.recompute_grad(model3) optimizer = optimizers.SGD() tr_vars = ( model1.trainable_variables + model2.trainable_variables + model3.trainable_variables ) losses = [] for _ in range(n_steps): with tf.GradientTape() as tape: logits1 = model1_re(x) logits2 = model2_re(logits1) logits3 = model3_re(logits2) loss = _compute_loss(logits3, y) losses.append(loss) grads = tape.gradient(loss, tr_vars) # tr_vars optimizer.apply_gradients(zip(grads, tr_vars)) del grads return losses @tf_test_utils.with_eager_op_as_function",Trains a single large model with gradient checkpointing using tf.recompute_grad.,10,110,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _train_with_recompute(n_steps): img_dim, n_channels, batch_size = 256, 1, 4 x, y = _get_dummy_data(img_dim, n_channels, batch_size) # This model is the same model as _get_big_cnn_model but split into 3 parts. models = _get_split_cnn_model( img_dim, n_channels, num_partitions=3, blocks_per_partition=2 ) model1, model2, model3 = models # Apply gradient checkpointing to the submodels using tf.recompute_grad. model1_re = tf.recompute_grad(model1) model2_re = tf.recompute_grad(model2) model3_re = tf.recompute_grad(model3) optimizer = optimizers.SGD() tr_vars = ( model1.trainable_variables + model2.trainable_variables + model3.trainable_variables ) losses = [] for _ in range(n_steps): with tf.GradientTape() as tape: logits1 = model1_re(x) logits2 = model2_re(logits1) logits3 = model3_re(logits2) loss = _compute_loss(logits3, y) losses.append(loss) grads = tape.gradient(loss, tr_vars) # tr_vars optimizer.apply_gradients(zip(grads, tr_vars)) del grads return losses @tf_test_utils.with_eager_op_as_function ``` ###Assistant : Trains a single large model with gradient checkpointing using tf.recompute_grad. 
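A minimal, hedged sketch of the same gradient-checkpointing pattern on a single layer (assuming TF 2.x eager execution; the layer, shapes and names below are illustrative and not taken from the test itself):
```Python
import tensorflow as tf

# Build the layer first so its variables exist before wrapping.
dense = tf.keras.layers.Dense(8)
dense.build(input_shape=(None, 16))

# tf.recompute_grad discards intermediate activations on the forward pass
# and recomputes them during backprop, trading extra compute for memory.
dense_re = tf.recompute_grad(dense)

x = tf.random.normal([4, 16])
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(dense_re(x))
grads = tape.gradient(loss, dense.trainable_variables)
```
Wrapping each sub-model this way is what lets the test fit a larger effective model in memory at the cost of recomputing activations.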
" 1675,"def test_syncer_callback_dead_node_log_error(caplog, ray_start_2_cpus, temp_data_dirs): caplog.set_level(logging.ERROR, logger=""ray.tune.syncer"") tmp_source, tmp_target = temp_data_dirs syncer_callback = TestSyncerCallback( sync_period=0, local_logdir_override=tmp_target, ) trial1 = MockTrial(trial_id=""a"", logdir=tmp_source, on_dead_node=True) syncer_callback.on_trial_result(iteration=1, trials=[], trial=trial1, result={}) assert ( ""An error occurred when trying to get the node ip where this trial is running"" in caplog.text ) ",Check that we catch + log errors when trying syncing with a dead remote node.,15,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_syncer_callback_dead_node_log_error(caplog, ray_start_2_cpus, temp_data_dirs): caplog.set_level(logging.ERROR, logger=""ray.tune.syncer"") tmp_source, tmp_target = temp_data_dirs syncer_callback = TestSyncerCallback( sync_period=0, local_logdir_override=tmp_target, ) trial1 = MockTrial(trial_id=""a"", logdir=tmp_source, on_dead_node=True) syncer_callback.on_trial_result(iteration=1, trials=[], trial=trial1, result={}) assert ( ""An error occurred when trying to get the node ip where this trial is running"" in caplog.text ) ``` ###Assistant : Check that we catch + log errors when trying syncing with a dead remote node. " 1676,"def directed_modularity_matrix(G, nodelist=None, weight=None): import numpy as np if nodelist is None: nodelist = list(G) A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=""csr"") k_in = A.sum(axis=0) k_out = A.sum(axis=1) m = k_in.sum() # Expected adjacency matrix X = np.outer(k_out, k_in) / m return A - X ","Returns the directed modularity matrix of G. The modularity matrix is the matrix B = A - , where A is the adjacency matrix and is the expected adjacency matrix, assuming that the graph is described by the configuration model. More specifically, the element B_ij of B is defined as .. math:: B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m where :math:`k_i^{in}` is the in degree of node i, and :math:`k_j^{out}` is the out degree of node j, with m the number of edges in the graph. When weight is set to a name of an attribute edge, Aij, k_i, k_j and m are computed using its value. Parameters ---------- G : DiGraph A NetworkX DiGraph nodelist : list, optional The rows and columns are ordered according to the nodes in nodelist. If nodelist is None, then the ordering is produced by G.nodes(). weight : string or None, optional (default=None) The edge attribute that holds the numerical value used for the edge weight. If None then all edge weights are 1. Returns ------- B : Numpy array The modularity matrix of G. Examples -------- >>> G = nx.DiGraph() >>> G.add_edges_from( ... ( ... (1, 2), ... (1, 3), ... (3, 1), ... (3, 2), ... (3, 5), ... (4, 5), ... (4, 6), ... (5, 4), ... (5, 6), ... (6, 4), ... ) ... ) >>> B = nx.directed_modularity_matrix(G) Notes ----- NetworkX defines the element A_ij of the adjacency matrix as 1 if there is a link going from node i to node j. Leicht and Newman use the opposite definition. This explains the different expression for B_ij. See Also -------- to_numpy_array modularity_spectrum adjacency_matrix modularity_matrix References ---------- .. [1] E. A. Leicht, M. E. J. Newman, ""Community structure in directed networks"", Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008. ",303,44,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def directed_modularity_matrix(G, nodelist=None, weight=None): import numpy as np if nodelist is None: nodelist = list(G) A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=""csr"") k_in = A.sum(axis=0) k_out = A.sum(axis=1) m = k_in.sum() # Expected adjacency matrix X = np.outer(k_out, k_in) / m return A - X ``` ###Assistant : Returns the directed modularity matrix of G. The modularity matrix is the matrix B = A - , where A is the adjacency matrix and is the expected adjacency matrix, assuming that the graph is described by the configuration model. More specifically, the element B_ij of B is defined as .. math:: B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m where :math:`k_i^{in}` is the in degree of node i, and :math:`k_j^{out}` is the out degree of node j, with m the number of edges in the graph. When weight is set to a name of an attribute edge, Aij, k_i, k_j and m are computed using its value. Parameters ---------- G : DiGraph A NetworkX DiGraph nodelist : list, optional The rows and columns are ordered according to the nodes in nodelist. If nodelist is None, then the ordering is produced by G.nodes(). weight : string or None, optional (default=None) The edge attribute that holds the numerical value used for the edge weight. If None then all edge weights are 1. Returns ------- B : Numpy array The modularity matrix of G. Examples -------- >>> G = nx.DiGraph() >>> G.add_edges_from( ... ( ... (1, 2), ... (1, 3), ... (3, 1), ... (3, 2), ... (3, 5), ... (4, 5), ... (4, 6), ... (5, 4), ... (5, 6), ... (6, 4), ... ) ... ) >>> B = nx.directed_modularity_matrix(G) Notes ----- NetworkX defines the element A_ij of the adjacency matrix as 1 if there is a link going from node i to node j. Leicht and Newman use the opposite definition. This explains the different expression for B_ij. See Also -------- to_numpy_array modularity_spectrum adjacency_matrix modularity_matrix References ---------- .. [1] E. A. Leicht, M. E. J. Newman, ""Community structure in directed networks"", Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008. " 1677,"def project_columns(self, columns): if columns == self.columns: return self return ParquetFunctionWrapper( self.engine, self.fs, self.meta, columns, self.index, None, # Already merged into common_kwargs self.common_kwargs, ) ","Return a new ParquetFunctionWrapper object with a sub-column projection. ",9,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def project_columns(self, columns): if columns == self.columns: return self return ParquetFunctionWrapper( self.engine, self.fs, self.meta, columns, self.index, None, # Already merged into common_kwargs self.common_kwargs, ) ``` ###Assistant : Return a new ParquetFunctionWrapper object with a sub-column projection. 
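The projection pattern can be illustrated with a stripped-down, hypothetical stand-in (`SimpleWrapper` below is not the real Dask class): reuse the same object when the requested columns already match, otherwise return a new wrapper restricted to those columns.
```Python
class SimpleWrapper:
    # Hypothetical stand-in used only to illustrate the projection idea.
    def __init__(self, columns):
        self.columns = columns

    def project_columns(self, columns):
        # Projection is a no-op when the columns already match.
        if columns == self.columns:
            return self
        # Otherwise hand back a new wrapper that reads only these columns.
        return SimpleWrapper(columns)

w = SimpleWrapper(['a', 'b', 'c'])
assert w.project_columns(['a', 'b', 'c']) is w
assert w.project_columns(['a']).columns == ['a']
```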
" 1678,"def sort_args_by_name(self): expr = self.expr if not isinstance(expr, ArrayTensorProduct): return self args = expr.args sorted_data = sorted(enumerate(args), key=lambda x: default_sort_key(x[1])) pos_sorted, args_sorted = zip(*sorted_data) reordering_map = {i: pos_sorted.index(i) for i, arg in enumerate(args)} contraction_tuples = self._get_contraction_tuples() contraction_tuples = [[(reordering_map[j], k) for j, k in i] for i in contraction_tuples] c_tp = _array_tensor_product(*args_sorted) new_contr_indices = self._contraction_tuples_to_contraction_indices( c_tp, contraction_tuples ) return _array_contraction(c_tp, *new_contr_indices) "," Sort arguments in the tensor product so that their order is lexicographical. Examples ======== >>> from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array >>> from sympy import MatrixSymbol >>> from sympy.abc import N >>> A = MatrixSymbol(""A"", N, N) >>> B = MatrixSymbol(""B"", N, N) >>> C = MatrixSymbol(""C"", N, N) >>> D = MatrixSymbol(""D"", N, N) >>> cg = convert_matrix_to_array(C*D*A*B) >>> cg ArrayContraction(ArrayTensorProduct(A, D, C, B), (0, 3), (1, 6), (2, 5)) >>> cg.sort_args_by_name() ArrayContraction(ArrayTensorProduct(A, D, B, C), (0, 3), (1, 4), (2, 7)) ",81,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sort_args_by_name(self): expr = self.expr if not isinstance(expr, ArrayTensorProduct): return self args = expr.args sorted_data = sorted(enumerate(args), key=lambda x: default_sort_key(x[1])) pos_sorted, args_sorted = zip(*sorted_data) reordering_map = {i: pos_sorted.index(i) for i, arg in enumerate(args)} contraction_tuples = self._get_contraction_tuples() contraction_tuples = [[(reordering_map[j], k) for j, k in i] for i in contraction_tuples] c_tp = _array_tensor_product(*args_sorted) new_contr_indices = self._contraction_tuples_to_contraction_indices( c_tp, contraction_tuples ) return _array_contraction(c_tp, *new_contr_indices) ``` ###Assistant : Sort arguments in the tensor product so that their order is lexicographical. Examples ======== >>> from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array >>> from sympy import MatrixSymbol >>> from sympy.abc import N >>> A = MatrixSymbol(""A"", N, N) >>> B = MatrixSymbol(""B"", N, N) >>> C = MatrixSymbol(""C"", N, N) >>> D = MatrixSymbol(""D"", N, N) >>> cg = convert_matrix_to_array(C*D*A*B) >>> cg ArrayContraction(ArrayTensorProduct(A, D, C, B), (0, 3), (1, 6), (2, 5)) >>> cg.sort_args_by_name() ArrayContraction(ArrayTensorProduct(A, D, B, C), (0, 3), (1, 4), (2, 7)) " 1679,"def gather_async(self, batch_ms=0, num_async=1) -> ""LocalIterator[T]"": if num_async < 1: raise ValueError(""queue depth must be positive"") if batch_ms < 0: raise ValueError(""batch time must be positive"") # Forward reference to the returned iterator. local_iter = None ","Returns a local iterable for asynchronous iteration. New items will be fetched from the shards asynchronously as soon as the previous one is computed. Items arrive in non-deterministic order. Arguments: batch_ms (int): Batches items for batch_ms milliseconds on each shard before retrieving it. Increasing batch_ms increases latency but improves throughput. If this value is 0, then items are returned immediately. num_async (int): The max number of async requests in flight per actor. Increasing this improves the amount of pipeline parallelism in the iterator. 
Examples: >>> it = from_range(100, 1).gather_async() >>> next(it) ... 3 >>> next(it) ... 0 >>> next(it) ... 1 ",101,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def gather_async(self, batch_ms=0, num_async=1) -> ""LocalIterator[T]"": if num_async < 1: raise ValueError(""queue depth must be positive"") if batch_ms < 0: raise ValueError(""batch time must be positive"") # Forward reference to the returned iterator. local_iter = None ``` ###Assistant : Returns a local iterable for asynchronous iteration. New items will be fetched from the shards asynchronously as soon as the previous one is computed. Items arrive in non-deterministic order. Arguments: batch_ms (int): Batches items for batch_ms milliseconds on each shard before retrieving it. Increasing batch_ms increases latency but improves throughput. If this value is 0, then items are returned immediately. num_async (int): The max number of async requests in flight per actor. Increasing this improves the amount of pipeline parallelism in the iterator. Examples: >>> it = from_range(100, 1).gather_async() >>> next(it) ... 3 >>> next(it) ... 0 >>> next(it) ... 1 " 1680,"def test_dict_checkpoint_dict(self): checkpoint = self._prepare_dict_checkpoint() # Convert into dict checkpoint data_dict = checkpoint.to_dict() self.assertIsInstance(data_dict, dict) # Create from dict checkpoint = Checkpoint.from_dict(data_dict) self.assertTrue(checkpoint._data_dict) self._assert_dict_checkpoint(checkpoint) ",Test conversion from dict to dict checkpoint and back.,9,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dict_checkpoint_dict(self): checkpoint = self._prepare_dict_checkpoint() # Convert into dict checkpoint data_dict = checkpoint.to_dict() self.assertIsInstance(data_dict, dict) # Create from dict checkpoint = Checkpoint.from_dict(data_dict) self.assertTrue(checkpoint._data_dict) self._assert_dict_checkpoint(checkpoint) ``` ###Assistant : Test conversion from dict to dict checkpoint and back. " 1681,"def test_5_model(self): query = predict_query = for cid, char in [(CID_A, 'a'), (CID_B, 'b')]: self.sql_via_http( query.format(char, char), company_id=cid, expected_resp_type=RESPONSE_TYPE.OK ) response = self.sql_via_http( predict_query.format(char), company_id=cid, expected_resp_type=RESPONSE_TYPE.TABLE ) self.assertTrue(len(response['data']), 1) "," CREATE MODEL mindsdb.model_{} FROM test_integration_{} ( select * from test_data.home_rentals limit 50 ) PREDICT rental_price USING join_learn_process=true, time_aim=5 select * from mindsdb.model_{} where sqft = 100 ",26,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_5_model(self): query = predict_query = for cid, char in [(CID_A, 'a'), (CID_B, 'b')]: self.sql_via_http( query.format(char, char), company_id=cid, expected_resp_type=RESPONSE_TYPE.OK ) response = self.sql_via_http( predict_query.format(char), company_id=cid, expected_resp_type=RESPONSE_TYPE.TABLE ) self.assertTrue(len(response['data']), 1) ``` ###Assistant : CREATE MODEL mindsdb.model_{} FROM test_integration_{} ( select * from test_data.home_rentals limit 50 ) PREDICT rental_price USING join_learn_process=true, time_aim=5 select * from mindsdb.model_{} where sqft = 100 " 1682,"def putpalette(self, data, rawmode=""RGB""): from . 
import ImagePalette if self.mode not in (""L"", ""LA"", ""P"", ""PA""): raise ValueError(""illegal image mode"") if isinstance(data, ImagePalette.ImagePalette): palette = ImagePalette.raw(data.rawmode, data.palette) else: if not isinstance(data, bytes): data = bytes(data) palette = ImagePalette.raw(rawmode, data) self.mode = ""PA"" if ""A"" in self.mode else ""P"" self.palette = palette self.palette.mode = ""RGB"" self.load() # install new palette "," Attaches a palette to this image. The image must be a ""P"", ""PA"", ""L"" or ""LA"" image. The palette sequence must contain at most 256 colors, made up of one integer value for each channel in the raw mode. For example, if the raw mode is ""RGB"", then it can contain at most 768 values, made up of red, green and blue values for the corresponding pixel index in the 256 colors. If the raw mode is ""RGBA"", then it can contain at most 1024 values, containing red, green, blue and alpha values. Alternatively, an 8-bit string may be used instead of an integer sequence. :param data: A palette sequence (either a list or a string). :param rawmode: The raw mode of the palette. Either ""RGB"", ""RGBA"", or a mode that can be transformed to ""RGB"" or ""RGBA"" (e.g. ""R"", ""BGR;15"", ""RGBA;L""). ",142,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def putpalette(self, data, rawmode=""RGB""): from . import ImagePalette if self.mode not in (""L"", ""LA"", ""P"", ""PA""): raise ValueError(""illegal image mode"") if isinstance(data, ImagePalette.ImagePalette): palette = ImagePalette.raw(data.rawmode, data.palette) else: if not isinstance(data, bytes): data = bytes(data) palette = ImagePalette.raw(rawmode, data) self.mode = ""PA"" if ""A"" in self.mode else ""P"" self.palette = palette self.palette.mode = ""RGB"" self.load() # install new palette ``` ###Assistant : Attaches a palette to this image. The image must be a ""P"", ""PA"", ""L"" or ""LA"" image. The palette sequence must contain at most 256 colors, made up of one integer value for each channel in the raw mode. For example, if the raw mode is ""RGB"", then it can contain at most 768 values, made up of red, green and blue values for the corresponding pixel index in the 256 colors. If the raw mode is ""RGBA"", then it can contain at most 1024 values, containing red, green, blue and alpha values. Alternatively, an 8-bit string may be used instead of an integer sequence. :param data: A palette sequence (either a list or a string). :param rawmode: The raw mode of the palette. Either ""RGB"", ""RGBA"", or a mode that can be transformed to ""RGB"" or ""RGBA"" (e.g. ""R"", ""BGR;15"", ""RGBA;L""). " 1683,"def get_timeout() -> t.Optional[t.Dict[str, t.Any]]: if not os.path.exists(TIMEOUT_PATH): return None data = read_json_file(TIMEOUT_PATH) data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ') return data ","Return details about the currently set timeout, if any, otherwise return None.",12,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_timeout() -> t.Optional[t.Dict[str, t.Any]]: if not os.path.exists(TIMEOUT_PATH): return None data = read_json_file(TIMEOUT_PATH) data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ') return data ``` ###Assistant : Return details about the currently set timeout, if any, otherwise return None. 
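A minimal sketch of the deadline parsing performed above (the file contents are made up for illustration; only the timestamp format comes from the code):
```Python
import datetime

# Hypothetical contents of the timeout file after read_json_file().
data = {'duration': 60, 'deadline': '2024-01-01T12:30:00Z'}

# Same strptime format as in get_timeout(): the UTC timestamp string is
# converted to a datetime object before the dict is returned.
data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ')
print(data['deadline'])  # 2024-01-01 12:30:00
```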
" 1684,"def my_mean_squared_error(y_true, y_pred): return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1) module_my_mean_squared_error = my_mean_squared_error @test_utils.run_v2_only","Identical to built-in `mean_squared_error`, added here as a custom func. ",10,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def my_mean_squared_error(y_true, y_pred): return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1) module_my_mean_squared_error = my_mean_squared_error @test_utils.run_v2_only ``` ###Assistant : Identical to built-in `mean_squared_error`, added here as a custom func. " 1685,"def _amd_predict_with_optimized_batchsizes(self, feed, batch_size): if isinstance(feed, np.ndarray): feed = [feed] items = feed[0].shape[0] done_items = 0 results = [] while done_items < items: if batch_size < 4: # Not much difference in BS < 4 batch_size = 1 batch_items = ((items - done_items) // batch_size) * batch_size if batch_items: pred_data = [x[done_items:done_items + batch_items] for x in feed] pred = self._model.predict(pred_data, batch_size=batch_size) done_items += batch_items results.append(pred) batch_size //= 2 if isinstance(results[0], np.ndarray): return np.concatenate(results) return [np.concatenate(x) for x in zip(*results)] "," Minimizes the amount of kernels to be compiled when using the ``amd`` backend with varying batch sizes while trying to keep the batchsize as high as possible. Parameters ---------- feed: numpy.ndarray or list The feed to be provided to the model as input. This should be a ``numpy.ndarray`` for single inputs or a ``list`` of ``numpy.ndarray`` objects for multiple inputs. batch_size: int The upper batchsize to use. ",67,80,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _amd_predict_with_optimized_batchsizes(self, feed, batch_size): if isinstance(feed, np.ndarray): feed = [feed] items = feed[0].shape[0] done_items = 0 results = [] while done_items < items: if batch_size < 4: # Not much difference in BS < 4 batch_size = 1 batch_items = ((items - done_items) // batch_size) * batch_size if batch_items: pred_data = [x[done_items:done_items + batch_items] for x in feed] pred = self._model.predict(pred_data, batch_size=batch_size) done_items += batch_items results.append(pred) batch_size //= 2 if isinstance(results[0], np.ndarray): return np.concatenate(results) return [np.concatenate(x) for x in zip(*results)] ``` ###Assistant : Minimizes the amount of kernels to be compiled when using the ``amd`` backend with varying batch sizes while trying to keep the batchsize as high as possible. Parameters ---------- feed: numpy.ndarray or list The feed to be provided to the model as input. This should be a ``numpy.ndarray`` for single inputs or a ``list`` of ``numpy.ndarray`` objects for multiple inputs. batch_size: int The upper batchsize to use. " 1686,"def set_positions(self, posA, posB): if posA is not None: self._posA_posB[0] = posA if posB is not None: self._posA_posB[1] = posB self.stale = True "," Set the start and end positions of the connecting path. Parameters ---------- posA, posB : None, tuple (x, y) coordinates of arrow tail and arrow head respectively. If `None` use current value. ",32,23,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def set_positions(self, posA, posB): if posA is not None: self._posA_posB[0] = posA if posB is not None: self._posA_posB[1] = posB self.stale = True ``` ###Assistant : Set the start and end positions of the connecting path. Parameters ---------- posA, posB : None, tuple (x, y) coordinates of arrow tail and arrow head respectively. If `None` use current value. " 1687,"def test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data(self): for tag, value, numbers in ( (""transaction"", ""/foo/"", [10, 11, 12]), (""transaction"", ""/bar/"", [4, 5, 6]), ): for subvalue in numbers: self.store_performance_metric( name=TransactionMRI.MEASUREMENTS_LCP.value, tags={tag: value}, value=subvalue, ) response = self.get_success_response( self.organization.slug, field=[ f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"", f""count_unique({TransactionMetricKey.USER.value})"", ], statsPeriod=""1h"", interval=""1h"", groupBy=[""project_id"", ""transaction""], orderBy=f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"", useCase=""performance"", ) groups = response.data[""groups""] assert len(groups) == 2 expected = [ (""/bar/"", 5.0, 5), (""/foo/"", 11.0, 1), ] for (expected_tag_value, expected_lcp_count, users), group in zip(expected, groups): # With orderBy, you only get totals: assert group[""by""] == {""transaction"": expected_tag_value, ""project_id"": self.project.id} assert group[""totals""] == { f""count_unique({TransactionMetricKey.USER.value})"": 0, f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"": expected_lcp_count, } assert group[""series""] == { f""count_unique({TransactionMetricKey.USER.value})"": [0], f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"": [expected_lcp_count], } "," Test that ensures when transactions table has null values for some fields (i.e. fields with a different entity than the entity of the field in the order by), then the table gets populated accordingly ",34,101,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_orderby_percentile_with_many_fields_multiple_entities_with_missing_data(self): for tag, value, numbers in ( (""transaction"", ""/foo/"", [10, 11, 12]), (""transaction"", ""/bar/"", [4, 5, 6]), ): for subvalue in numbers: self.store_performance_metric( name=TransactionMRI.MEASUREMENTS_LCP.value, tags={tag: value}, value=subvalue, ) response = self.get_success_response( self.organization.slug, field=[ f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"", f""count_unique({TransactionMetricKey.USER.value})"", ], statsPeriod=""1h"", interval=""1h"", groupBy=[""project_id"", ""transaction""], orderBy=f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"", useCase=""performance"", ) groups = response.data[""groups""] assert len(groups) == 2 expected = [ (""/bar/"", 5.0, 5), (""/foo/"", 11.0, 1), ] for (expected_tag_value, expected_lcp_count, users), group in zip(expected, groups): # With orderBy, you only get totals: assert group[""by""] == {""transaction"": expected_tag_value, ""project_id"": self.project.id} assert group[""totals""] == { f""count_unique({TransactionMetricKey.USER.value})"": 0, f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"": expected_lcp_count, } assert group[""series""] == { f""count_unique({TransactionMetricKey.USER.value})"": [0], f""p50({TransactionMetricKey.MEASUREMENTS_LCP.value})"": [expected_lcp_count], } ``` ###Assistant : Test that ensures when transactions table has null values for some fields (i.e. fields with a different entity than the entity of the field in the order by), then the table gets populated accordingly " 1688,"def get_earning_components_max_benefits(employee, date, earning_component): salary_structure = get_assigned_salary_structure(employee, date) amount = frappe.db.sql( , salary_structure, earning_component, ) return amount if amount else 0 "," select amount from `tabSalary Detail` where parent = %s and is_flexible_benefit = 1 and salary_component = %s order by name ",20,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_earning_components_max_benefits(employee, date, earning_component): salary_structure = get_assigned_salary_structure(employee, date) amount = frappe.db.sql( , salary_structure, earning_component, ) return amount if amount else 0 ``` ###Assistant : select amount from `tabSalary Detail` where parent = %s and is_flexible_benefit = 1 and salary_component = %s order by name " 1689,"def get_input_mask_at(self, node_index): inputs = self.get_input_at(node_index) if isinstance(inputs, list): return [getattr(x, ""_keras_mask"", None) for x in inputs] else: return getattr(inputs, ""_keras_mask"", None) ","Retrieves the input mask tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple inputs). ",51,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_input_mask_at(self, node_index): inputs = self.get_input_at(node_index) if isinstance(inputs, list): return [getattr(x, ""_keras_mask"", None) for x in inputs] else: return getattr(inputs, ""_keras_mask"", None) ``` ###Assistant : Retrieves the input mask tensor(s) of a layer at a given node. 
Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple inputs). " 1690,"def get_default_grpc_options(): return [ ('grpc.max_send_message_length', -1), ('grpc.max_receive_message_length', -1), ] "," Returns a list of default options used for creating grpc channels. Documentation is here https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h :returns: list of tuples defining grpc parameters ",22,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_default_grpc_options(): return [ ('grpc.max_send_message_length', -1), ('grpc.max_receive_message_length', -1), ] ``` ###Assistant : Returns a list of default options used for creating grpc channels. Documentation is here https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h :returns: list of tuples defining grpc parameters " 1691,"def _cancel_futures_kwargs(self): if sys.version_info[:2] < (3, 9): return {} return dict(cancel_futures=True) ","Shim older Pythons that do not have Executor.shutdown(...cancel_futures=). Remove this code when support for Python 3.8 is dropped. ",18,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cancel_futures_kwargs(self): if sys.version_info[:2] < (3, 9): return {} return dict(cancel_futures=True) ``` ###Assistant : Shim older Pythons that do not have Executor.shutdown(...cancel_futures=). Remove this code when support for Python 3.8 is dropped. " 1692,"def _estimate_available_parallelism() -> int: cur_pg = ray.util.get_current_placement_group() return _estimate_avail_cpus(cur_pg) ","Estimates the available CPU parallelism for this Dataset in the cluster. If we are currently in a placement group, take that into account.",23,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _estimate_available_parallelism() -> int: cur_pg = ray.util.get_current_placement_group() return _estimate_avail_cpus(cur_pg) ``` ###Assistant : Estimates the available CPU parallelism for this Dataset in the cluster. If we are currently in a placement group, take that into account. " 1693,"def bin(num, max_bits=None): ceiling = 2 ** (num).bit_length() if num >= 0: s = bltns.bin(num + ceiling).replace('1', '0', 1) else: s = bltns.bin(~num ^ (ceiling - 1) + ceiling) sign = s[:3] digits = s[3:] if max_bits is not None: if len(digits) < max_bits: digits = (sign[-1] * max_bits + digits)[-max_bits:] return ""%s %s"" % (sign, digits) "," Like built-in bin(), except negative values are represented in twos-compliment, and the leading bit always indicates sign (0=positive, 1=negative). >>> bin(10) '0b0 1010' >>> bin(~10) # ~10 is -11 '0b1 0101' ",31,57,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def bin(num, max_bits=None): ceiling = 2 ** (num).bit_length() if num >= 0: s = bltns.bin(num + ceiling).replace('1', '0', 1) else: s = bltns.bin(~num ^ (ceiling - 1) + ceiling) sign = s[:3] digits = s[3:] if max_bits is not None: if len(digits) < max_bits: digits = (sign[-1] * max_bits + digits)[-max_bits:] return ""%s %s"" % (sign, digits) ``` ###Assistant : Like built-in bin(), except negative values are represented in twos-compliment, and the leading bit always indicates sign (0=positive, 1=negative). >>> bin(10) '0b0 1010' >>> bin(~10) # ~10 is -11 '0b1 0101' " 1694,"def axis0_safe_slice(X, mask, len_mask): if len_mask != 0: return X[safe_mask(X, mask), :] return np.zeros(shape=(0, X.shape[1])) ","Return a mask which is safer to use on X than safe_mask. This mask is safer than safe_mask since it returns an empty array, when a sparse matrix is sliced with a boolean mask with all False, instead of raising an unhelpful error in older versions of SciPy. See: https://github.com/scipy/scipy/issues/5361 Also note that we can avoid doing the dot product by checking if the len_mask is not zero in _huber_loss_and_gradient but this is not going to be the bottleneck, since the number of outliers and non_outliers are typically non-zero and it makes the code tougher to follow. Parameters ---------- X : {array-like, sparse matrix} Data on which to apply mask. mask : ndarray Mask to be used on X. len_mask : int The length of the mask. Returns ------- mask : ndarray Array that is safe to use on X. ",140,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def axis0_safe_slice(X, mask, len_mask): if len_mask != 0: return X[safe_mask(X, mask), :] return np.zeros(shape=(0, X.shape[1])) ``` ###Assistant : Return a mask which is safer to use on X than safe_mask. This mask is safer than safe_mask since it returns an empty array, when a sparse matrix is sliced with a boolean mask with all False, instead of raising an unhelpful error in older versions of SciPy. See: https://github.com/scipy/scipy/issues/5361 Also note that we can avoid doing the dot product by checking if the len_mask is not zero in _huber_loss_and_gradient but this is not going to be the bottleneck, since the number of outliers and non_outliers are typically non-zero and it makes the code tougher to follow. Parameters ---------- X : {array-like, sparse matrix} Data on which to apply mask. mask : ndarray Mask to be used on X. len_mask : int The length of the mask. Returns ------- mask : ndarray Array that is safe to use on X. " 1695,"def _most_frequent(array, extra_value, n_repeat): # Compute the most frequent value in array only if array.size > 0: if array.dtype == object: # scipy.stats.mode is slow with object dtype array. 
# Python Counter is more efficient counter = Counter(array) most_frequent_count = counter.most_common(1)[0][1] # tie breaking similarly to scipy.stats.mode most_frequent_value = min( value for value, count in counter.items() if count == most_frequent_count ) else: mode = _mode(array) most_frequent_value = mode[0][0] most_frequent_count = mode[1][0] else: most_frequent_value = 0 most_frequent_count = 0 # Compare to array + [extra_value] * n_repeat if most_frequent_count == 0 and n_repeat == 0: return np.nan elif most_frequent_count < n_repeat: return extra_value elif most_frequent_count > n_repeat: return most_frequent_value elif most_frequent_count == n_repeat: # tie breaking similarly to scipy.stats.mode return min(most_frequent_value, extra_value) ","Compute the most frequent value in a 1d array extended with [extra_value] * n_repeat, where extra_value is assumed to be not part of the array.",25,121,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _most_frequent(array, extra_value, n_repeat): # Compute the most frequent value in array only if array.size > 0: if array.dtype == object: # scipy.stats.mode is slow with object dtype array. # Python Counter is more efficient counter = Counter(array) most_frequent_count = counter.most_common(1)[0][1] # tie breaking similarly to scipy.stats.mode most_frequent_value = min( value for value, count in counter.items() if count == most_frequent_count ) else: mode = _mode(array) most_frequent_value = mode[0][0] most_frequent_count = mode[1][0] else: most_frequent_value = 0 most_frequent_count = 0 # Compare to array + [extra_value] * n_repeat if most_frequent_count == 0 and n_repeat == 0: return np.nan elif most_frequent_count < n_repeat: return extra_value elif most_frequent_count > n_repeat: return most_frequent_value elif most_frequent_count == n_repeat: # tie breaking similarly to scipy.stats.mode return min(most_frequent_value, extra_value) ``` ###Assistant : Compute the most frequent value in a 1d array extended with [extra_value] * n_repeat, where extra_value is assumed to be not part of the array. " 1696,"def _cov(X, shrinkage=None, covariance_estimator=None): if covariance_estimator is None: shrinkage = ""empirical"" if shrinkage is None else shrinkage if isinstance(shrinkage, str): if shrinkage == ""auto"": sc = StandardScaler() # standardize features X = sc.fit_transform(X) s = ledoit_wolf(X)[0] # rescale s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] elif shrinkage == ""empirical"": s = empirical_covariance(X) else: raise ValueError(""unknown shrinkage parameter"") elif isinstance(shrinkage, Real): if shrinkage < 0 or shrinkage > 1: raise ValueError(""shrinkage parameter must be between 0 and 1"") s = shrunk_covariance(empirical_covariance(X), shrinkage) else: raise TypeError(""shrinkage must be a float or a string"") else: if shrinkage is not None and shrinkage != 0: raise ValueError( ""covariance_estimator and shrinkage parameters "" ""are not None. Only one of the two can be set."" ) covariance_estimator.fit(X) if not hasattr(covariance_estimator, ""covariance_""): raise ValueError( ""%s does not have a covariance_ attribute"" % covariance_estimator.__class__.__name__ ) s = covariance_estimator.covariance_ return s ","Estimate covariance matrix (using optional covariance_estimator). Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. 
shrinkage : {'empirical', 'auto'} or float, default=None Shrinkage parameter, possible values: - None or 'empirical': no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Shrinkage parameter is ignored if `covariance_estimator` is not None. covariance_estimator : estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance matrices instead of relying on the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a ``covariance_`` attribute like the estimators in :mod:`sklearn.covariance``. if None the shrinkage parameter drives the estimate. .. versionadded:: 0.24 Returns ------- s : ndarray of shape (n_features, n_features) Estimated covariance matrix. ",126,144,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cov(X, shrinkage=None, covariance_estimator=None): if covariance_estimator is None: shrinkage = ""empirical"" if shrinkage is None else shrinkage if isinstance(shrinkage, str): if shrinkage == ""auto"": sc = StandardScaler() # standardize features X = sc.fit_transform(X) s = ledoit_wolf(X)[0] # rescale s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] elif shrinkage == ""empirical"": s = empirical_covariance(X) else: raise ValueError(""unknown shrinkage parameter"") elif isinstance(shrinkage, Real): if shrinkage < 0 or shrinkage > 1: raise ValueError(""shrinkage parameter must be between 0 and 1"") s = shrunk_covariance(empirical_covariance(X), shrinkage) else: raise TypeError(""shrinkage must be a float or a string"") else: if shrinkage is not None and shrinkage != 0: raise ValueError( ""covariance_estimator and shrinkage parameters "" ""are not None. Only one of the two can be set."" ) covariance_estimator.fit(X) if not hasattr(covariance_estimator, ""covariance_""): raise ValueError( ""%s does not have a covariance_ attribute"" % covariance_estimator.__class__.__name__ ) s = covariance_estimator.covariance_ return s ``` ###Assistant : Estimate covariance matrix (using optional covariance_estimator). Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. shrinkage : {'empirical', 'auto'} or float, default=None Shrinkage parameter, possible values: - None or 'empirical': no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Shrinkage parameter is ignored if `covariance_estimator` is not None. covariance_estimator : estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance matrices instead of relying on the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a ``covariance_`` attribute like the estimators in :mod:`sklearn.covariance``. if None the shrinkage parameter drives the estimate. .. versionadded:: 0.24 Returns ------- s : ndarray of shape (n_features, n_features) Estimated covariance matrix. " 1697,"def from_key_val_list(value): if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError(""cannot encode objects that are not 2-tuples"") return OrderedDict(value) ","Take an object and test to see if it can be represented as a dictionary. 
Unless it can not be represented as such, return an OrderedDict, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') Traceback (most recent call last): ... ValueError: cannot encode objects that are not 2-tuples >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) :rtype: OrderedDict ",56,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def from_key_val_list(value): if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError(""cannot encode objects that are not 2-tuples"") return OrderedDict(value) ``` ###Assistant : Take an object and test to see if it can be represented as a dictionary. Unless it can not be represented as such, return an OrderedDict, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') Traceback (most recent call last): ... ValueError: cannot encode objects that are not 2-tuples >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) :rtype: OrderedDict " 1698,"def load_lexer_from_file(filename, lexername=""CustomLexer"", **options): try: # This empty dict will contain the namespace for the exec'd file custom_namespace = {} with open(filename, 'rb') as f: exec(f.read(), custom_namespace) # Retrieve the class `lexername` from that namespace if lexername not in custom_namespace: raise ClassNotFound('no valid %s class found in %s' % (lexername, filename)) lexer_class = custom_namespace[lexername] # And finally instantiate it with the options return lexer_class(**options) except OSError as err: raise ClassNotFound('cannot read %s: %s' % (filename, err)) except ClassNotFound: raise except Exception as err: raise ClassNotFound('error when loading custom lexer: %s' % err) ","Load a lexer from a file. This method expects a file located relative to the current working directory, which contains a Lexer class. By default, it expects the Lexer to be name CustomLexer; you can specify your own class name as the second argument to this function. Users should be very careful with the input, because this method is equivalent to running eval on the input file. Raises ClassNotFound if there are any problems importing the Lexer. .. versionadded:: 2.2 ",80,92,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_lexer_from_file(filename, lexername=""CustomLexer"", **options): try: # This empty dict will contain the namespace for the exec'd file custom_namespace = {} with open(filename, 'rb') as f: exec(f.read(), custom_namespace) # Retrieve the class `lexername` from that namespace if lexername not in custom_namespace: raise ClassNotFound('no valid %s class found in %s' % (lexername, filename)) lexer_class = custom_namespace[lexername] # And finally instantiate it with the options return lexer_class(**options) except OSError as err: raise ClassNotFound('cannot read %s: %s' % (filename, err)) except ClassNotFound: raise except Exception as err: raise ClassNotFound('error when loading custom lexer: %s' % err) ``` ###Assistant : Load a lexer from a file. This method expects a file located relative to the current working directory, which contains a Lexer class. By default, it expects the Lexer to be name CustomLexer; you can specify your own class name as the second argument to this function. 
Users should be very careful with the input, because this method is equivalent to running eval on the input file. Raises ClassNotFound if there are any problems importing the Lexer. .. versionadded:: 2.2 " 1699,"def get_price(item_code, price_list, customer_group, company, qty=1): from erpnext.e_commerce.shopping_cart.cart import get_party template_item_code = frappe.db.get_value(""Item"", item_code, ""variant_of"") if price_list: price = frappe.get_all(""Item Price"", fields=[""price_list_rate"", ""currency""], filters={""price_list"": price_list, ""item_code"": item_code}) if template_item_code and not price: price = frappe.get_all(""Item Price"", fields=[""price_list_rate"", ""currency""], filters={""price_list"": price_list, ""item_code"": template_item_code}) if price: party = get_party() pricing_rule_dict = frappe._dict({ ""item_code"": item_code, ""qty"": qty, ""stock_qty"": qty, ""transaction_type"": ""selling"", ""price_list"": price_list, ""customer_group"": customer_group, ""company"": company, ""conversion_rate"": 1, ""for_shopping_cart"": True, ""currency"": frappe.db.get_value(""Price List"", price_list, ""currency"") }) if party and party.doctype == ""Customer"": pricing_rule_dict.update({""customer"": party.name}) pricing_rule = get_pricing_rule_for_item(pricing_rule_dict) price_obj = price[0] if pricing_rule: # price without any rules applied mrp = price_obj.price_list_rate or 0 if pricing_rule.pricing_rule_for == ""Discount Percentage"": price_obj.discount_percent = pricing_rule.discount_percentage price_obj.formatted_discount_percent = str(flt(pricing_rule.discount_percentage, 0)) + ""%"" price_obj.price_list_rate = flt(price_obj.price_list_rate * (1.0 - (flt(pricing_rule.discount_percentage) / 100.0))) if pricing_rule.pricing_rule_for == ""Rate"": rate_discount = flt(mrp) - flt(pricing_rule.price_list_rate) if rate_discount > 0: price_obj.formatted_discount_rate = fmt_money(rate_discount, currency=price_obj[""currency""]) price_obj.price_list_rate = pricing_rule.price_list_rate or 0 if price_obj: price_obj[""formatted_price""] = fmt_money(price_obj[""price_list_rate""], currency=price_obj[""currency""]) if mrp != price_obj[""price_list_rate""]: price_obj[""formatted_mrp""] = fmt_money(mrp, currency=price_obj[""currency""]) price_obj[""currency_symbol""] = not cint(frappe.db.get_default(""hide_currency_symbol"")) \ and (frappe.db.get_value(""Currency"", price_obj.currency, ""symbol"", cache=True) or price_obj.currency) \ or """" uom_conversion_factor = frappe.db.sql(, item_code) uom_conversion_factor = uom_conversion_factor[0][0] if uom_conversion_factor else 1 price_obj[""formatted_price_sales_uom""] = fmt_money(price_obj[""price_list_rate""] * uom_conversion_factor, currency=price_obj[""currency""]) if not price_obj[""price_list_rate""]: price_obj[""price_list_rate""] = 0 if not price_obj[""currency""]: price_obj[""currency""] = """" if not price_obj[""formatted_price""]: price_obj[""formatted_price""], price_obj[""formatted_mrp""] = """", """" return price_obj ","select C.conversion_factor from `tabUOM Conversion Detail` C inner join `tabItem` I on C.parent = I.name and C.uom = I.sales_uom where I.name = %s",23,214,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_price(item_code, price_list, customer_group, company, qty=1): from erpnext.e_commerce.shopping_cart.cart import get_party template_item_code = frappe.db.get_value(""Item"", item_code, ""variant_of"") if price_list: price = frappe.get_all(""Item Price"", fields=[""price_list_rate"", ""currency""], filters={""price_list"": price_list, ""item_code"": item_code}) if template_item_code and not price: price = frappe.get_all(""Item Price"", fields=[""price_list_rate"", ""currency""], filters={""price_list"": price_list, ""item_code"": template_item_code}) if price: party = get_party() pricing_rule_dict = frappe._dict({ ""item_code"": item_code, ""qty"": qty, ""stock_qty"": qty, ""transaction_type"": ""selling"", ""price_list"": price_list, ""customer_group"": customer_group, ""company"": company, ""conversion_rate"": 1, ""for_shopping_cart"": True, ""currency"": frappe.db.get_value(""Price List"", price_list, ""currency"") }) if party and party.doctype == ""Customer"": pricing_rule_dict.update({""customer"": party.name}) pricing_rule = get_pricing_rule_for_item(pricing_rule_dict) price_obj = price[0] if pricing_rule: # price without any rules applied mrp = price_obj.price_list_rate or 0 if pricing_rule.pricing_rule_for == ""Discount Percentage"": price_obj.discount_percent = pricing_rule.discount_percentage price_obj.formatted_discount_percent = str(flt(pricing_rule.discount_percentage, 0)) + ""%"" price_obj.price_list_rate = flt(price_obj.price_list_rate * (1.0 - (flt(pricing_rule.discount_percentage) / 100.0))) if pricing_rule.pricing_rule_for == ""Rate"": rate_discount = flt(mrp) - flt(pricing_rule.price_list_rate) if rate_discount > 0: price_obj.formatted_discount_rate = fmt_money(rate_discount, currency=price_obj[""currency""]) price_obj.price_list_rate = pricing_rule.price_list_rate or 0 if price_obj: price_obj[""formatted_price""] = fmt_money(price_obj[""price_list_rate""], currency=price_obj[""currency""]) if mrp != price_obj[""price_list_rate""]: price_obj[""formatted_mrp""] = fmt_money(mrp, currency=price_obj[""currency""]) price_obj[""currency_symbol""] = not cint(frappe.db.get_default(""hide_currency_symbol"")) \ and (frappe.db.get_value(""Currency"", price_obj.currency, ""symbol"", cache=True) or price_obj.currency) \ or """" uom_conversion_factor = frappe.db.sql(, item_code) uom_conversion_factor = uom_conversion_factor[0][0] if uom_conversion_factor else 1 price_obj[""formatted_price_sales_uom""] = fmt_money(price_obj[""price_list_rate""] * uom_conversion_factor, currency=price_obj[""currency""]) if not price_obj[""price_list_rate""]: price_obj[""price_list_rate""] = 0 if not price_obj[""currency""]: price_obj[""currency""] = """" if not price_obj[""formatted_price""]: price_obj[""formatted_price""], price_obj[""formatted_mrp""] = """", """" return price_obj ``` ###Assistant : select C.conversion_factor from `tabUOM Conversion Detail` C inner join `tabItem` I on C.parent = I.name and C.uom = I.sales_uom where I.name = %s " 1700,"def progress(self, msg): if self.paras.verbose: sys.stdout.write(""\033[K"") # Clear line print('[{}] {}'.format(human_format(self.step), msg), end='\r') ", Verbose function for updating progress on stdout (do not include newline) ,11,13,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def progress(self, msg): if self.paras.verbose: sys.stdout.write(""\033[K"") # Clear line print('[{}] {}'.format(human_format(self.step), msg), end='\r') ``` ###Assistant : Verbose function for updating progress on stdout (do not include newline) " 1701,"def test_customize_compiler_before_get_config_vars(self): # Issue #21923: test that a Distribution compiler # instance can be called without an explicit call to # get_config_vars(). with open(TESTFN, 'w') as f: f.writelines(textwrap.dedent()) p = subprocess.Popen([str(sys.executable), TESTFN], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) outs, errs = p.communicate() self.assertEqual(0, p.returncode, ""Subprocess failed: "" + outs) ","\ from distutils.core import Distribution config = Distribution().get_command_obj('config') # try_compile may pass or it may fail if no compiler # is found but it should not raise an exception. rc = config.try_compile('int x;') ",33,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_customize_compiler_before_get_config_vars(self): # Issue #21923: test that a Distribution compiler # instance can be called without an explicit call to # get_config_vars(). with open(TESTFN, 'w') as f: f.writelines(textwrap.dedent()) p = subprocess.Popen([str(sys.executable), TESTFN], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) outs, errs = p.communicate() self.assertEqual(0, p.returncode, ""Subprocess failed: "" + outs) ``` ###Assistant : \ from distutils.core import Distribution config = Distribution().get_command_obj('config') # try_compile may pass or it may fail if no compiler # is found but it should not raise an exception. rc = config.try_compile('int x;') " 1702,"def siren_platform_only(): with patch( ""homeassistant.components.zha.PLATFORMS"", ( Platform.DEVICE_TRACKER, Platform.NUMBER, Platform.SENSOR, Platform.SELECT, Platform.SIREN, ), ): yield @pytest.fixture",Only setup the siren and required base platforms to speed up tests.,12,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def siren_platform_only(): with patch( ""homeassistant.components.zha.PLATFORMS"", ( Platform.DEVICE_TRACKER, Platform.NUMBER, Platform.SENSOR, Platform.SELECT, Platform.SIREN, ), ): yield @pytest.fixture ``` ###Assistant : Only setup the siren and required base platforms to speed up tests. " 1703,"def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self): self._rlsn._auth.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, ""foo"", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER ), ) self._rlsn._server_notices_manager.__is_room_currently_blocked = Mock( return_value=make_awaitable((True, [])) ) mock_event = Mock( type=EventTypes.Message, content={""msgtype"": ServerNoticeMsgType} ) self._rlsn._store.get_events = Mock( return_value=make_awaitable({""123"": mock_event}) ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) self._send_notice.assert_called_once() "," When the room is already in a blocked state, test that when alerting is suppressed that the room is returned to an unblocked state. ",24,33,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self): self._rlsn._auth.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, ""foo"", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER ), ) self._rlsn._server_notices_manager.__is_room_currently_blocked = Mock( return_value=make_awaitable((True, [])) ) mock_event = Mock( type=EventTypes.Message, content={""msgtype"": ServerNoticeMsgType} ) self._rlsn._store.get_events = Mock( return_value=make_awaitable({""123"": mock_event}) ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) self._send_notice.assert_called_once() ``` ###Assistant : When the room is already in a blocked state, test that when alerting is suppressed that the room is returned to an unblocked state. " 1704,"def test_send(event_listener, salt_master, salt_minion, salt_call_cli): event_tag = random_string(""salt/test/event/"") data = {""event.fire"": ""just test it!!!!""} start_time = time.time() ret = salt_call_cli.run( ""event.send"", event_tag, data=data, with_grains=True, with_pillar=True, preload={""foo"": ""bar""}, ) assert ret.exitcode == 0 assert ret.json assert ret.json is True event_pattern = (salt_master.id, event_tag) matched_events = event_listener.wait_for_events( [event_pattern], after_time=start_time, timeout=30 ) assert matched_events.found_all_events for event in matched_events: assert event.data[""id""] == salt_minion.id assert event.data[""cmd""] == ""_minion_event"" assert ""event.fire"" in event.data[""data""] assert event.data[""foo""] == ""bar"" assert event.data[""data""][""grains""][""test_grain""] == ""cheese"" assert event.data[""data""][""pillar""][""ext_spam""] == ""eggs"" "," Test sending an event to the master event bus ",9,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_send(event_listener, salt_master, salt_minion, salt_call_cli): event_tag = random_string(""salt/test/event/"") data = {""event.fire"": ""just test it!!!!""} start_time = time.time() ret = salt_call_cli.run( ""event.send"", event_tag, data=data, with_grains=True, with_pillar=True, preload={""foo"": ""bar""}, ) assert ret.exitcode == 0 assert ret.json assert ret.json is True event_pattern = (salt_master.id, event_tag) matched_events = event_listener.wait_for_events( [event_pattern], after_time=start_time, timeout=30 ) assert matched_events.found_all_events for event in matched_events: assert event.data[""id""] == salt_minion.id assert event.data[""cmd""] == ""_minion_event"" assert ""event.fire"" in event.data[""data""] assert event.data[""foo""] == ""bar"" assert event.data[""data""][""grains""][""test_grain""] == ""cheese"" assert event.data[""data""][""pillar""][""ext_spam""] == ""eggs"" ``` ###Assistant : Test sending an event to the master event bus " 1705,"def _check_tree_and_avals(what, tree1, avals1, tree2, avals2): if tree1 != tree2: raise TypeError( f""{what} must have same type structure, got {tree1} and {tree2}."") if not all(_map(core.typematch, avals1, avals2)): diff = tree_map(_show_diff, tree_unflatten(tree1, avals1), tree_unflatten(tree2, avals2)) raise TypeError(f""{what} must have identical types, got\n{diff}."") ","Raises TypeError if (tree1, avals1) does not match (tree2, avals2). Corresponding `tree` and `avals` must match in the sense that the number of leaves in `tree` must be equal to the length of `avals`. 
`what` will be prepended to details of the mismatch in TypeError. ",45,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_tree_and_avals(what, tree1, avals1, tree2, avals2): if tree1 != tree2: raise TypeError( f""{what} must have same type structure, got {tree1} and {tree2}."") if not all(_map(core.typematch, avals1, avals2)): diff = tree_map(_show_diff, tree_unflatten(tree1, avals1), tree_unflatten(tree2, avals2)) raise TypeError(f""{what} must have identical types, got\n{diff}."") ``` ###Assistant : Raises TypeError if (tree1, avals1) does not match (tree2, avals2). Corresponding `tree` and `avals` must match in the sense that the number of leaves in `tree` must be equal to the length of `avals`. `what` will be prepended to details of the mismatch in TypeError. " 1706,"def test_load_global_local_flag_config(self): global_config = local_config = global_config_path = ""/mock/home/folder/.streamlit/config.toml"" local_config_path = os.path.join(os.getcwd(), "".streamlit/config.toml"") global_open = mock_open(read_data=global_config) local_open = mock_open(read_data=local_config) open = mock_open() open.side_effect = [global_open.return_value, local_open.return_value] open_patch = patch(""streamlit.config.open"", open) # patch streamlit.*.os.* instead of os.* for py35 compat makedirs_patch = patch(""streamlit.config.os.makedirs"") makedirs_patch.return_value = True pathexists_patch = patch(""streamlit.config.os.path.exists"") pathexists_patch.side_effect = lambda path: path in [ global_config_path, local_config_path, ] with open_patch, makedirs_patch, pathexists_patch: config.get_config_options(options_from_flags={""theme.font"": ""monospace""}) self.assertEqual(""light"", config.get_option(""theme.base"")) self.assertEqual(""#FFFFFF"", config.get_option(""theme.textColor"")) self.assertEqual(""monospace"", config.get_option(""theme.font"")) ","Test that CLI flags have higher priority than both ~/.streamlit/config.toml and $CWD/.streamlit/config.toml at parse time. [theme] base = ""dark"" font = ""sans serif"" textColor = ""#FFFFFF"" [theme] base = ""light"" font = ""serif"" ",33,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_load_global_local_flag_config(self): global_config = local_config = global_config_path = ""/mock/home/folder/.streamlit/config.toml"" local_config_path = os.path.join(os.getcwd(), "".streamlit/config.toml"") global_open = mock_open(read_data=global_config) local_open = mock_open(read_data=local_config) open = mock_open() open.side_effect = [global_open.return_value, local_open.return_value] open_patch = patch(""streamlit.config.open"", open) # patch streamlit.*.os.* instead of os.* for py35 compat makedirs_patch = patch(""streamlit.config.os.makedirs"") makedirs_patch.return_value = True pathexists_patch = patch(""streamlit.config.os.path.exists"") pathexists_patch.side_effect = lambda path: path in [ global_config_path, local_config_path, ] with open_patch, makedirs_patch, pathexists_patch: config.get_config_options(options_from_flags={""theme.font"": ""monospace""}) self.assertEqual(""light"", config.get_option(""theme.base"")) self.assertEqual(""#FFFFFF"", config.get_option(""theme.textColor"")) self.assertEqual(""monospace"", config.get_option(""theme.font"")) ``` ###Assistant : Test that CLI flags have higher priority than both ~/.streamlit/config.toml and $CWD/.streamlit/config.toml at parse time. 
[theme] base = ""dark"" font = ""sans serif"" textColor = ""#FFFFFF"" [theme] base = ""light"" font = ""serif"" " 1707,"def get_valid_filename(name): s = str(name).strip().replace("" "", ""_"") s = re.sub(r""(?u)[^-\w.]"", """", s) if s in {"""", ""."", ""..""}: raise SuspiciousFileOperation(""Could not derive file name from '%s'"" % name) return s @keep_lazy_text"," Return the given string converted to a string that can be used for a clean filename. Remove leading and trailing spaces; convert other spaces to underscores; and remove anything that is not an alphanumeric, dash, underscore, or dot. >>> get_valid_filename(""john's portrait in 2004.jpg"") 'johns_portrait_in_2004.jpg' ",44,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_valid_filename(name): s = str(name).strip().replace("" "", ""_"") s = re.sub(r""(?u)[^-\w.]"", """", s) if s in {"""", ""."", ""..""}: raise SuspiciousFileOperation(""Could not derive file name from '%s'"" % name) return s @keep_lazy_text ``` ###Assistant : Return the given string converted to a string that can be used for a clean filename. Remove leading and trailing spaces; convert other spaces to underscores; and remove anything that is not an alphanumeric, dash, underscore, or dot. >>> get_valid_filename(""john's portrait in 2004.jpg"") 'johns_portrait_in_2004.jpg' " 1708,"def test_no_duplicates_for_m2m_in_list_filter(self): blues = Genre.objects.create(name=""Blues"") band = Band.objects.create(name=""B.B. King Review"", nr_of_members=11) band.genres.add(blues) band.genres.add(blues) m = BandAdmin(Band, custom_site) request = self.factory.get(""/band/"", data={""genres"": blues.pk}) request.user = self.superuser cl = m.get_changelist_instance(request) cl.get_results(request) # There's only one Group instance self.assertEqual(cl.result_count, 1) # Queryset must be deletable. self.assertIs(cl.queryset.query.distinct, False) cl.queryset.delete() self.assertEqual(cl.queryset.count(), 0) "," Regression test for #13902: When using a ManyToMany in list_filter, results shouldn't appear more than once. Basic ManyToMany. ",18,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_no_duplicates_for_m2m_in_list_filter(self): blues = Genre.objects.create(name=""Blues"") band = Band.objects.create(name=""B.B. King Review"", nr_of_members=11) band.genres.add(blues) band.genres.add(blues) m = BandAdmin(Band, custom_site) request = self.factory.get(""/band/"", data={""genres"": blues.pk}) request.user = self.superuser cl = m.get_changelist_instance(request) cl.get_results(request) # There's only one Group instance self.assertEqual(cl.result_count, 1) # Queryset must be deletable. self.assertIs(cl.queryset.query.distinct, False) cl.queryset.delete() self.assertEqual(cl.queryset.count(), 0) ``` ###Assistant : Regression test for #13902: When using a ManyToMany in list_filter, results shouldn't appear more than once. Basic ManyToMany. 
" 1709,"def add_flex_arithmetic_methods(cls) -> None: flex_arith_method, flex_comp_method = _get_method_wrappers(cls) new_methods = _create_methods(cls, flex_arith_method, flex_comp_method) new_methods.update( { ""multiply"": new_methods[""mul""], ""subtract"": new_methods[""sub""], ""divide"": new_methods[""div""], } ) # opt out of bool flex methods for now assert not any(kname in new_methods for kname in (""ror_"", ""rxor"", ""rand_"")) _add_methods(cls, new_methods=new_methods) "," Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. Parameters ---------- cls : class flex methods will be defined and pinned to this class ",29,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_flex_arithmetic_methods(cls) -> None: flex_arith_method, flex_comp_method = _get_method_wrappers(cls) new_methods = _create_methods(cls, flex_arith_method, flex_comp_method) new_methods.update( { ""multiply"": new_methods[""mul""], ""subtract"": new_methods[""sub""], ""divide"": new_methods[""div""], } ) # opt out of bool flex methods for now assert not any(kname in new_methods for kname in (""ror_"", ""rxor"", ""rand_"")) _add_methods(cls, new_methods=new_methods) ``` ###Assistant : Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. Parameters ---------- cls : class flex methods will be defined and pinned to this class " 1710,"def get_collected_keypoint(self): output = [] for tracker_id in self.id_to_pop: output.append([tracker_id, self.keypoint_saver[tracker_id]]) del (self.keypoint_saver[tracker_id]) self.flag_to_pop = False self.id_to_pop.clear() return output "," Output (List): List of keypoint results for Action Recognition task, where the format of each element is [tracker_id, KeyPointSequence of tracker_id] ",21,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_collected_keypoint(self): output = [] for tracker_id in self.id_to_pop: output.append([tracker_id, self.keypoint_saver[tracker_id]]) del (self.keypoint_saver[tracker_id]) self.flag_to_pop = False self.id_to_pop.clear() return output ``` ###Assistant : Output (List): List of keypoint results for Action Recognition task, where the format of each element is [tracker_id, KeyPointSequence of tracker_id] " 1711,"def get_course_schedule_events(start, end, filters=None): from frappe.desk.calendar import get_event_conditions conditions = get_event_conditions(""Course Schedule"", filters) data = frappe.db.sql(.format(conditions=conditions), { ""start"": start, ""end"": end }, as_dict=True, update={""allDay"": 0}) return data @frappe.whitelist()","Returns events for Course Schedule Calendar view rendering. :param start: Start date-time. :param end: End date-time. :param filters: Filters (JSON). select name, course, color, timestamp(schedule_date, from_time) as from_time, timestamp(schedule_date, to_time) as to_time, room, student_group, 0 as 'allDay' from `tabCourse Schedule` where ( schedule_date between %(start)s and %(end)s ) {conditions}",49,28,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_course_schedule_events(start, end, filters=None): from frappe.desk.calendar import get_event_conditions conditions = get_event_conditions(""Course Schedule"", filters) data = frappe.db.sql(.format(conditions=conditions), { ""start"": start, ""end"": end }, as_dict=True, update={""allDay"": 0}) return data @frappe.whitelist() ``` ###Assistant : Returns events for Course Schedule Calendar view rendering. :param start: Start date-time. :param end: End date-time. :param filters: Filters (JSON). select name, course, color, timestamp(schedule_date, from_time) as from_time, timestamp(schedule_date, to_time) as to_time, room, student_group, 0 as 'allDay' from `tabCourse Schedule` where ( schedule_date between %(start)s and %(end)s ) {conditions} " 1712,"def check_keys_split(self, decoded) -> None: bad_keys = set(decoded.keys()).difference(set(self._split_keys)) if bad_keys: bad_keys_joined = "", "".join(bad_keys) raise ValueError(f""JSON data had unexpected key(s): {bad_keys_joined}"") "," Checks that dict has only the appropriate keys for orient='split'. ",10,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_keys_split(self, decoded) -> None: bad_keys = set(decoded.keys()).difference(set(self._split_keys)) if bad_keys: bad_keys_joined = "", "".join(bad_keys) raise ValueError(f""JSON data had unexpected key(s): {bad_keys_joined}"") ``` ###Assistant : Checks that dict has only the appropriate keys for orient='split'. " 1713,"def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, ""resolve_expression""): return value # cx_Oracle doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError( ""Oracle backend does not support timezone-aware datetimes when USE_TZ is False."" ) return Oracle_datetime.from_datetime(value) "," Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. Normally Django models.DateTimeField makes sure that if USE_TZ is True passed datetime is timezone aware. ",44,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, ""resolve_expression""): return value # cx_Oracle doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError( ""Oracle backend does not support timezone-aware datetimes when USE_TZ is False."" ) return Oracle_datetime.from_datetime(value) ``` ###Assistant : Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. Normally Django models.DateTimeField makes sure that if USE_TZ is True passed datetime is timezone aware. 
" 1714,"def __pow__(a, b): if isinstance(b, numbers.Rational): if b.denominator == 1: power = b.numerator if power >= 0: return Fraction(a._numerator ** power, a._denominator ** power, _normalize=False) elif a._numerator >= 0: return Fraction(a._denominator ** -power, a._numerator ** -power, _normalize=False) else: return Fraction((-a._denominator) ** -power, (-a._numerator) ** -power, _normalize=False) else: # A fractional power will generally produce an # irrational number. return float(a) ** float(b) else: return float(a) ** b ","a ** b If b is not an integer, the result will be a float or complex since roots are generally irrational. If b is an integer, the result will be rational. ",32,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __pow__(a, b): if isinstance(b, numbers.Rational): if b.denominator == 1: power = b.numerator if power >= 0: return Fraction(a._numerator ** power, a._denominator ** power, _normalize=False) elif a._numerator >= 0: return Fraction(a._denominator ** -power, a._numerator ** -power, _normalize=False) else: return Fraction((-a._denominator) ** -power, (-a._numerator) ** -power, _normalize=False) else: # A fractional power will generally produce an # irrational number. return float(a) ** float(b) else: return float(a) ** b ``` ###Assistant : a ** b If b is not an integer, the result will be a float or complex since roots are generally irrational. If b is an integer, the result will be rational. " 1715,"def fit(self, X, y, sample_weight=None): self._validate_params() super().fit(X, y, sample_weight=sample_weight) return self ","Fit Ridge regression model with cv. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. If using GCV, will be cast to float64 if necessary. y : ndarray of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary. sample_weight : float or ndarray of shape (n_samples,), default=None Individual weights for each sample. If given a float, every sample will have the same weight. Returns ------- self : object Fitted estimator. Notes ----- When sample_weight is provided, the selected hyperparameter may depend on whether we use leave-one-out cross-validation (cv=None or cv='auto') or another form of cross-validation, because only leave-one-out cross-validation takes the sample weights into account when computing the validation score. ",118,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit(self, X, y, sample_weight=None): self._validate_params() super().fit(X, y, sample_weight=sample_weight) return self ``` ###Assistant : Fit Ridge regression model with cv. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. If using GCV, will be cast to float64 if necessary. y : ndarray of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary. sample_weight : float or ndarray of shape (n_samples,), default=None Individual weights for each sample. If given a float, every sample will have the same weight. Returns ------- self : object Fitted estimator. Notes ----- When sample_weight is provided, the selected hyperparameter may depend on whether we use leave-one-out cross-validation (cv=None or cv='auto') or another form of cross-validation, because only leave-one-out cross-validation takes the sample weights into account when computing the validation score. 
" 1716,"def calc_position(self, x): if x < self.x[0]: return None elif x > self.x[-1]: return None i = self.__search_index(x) dx = x - self.x[i] position = self.a[i] + self.b[i] * dx + \ self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0 return position "," Calc `y` position for given `x`. if `x` is outside the data point's `x` range, return None. Returns ------- y : float y position for given x. ",27,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def calc_position(self, x): if x < self.x[0]: return None elif x > self.x[-1]: return None i = self.__search_index(x) dx = x - self.x[i] position = self.a[i] + self.b[i] * dx + \ self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0 return position ``` ###Assistant : Calc `y` position for given `x`. if `x` is outside the data point's `x` range, return None. Returns ------- y : float y position for given x. " 1717,"def get_daily_sector_prices(start_date, end_date) -> dict: # sector ticker information sp500_tickers = { ""S&P 500 Materials (Sector)"": ""^SP500-15"", ""S&P 500 Industrials (Sector)"": ""^SP500-20"", ""S&P 500 Consumer Discretionary (Sector)"": ""^SP500-25"", ""S&P 500 Consumer Staples (Sector)"": ""^SP500-30"", ""S&P 500 Health Care (Sector)"": ""^SP500-35"", ""S&P 500 Financials (Sector)"": ""^SP500-40"", ""S&P 500 Information Technology (Sector)"": ""^SP500-45"", ""S&P 500 Telecommunication Services (Sector)"": ""^SP500-50"", ""S&P 500 Utilities (Sector)"": ""^SP500-55"", ""S&P 500 Real Estate (Sector)"": ""^SP500-60"", ""S&P 500 Energy (Sector)"": ""^GSPE"", } sp500_tickers_data = {} # to store data for ( sector, sector_ticker, ) in sp500_tickers.items(): # iterate thru the sectors # load the data required from yfinance sp500_tickers_data[ sector ] = { # builds a dictionary entry for the sector with adj close data ""sector_data"": yf.download( sector_ticker, start=start_date, end=end_date, progress=False, )[""Adj Close""] } # stores the data here return sp500_tickers_data "," fetches daily sector prices for S&P500 for a fixed time period Parameters ---------- start_date : str ('yyyy-mm-dd') or datetime.date start date for fetching data end_date : str ('yyyy-mm-dd') or datetime.date end date for fetching data Returns ------- sp500_tickers_data : dict dictionary of dataframes with SPY daily sector prices ",48,133,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_daily_sector_prices(start_date, end_date) -> dict: # sector ticker information sp500_tickers = { ""S&P 500 Materials (Sector)"": ""^SP500-15"", ""S&P 500 Industrials (Sector)"": ""^SP500-20"", ""S&P 500 Consumer Discretionary (Sector)"": ""^SP500-25"", ""S&P 500 Consumer Staples (Sector)"": ""^SP500-30"", ""S&P 500 Health Care (Sector)"": ""^SP500-35"", ""S&P 500 Financials (Sector)"": ""^SP500-40"", ""S&P 500 Information Technology (Sector)"": ""^SP500-45"", ""S&P 500 Telecommunication Services (Sector)"": ""^SP500-50"", ""S&P 500 Utilities (Sector)"": ""^SP500-55"", ""S&P 500 Real Estate (Sector)"": ""^SP500-60"", ""S&P 500 Energy (Sector)"": ""^GSPE"", } sp500_tickers_data = {} # to store data for ( sector, sector_ticker, ) in sp500_tickers.items(): # iterate thru the sectors # load the data required from yfinance sp500_tickers_data[ sector ] = { # builds a dictionary entry for the sector with adj close data ""sector_data"": yf.download( sector_ticker, start=start_date, end=end_date, progress=False, )[""Adj Close""] } # stores the data here return sp500_tickers_data ``` ###Assistant : fetches daily sector prices for S&P500 for a fixed time period Parameters ---------- start_date : str ('yyyy-mm-dd') or datetime.date start date for fetching data end_date : str ('yyyy-mm-dd') or datetime.date end date for fetching data Returns ------- sp500_tickers_data : dict dictionary of dataframes with SPY daily sector prices " 1718,"def get_unpositioned_tip(self, tip_shape=None, tip_length=None): from manim.mobject.geometry.tips import ArrowTriangleFilledTip if tip_shape is None: tip_shape = ArrowTriangleFilledTip if tip_length is None: tip_length = self.get_default_tip_length() color = self.get_color() style = {""fill_color"": color, ""stroke_color"": color} style.update(self.tip_style) tip = tip_shape(length=tip_length, **style) return tip "," Returns a tip that has been stylistically configured, but has not yet been given a position in space. ",18,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_unpositioned_tip(self, tip_shape=None, tip_length=None): from manim.mobject.geometry.tips import ArrowTriangleFilledTip if tip_shape is None: tip_shape = ArrowTriangleFilledTip if tip_length is None: tip_length = self.get_default_tip_length() color = self.get_color() style = {""fill_color"": color, ""stroke_color"": color} style.update(self.tip_style) tip = tip_shape(length=tip_length, **style) return tip ``` ###Assistant : Returns a tip that has been stylistically configured, but has not yet been given a position in space. " 1719,"def test_parse_due_date_without_timezone_uses_offset(): data: DueDate = { ""date"": ""2022-02-02T14:00:00"", ""is_recurring"": False, ""lang"": ""en"", ""string"": ""Feb 2 2:00 PM"", ""timezone"": None, } actual = _parse_due_date(data, timezone_offset=-8) assert datetime(2022, 2, 2, 22, 0, 0, tzinfo=dt.UTC) == actual ",Test due date uses user local timezone offset when it has no timezone.,13,34,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_parse_due_date_without_timezone_uses_offset(): data: DueDate = { ""date"": ""2022-02-02T14:00:00"", ""is_recurring"": False, ""lang"": ""en"", ""string"": ""Feb 2 2:00 PM"", ""timezone"": None, } actual = _parse_due_date(data, timezone_offset=-8) assert datetime(2022, 2, 2, 22, 0, 0, tzinfo=dt.UTC) == actual ``` ###Assistant : Test due date uses user local timezone offset when it has no timezone. " 1720,"def serving_output(self, output): hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutput( last_hidden_state=output.last_hidden_state, hidden_states=hidden_states, attentions=attentions ) @add_start_docstrings( , HUBERT_START_DOCSTRING, )",TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).,14,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serving_output(self, output): hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutput( last_hidden_state=output.last_hidden_state, hidden_states=hidden_states, attentions=attentions ) @add_start_docstrings( , HUBERT_START_DOCSTRING, ) ``` ###Assistant : TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC). " 1721,"def _unique_np(values, return_inverse=False, return_counts=False): uniques = np.unique( values, return_inverse=return_inverse, return_counts=return_counts ) inverse, counts = None, None if return_counts: *uniques, counts = uniques if return_inverse: *uniques, inverse = uniques if return_counts or return_inverse: uniques = uniques[0] # np.unique will have duplicate missing values at the end of `uniques` # here we clip the nans and remove it from uniques if uniques.size and is_scalar_nan(uniques[-1]): nan_idx = np.searchsorted(uniques, np.nan) uniques = uniques[: nan_idx + 1] if return_inverse: inverse[inverse > nan_idx] = nan_idx if return_counts: counts[nan_idx] = np.sum(counts[nan_idx:]) counts = counts[: nan_idx + 1] ret = (uniques,) if return_inverse: ret += (inverse,) if return_counts: ret += (counts,) return ret[0] if len(ret) == 1 else ret ","Helper function to find unique values for numpy arrays that correctly accounts for nans. See `_unique` documentation for details.",19,111,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _unique_np(values, return_inverse=False, return_counts=False): uniques = np.unique( values, return_inverse=return_inverse, return_counts=return_counts ) inverse, counts = None, None if return_counts: *uniques, counts = uniques if return_inverse: *uniques, inverse = uniques if return_counts or return_inverse: uniques = uniques[0] # np.unique will have duplicate missing values at the end of `uniques` # here we clip the nans and remove it from uniques if uniques.size and is_scalar_nan(uniques[-1]): nan_idx = np.searchsorted(uniques, np.nan) uniques = uniques[: nan_idx + 1] if return_inverse: inverse[inverse > nan_idx] = nan_idx if return_counts: counts[nan_idx] = np.sum(counts[nan_idx:]) counts = counts[: nan_idx + 1] ret = (uniques,) if return_inverse: ret += (inverse,) if return_counts: ret += (counts,) return ret[0] if len(ret) == 1 else ret ``` ###Assistant : Helper function to find unique values for numpy arrays that correctly accounts for nans. See `_unique` documentation for details. " 1722,"def switch_to_live(self) -> None: self.logbook_run.event_cache.clear() self.logbook_run.context_lookup.clear() ","Switch to live stream. Clear caches so we can reduce memory pressure. ",12,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def switch_to_live(self) -> None: self.logbook_run.event_cache.clear() self.logbook_run.context_lookup.clear() ``` ###Assistant : Switch to live stream. Clear caches so we can reduce memory pressure. " 1723,"def require_comet_ml(test_case): return unittest.skipUnless(is_comet_ml_available(), ""test requires comet_ml"")(test_case) "," Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed ",16,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def require_comet_ml(test_case): return unittest.skipUnless(is_comet_ml_available(), ""test requires comet_ml"")(test_case) ``` ###Assistant : Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed " 1724,"def triggered_id(self): component_id = None if self.triggered: prop_id = self.triggered_prop_ids.first() component_id = self.triggered_prop_ids[prop_id] return component_id "," Returns the component id (str or dict) of the Input component that triggered the callback. Note - use `triggered_prop_ids` if you need both the component id and the prop that triggered the callback or if multiple Inputs triggered the callback. Example usage: `if ""btn-1"" == ctx.triggered_id: do_something()` ",47,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def triggered_id(self): component_id = None if self.triggered: prop_id = self.triggered_prop_ids.first() component_id = self.triggered_prop_ids[prop_id] return component_id ``` ###Assistant : Returns the component id (str or dict) of the Input component that triggered the callback. Note - use `triggered_prop_ids` if you need both the component id and the prop that triggered the callback or if multiple Inputs triggered the callback. Example usage: `if ""btn-1"" == ctx.triggered_id: do_something()` " 1725,"def _solve_W(self, X, H, max_iter): avg = np.sqrt(X.mean() / self._n_components) W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype) W_buffer = W.copy() # Get scaled regularization terms. 
Done for each minibatch to take into account # variable sizes of minibatches. l1_reg_W, _, l2_reg_W, _ = self._scale_regularization(X) for _ in range(max_iter): W, *_ = _multiplicative_update_w( X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma ) W_diff = linalg.norm(W - W_buffer) / linalg.norm(W) if self.tol > 0 and W_diff <= self.tol: break W_buffer[:] = W return W ","Minimize the objective function w.r.t W. Update W with H being fixed, until convergence. This is the heart of `transform` but it's also used during `fit` when doing fresh restarts. ",30,80,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _solve_W(self, X, H, max_iter): avg = np.sqrt(X.mean() / self._n_components) W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype) W_buffer = W.copy() # Get scaled regularization terms. Done for each minibatch to take into account # variable sizes of minibatches. l1_reg_W, _, l2_reg_W, _ = self._scale_regularization(X) for _ in range(max_iter): W, *_ = _multiplicative_update_w( X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma ) W_diff = linalg.norm(W - W_buffer) / linalg.norm(W) if self.tol > 0 and W_diff <= self.tol: break W_buffer[:] = W return W ``` ###Assistant : Minimize the objective function w.r.t W. Update W with H being fixed, until convergence. This is the heart of `transform` but it's also used during `fit` when doing fresh restarts. " 1726,"def convert_bbox_to_z(bbox): w = bbox[2] - bbox[0] h = bbox[3] - bbox[1] x = bbox[0] + w / 2. y = bbox[1] + h / 2. s = w * h # scale is just area r = w / float(h + 1e-6) return np.array([x, y, s, r]).reshape((4, 1)) "," Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is the aspect ratio ",34,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert_bbox_to_z(bbox): w = bbox[2] - bbox[0] h = bbox[3] - bbox[1] x = bbox[0] + w / 2. y = bbox[1] + h / 2. s = w * h # scale is just area r = w / float(h + 1e-6) return np.array([x, y, s, r]).reshape((4, 1)) ``` ###Assistant : Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is the aspect ratio " 1727,"def remove_whitespace(string, leading=False, trailing=False): # Remove any leading new line characters along with any surrounding white space if leading: string = re.sub(r'^\s*\n+\s*', '', string) # Remove any trailing new line characters along with any surrounding white space if trailing: string = re.sub(r'\s*\n+\s*$', '', string) # Replace new line characters and absorb any surrounding space. string = re.sub(r'\s*\n\s*', ' ', string) # TODO need some way to get rid of extra spaces in e.g. text text return re.sub(r'\s+', ' ', string) ","Remove white space from a string. Args: string(str): The string to remove white space from. leading(bool, optional): Remove leading new lines when True. trailing(bool, optional): Remove trailing new lines when False. Returns: str: The input string with new line characters removed and white space squashed. Examples: Single or multiple new line characters are replaced with space. >>> remove_whitespace(""abc\\ndef"") 'abc def' >>> remove_whitespace(""abc\\n\\n\\ndef"") 'abc def' New line characters surrounded by white space are replaced with a single space. 
>>> remove_whitespace(""abc \\n \\n \\n def"") 'abc def' >>> remove_whitespace(""abc \\n \\n \\n def"") 'abc def' Leading and trailing new lines are replaced with a single space. >>> remove_whitespace(""\\nabc"") ' abc' >>> remove_whitespace("" \\n abc"") ' abc' >>> remove_whitespace(""abc\\n"") 'abc ' >>> remove_whitespace(""abc \\n "") 'abc ' Use ``leading=True`` to remove leading new line characters, including any surrounding white space: >>> remove_whitespace(""\\nabc"", leading=True) 'abc' >>> remove_whitespace("" \\n abc"", leading=True) 'abc' Use ``trailing=True`` to remove trailing new line characters, including any surrounding white space: >>> remove_whitespace(""abc \\n "", trailing=True) 'abc' ",166,82,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def remove_whitespace(string, leading=False, trailing=False): # Remove any leading new line characters along with any surrounding white space if leading: string = re.sub(r'^\s*\n+\s*', '', string) # Remove any trailing new line characters along with any surrounding white space if trailing: string = re.sub(r'\s*\n+\s*$', '', string) # Replace new line characters and absorb any surrounding space. string = re.sub(r'\s*\n\s*', ' ', string) # TODO need some way to get rid of extra spaces in e.g. text text return re.sub(r'\s+', ' ', string) ``` ###Assistant : Remove white space from a string. Args: string(str): The string to remove white space from. leading(bool, optional): Remove leading new lines when True. trailing(bool, optional): Remove trailing new lines when False. Returns: str: The input string with new line characters removed and white space squashed. Examples: Single or multiple new line characters are replaced with space. >>> remove_whitespace(""abc\\ndef"") 'abc def' >>> remove_whitespace(""abc\\n\\n\\ndef"") 'abc def' New line characters surrounded by white space are replaced with a single space. >>> remove_whitespace(""abc \\n \\n \\n def"") 'abc def' >>> remove_whitespace(""abc \\n \\n \\n def"") 'abc def' Leading and trailing new lines are replaced with a single space. >>> remove_whitespace(""\\nabc"") ' abc' >>> remove_whitespace("" \\n abc"") ' abc' >>> remove_whitespace(""abc\\n"") 'abc ' >>> remove_whitespace(""abc \\n "") 'abc ' Use ``leading=True`` to remove leading new line characters, including any surrounding white space: >>> remove_whitespace(""\\nabc"", leading=True) 'abc' >>> remove_whitespace("" \\n abc"", leading=True) 'abc' Use ``trailing=True`` to remove trailing new line characters, including any surrounding white space: >>> remove_whitespace(""abc \\n "", trailing=True) 'abc' " 1728,"def selectionChanged(self, selected, deselected): if not self._active: return super().selectionChanged(selected, deselected) indexes = selected.indexes() if not indexes: return data = str(self._model().data(indexes[0])) self.selection_changed.emit(data) ",Extend selectionChanged to call completers selection_changed.,6,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def selectionChanged(self, selected, deselected): if not self._active: return super().selectionChanged(selected, deselected) indexes = selected.indexes() if not indexes: return data = str(self._model().data(indexes[0])) self.selection_changed.emit(data) ``` ###Assistant : Extend selectionChanged to call completers selection_changed. 
" 1729,"def __mul__(self, other): newlist = [v for v in self.args] other = sympify(other) for i, v in enumerate(newlist): newlist[i] = (other * newlist[i][0], newlist[i][1]) return Vector(newlist) ","Multiplies the Vector by a sympifyable expression. Parameters ========== other : Sympifyable The scalar to multiply this Vector with Examples ======== >>> from sympy.physics.vector import ReferenceFrame >>> from sympy import Symbol >>> N = ReferenceFrame('N') >>> b = Symbol('b') >>> V = 10 * b * N.x >>> print(V) 10*b*N.x ",50,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __mul__(self, other): newlist = [v for v in self.args] other = sympify(other) for i, v in enumerate(newlist): newlist[i] = (other * newlist[i][0], newlist[i][1]) return Vector(newlist) ``` ###Assistant : Multiplies the Vector by a sympifyable expression. Parameters ========== other : Sympifyable The scalar to multiply this Vector with Examples ======== >>> from sympy.physics.vector import ReferenceFrame >>> from sympy import Symbol >>> N = ReferenceFrame('N') >>> b = Symbol('b') >>> V = 10 * b * N.x >>> print(V) 10*b*N.x " 1730,"def to_dict(self) -> Dict: return serve_application_to_schema(self._deployments.values()).dict() ","Returns this Application's deployments as a dictionary. This dictionary adheres to the Serve REST API schema. It can be deployed via the Serve REST API. Returns: Dict: The Application's deployments formatted in a dictionary. ",34,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_dict(self) -> Dict: return serve_application_to_schema(self._deployments.values()).dict() ``` ###Assistant : Returns this Application's deployments as a dictionary. This dictionary adheres to the Serve REST API schema. It can be deployed via the Serve REST API. Returns: Dict: The Application's deployments formatted in a dictionary. 
" 1731,"def get_shift_details(shift_type_name, for_timestamp=None): if not shift_type_name: return None if not for_timestamp: for_timestamp = now_datetime() shift_type = frappe.get_doc('Shift Type', shift_type_name) shift_actual_start = shift_type.start_time - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time) if shift_type.start_time > shift_type.end_time: # shift spans accross 2 different days if get_time(for_timestamp.time()) >= get_time(shift_actual_start): # if for_timestamp is greater than start time, its in the first day start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time for_timestamp = for_timestamp + timedelta(days=1) end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time elif get_time(for_timestamp.time()) < get_time(shift_actual_start): # if for_timestamp is less than start time, its in the second day end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time for_timestamp = for_timestamp + timedelta(days=-1) start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time else: # start and end times fall on the same day start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time actual_start = start_datetime - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time) actual_end = end_datetime + timedelta(minutes=shift_type.allow_check_out_after_shift_end_time) return frappe._dict({ 'shift_type': shift_type, 'start_datetime': start_datetime, 'end_datetime': end_datetime, 'actual_start': actual_start, 'actual_end': actual_end }) ","Returns Shift Details which contain some additional information as described below. 'shift_details' contains the following keys: 'shift_type' - Object of DocType Shift Type, 'start_datetime' - Date and Time of shift start on given date, 'end_datetime' - Date and Time of shift end on given date, 'actual_start' - datetime of shift start after adding 'begin_check_in_before_shift_start_time', 'actual_end' - datetime of shift end after adding 'allow_check_out_after_shift_end_time'(None is returned if this is zero) :param shift_type_name: shift type name for which shift_details is required. :param for_timestamp: DateTime value on which shift_details are required ",88,149,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_shift_details(shift_type_name, for_timestamp=None): if not shift_type_name: return None if not for_timestamp: for_timestamp = now_datetime() shift_type = frappe.get_doc('Shift Type', shift_type_name) shift_actual_start = shift_type.start_time - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time) if shift_type.start_time > shift_type.end_time: # shift spans accross 2 different days if get_time(for_timestamp.time()) >= get_time(shift_actual_start): # if for_timestamp is greater than start time, its in the first day start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time for_timestamp = for_timestamp + timedelta(days=1) end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time elif get_time(for_timestamp.time()) < get_time(shift_actual_start): # if for_timestamp is less than start time, its in the second day end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time for_timestamp = for_timestamp + timedelta(days=-1) start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time else: # start and end times fall on the same day start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time actual_start = start_datetime - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time) actual_end = end_datetime + timedelta(minutes=shift_type.allow_check_out_after_shift_end_time) return frappe._dict({ 'shift_type': shift_type, 'start_datetime': start_datetime, 'end_datetime': end_datetime, 'actual_start': actual_start, 'actual_end': actual_end }) ``` ###Assistant : Returns Shift Details which contain some additional information as described below. 'shift_details' contains the following keys: 'shift_type' - Object of DocType Shift Type, 'start_datetime' - Date and Time of shift start on given date, 'end_datetime' - Date and Time of shift end on given date, 'actual_start' - datetime of shift start after adding 'begin_check_in_before_shift_start_time', 'actual_end' - datetime of shift end after adding 'allow_check_out_after_shift_end_time'(None is returned if this is zero) :param shift_type_name: shift type name for which shift_details is required. :param for_timestamp: DateTime value on which shift_details are required " 1732,"def subprocess_run_helper(func, *args, timeout, extra_env=None): target = func.__name__ module = func.__module__ proc = subprocess.run( [sys.executable, ""-c"", f""from {module} import {target}; {target}()"", *args], env={**os.environ, ""SOURCE_DATE_EPOCH"": ""0"", **(extra_env or {})}, timeout=timeout, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) return proc "," Run a function in a sub-process. Parameters ---------- func : function The function to be run. It must be in a module that is importable. *args : str Any additional command line arguments to be passed in the first argument to ``subprocess.run``. extra_env : dict[str, str] Any additional environment variables to be set for the subprocess. ",56,35,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def subprocess_run_helper(func, *args, timeout, extra_env=None): target = func.__name__ module = func.__module__ proc = subprocess.run( [sys.executable, ""-c"", f""from {module} import {target}; {target}()"", *args], env={**os.environ, ""SOURCE_DATE_EPOCH"": ""0"", **(extra_env or {})}, timeout=timeout, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) return proc ``` ###Assistant : Run a function in a sub-process. Parameters ---------- func : function The function to be run. It must be in a module that is importable. *args : str Any additional command line arguments to be passed in the first argument to ``subprocess.run``. extra_env : dict[str, str] Any additional environment variables to be set for the subprocess. " 1733,"def add_preheated_app_session(self) -> None: session = self._create_or_reuse_app_session(ws=None) session.handle_rerun_script_request(is_preheat=True) ","Register a fake browser with the server and run the script. This is used to start running the user's script even before the first browser connects. ",26,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_preheated_app_session(self) -> None: session = self._create_or_reuse_app_session(ws=None) session.handle_rerun_script_request(is_preheat=True) ``` ###Assistant : Register a fake browser with the server and run the script. This is used to start running the user's script even before the first browser connects. " 1734,"def score(self, testing_features, testing_target): if self.fitted_pipeline_ is None: raise RuntimeError( ""A pipeline has not yet been optimized. Please call fit() first."" ) testing_features, testing_target = self._check_dataset( testing_features, testing_target, sample_weight=None ) # If the scoring function is a string, we must adjust to use the sklearn # scoring interface if isinstance(self.scoring_function, str): scorer = SCORERS[self.scoring_function] elif callable(self.scoring_function): scorer = self.scoring_function else: raise RuntimeError( ""The scoring function should either be the name of a scikit-learn scorer or a scorer object"" ) score = scorer( self.fitted_pipeline_, testing_features.astype(np.float64), testing_target.astype(np.float64), ) return score ","Return the score on the given testing data using the user-specified scoring function. Parameters ---------- testing_features: array-like {n_samples, n_features} Feature matrix of the testing set testing_target: array-like {n_samples} List of class labels for prediction in the testing set Returns ------- accuracy_score: float The estimated test set accuracy ",47,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def score(self, testing_features, testing_target): if self.fitted_pipeline_ is None: raise RuntimeError( ""A pipeline has not yet been optimized. 
Please call fit() first."" ) testing_features, testing_target = self._check_dataset( testing_features, testing_target, sample_weight=None ) # If the scoring function is a string, we must adjust to use the sklearn # scoring interface if isinstance(self.scoring_function, str): scorer = SCORERS[self.scoring_function] elif callable(self.scoring_function): scorer = self.scoring_function else: raise RuntimeError( ""The scoring function should either be the name of a scikit-learn scorer or a scorer object"" ) score = scorer( self.fitted_pipeline_, testing_features.astype(np.float64), testing_target.astype(np.float64), ) return score ``` ###Assistant : Return the score on the given testing data using the user-specified scoring function. Parameters ---------- testing_features: array-like {n_samples, n_features} Feature matrix of the testing set testing_target: array-like {n_samples} List of class labels for prediction in the testing set Returns ------- accuracy_score: float The estimated test set accuracy " 1735,"def test_as_dict(): expected = { LENGTH: UnitOfLength.KILOMETERS, WIND_SPEED: UnitOfSpeed.METERS_PER_SECOND, TEMPERATURE: UnitOfTemperature.CELSIUS, VOLUME: UnitOfVolume.LITERS, MASS: UnitOfMass.GRAMS, PRESSURE: UnitOfPressure.PA, ACCUMULATED_PRECIPITATION: UnitOfLength.MILLIMETERS, } assert expected == METRIC_SYSTEM.as_dict() ",Test that the as_dict() method returns the expected dictionary.,9,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_as_dict(): expected = { LENGTH: UnitOfLength.KILOMETERS, WIND_SPEED: UnitOfSpeed.METERS_PER_SECOND, TEMPERATURE: UnitOfTemperature.CELSIUS, VOLUME: UnitOfVolume.LITERS, MASS: UnitOfMass.GRAMS, PRESSURE: UnitOfPressure.PA, ACCUMULATED_PRECIPITATION: UnitOfLength.MILLIMETERS, } assert expected == METRIC_SYSTEM.as_dict() ``` ###Assistant : Test that the as_dict() method returns the expected dictionary. " 1736,"def _get_inputs(self): logger.debug(""Getting inputs"") if len(self.input_shape) == 3: input_shapes = [self.input_shape, self.input_shape] else: input_shapes = self.input_shape inputs = [Input(shape=shape, name=f""face_in_{side}"") for side, shape in zip((""a"", ""b""), input_shapes)] logger.debug(""inputs: %s"", inputs) return inputs "," Obtain the standardized inputs for the model. The inputs will be returned for the ""A"" and ""B"" sides in the shape as defined by :attr:`input_shape`. Returns ------- list A list of :class:`keras.layers.Input` tensors. This will be a list of 2 tensors (one for each side) each of shapes :attr:`input_shape`. ",49,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_inputs(self): logger.debug(""Getting inputs"") if len(self.input_shape) == 3: input_shapes = [self.input_shape, self.input_shape] else: input_shapes = self.input_shape inputs = [Input(shape=shape, name=f""face_in_{side}"") for side, shape in zip((""a"", ""b""), input_shapes)] logger.debug(""inputs: %s"", inputs) return inputs ``` ###Assistant : Obtain the standardized inputs for the model. The inputs will be returned for the ""A"" and ""B"" sides in the shape as defined by :attr:`input_shape`. Returns ------- list A list of :class:`keras.layers.Input` tensors. This will be a list of 2 tensors (one for each side) each of shapes :attr:`input_shape`. 
" 1737,"def collect_units_install() -> t.List[PipInstall]: requirements_paths = [] # type: t.List[t.Tuple[str, str]] constraints_paths = [] # type: t.List[t.Tuple[str, str]] path = os.path.join(data_context().content.unit_path, 'requirements.txt') requirements_paths.append((data_context().content.root, path)) path = os.path.join(data_context().content.unit_path, 'constraints.txt') constraints_paths.append((data_context().content.root, path)) return collect_install(requirements_paths, constraints_paths) ",Return details necessary for the specified units pip install(s).,9,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def collect_units_install() -> t.List[PipInstall]: requirements_paths = [] # type: t.List[t.Tuple[str, str]] constraints_paths = [] # type: t.List[t.Tuple[str, str]] path = os.path.join(data_context().content.unit_path, 'requirements.txt') requirements_paths.append((data_context().content.root, path)) path = os.path.join(data_context().content.unit_path, 'constraints.txt') constraints_paths.append((data_context().content.root, path)) return collect_install(requirements_paths, constraints_paths) ``` ###Assistant : Return details necessary for the specified units pip install(s). " 1738,"def forward(self, features, **kwargs): x = features[:, 0, :] # take token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( , YOSO_START_DOCSTRING, )","YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.",23,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, features, **kwargs): x = features[:, 0, :] # take token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( , YOSO_START_DOCSTRING, ) ``` ###Assistant : YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. " 1739,"def delete_batch(self, pk_list, using): # number of objects deleted num_deleted = 0 field = self.get_meta().pk for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.clear_where() self.add_filter( f""{field.attname}__in"", pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE], ) num_deleted += self.do_query( self.get_meta().db_table, self.where, using=using ) return num_deleted "," Set up and execute delete queries for all the objects in pk_list. More than one physical query may be executed if there are a lot of values in pk_list. ",29,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delete_batch(self, pk_list, using): # number of objects deleted num_deleted = 0 field = self.get_meta().pk for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.clear_where() self.add_filter( f""{field.attname}__in"", pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE], ) num_deleted += self.do_query( self.get_meta().db_table, self.where, using=using ) return num_deleted ``` ###Assistant : Set up and execute delete queries for all the objects in pk_list. More than one physical query may be executed if there are a lot of values in pk_list. 
def generate_ansible_coverage_config() -> str: coverage_config = return coverage_config ``` ###Assistant : Generate code coverage configuration for Ansible tests. [run] branch = True concurrency = multiprocessing parallel = True omit = */python*/dist-packages/* */python*/site-packages/* */python*/distutils/* */pyshared/* */pytest */AnsiballZ_*.py */test/results/* " 1741,"def get_allowed_roles_to_invite(self): return [ r for r in organization_roles.get_all() if r.priority <= organization_roles.get(self.role).priority ] "," Return a list of roles which that member could invite. Must check if the member has member:admin first before checking ",20,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_allowed_roles_to_invite(self): return [ r for r in organization_roles.get_all() if r.priority <= organization_roles.get(self.role).priority ] ``` ###Assistant : Return a list of roles which that member could invite. Must check if the member has member:admin first before checking " 1742,"def search_space_spec(self) -> Dict[str, ParameterSpec]: raise NotImplementedError() "," Space specification (sample points). Mapping from spec name to ParameterSpec. The names in choices should be in the same format as export. For example: :: {""layer1"": ParameterSpec(values=[""conv"", ""pool""])} ",28,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def search_space_spec(self) -> Dict[str, ParameterSpec]: raise NotImplementedError() ``` ###Assistant : Space specification (sample points). Mapping from spec name to ParameterSpec. The names in choices should be in the same format as export. For example: :: {""layer1"": ParameterSpec(values=[""conv"", ""pool""])} " 1743,"def test_context_as_admin(self) -> None: # Create a room. We're not part of it. user_id = self.register_user(""test"", ""test"") user_tok = self.login(""test"", ""test"") room_id = self.helper.create_room_as(user_id, tok=user_tok) # Populate the room with events. events = [] for i in range(30): events.append( self.helper.send_event( room_id, ""com.example.test"", content={""index"": i}, tok=user_tok ) ) # Now let's fetch the context for this room. 
midway = (len(events) - 1) // 2 channel = self.make_request( ""GET"", ""/_synapse/admin/v1/rooms/%s/context/%s"" % (room_id, events[midway][""event_id""]), access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual( channel.json_body[""event""][""event_id""], events[midway][""event_id""] ) for found_event in channel.json_body[""events_before""]: for j, posted_event in enumerate(events): if found_event[""event_id""] == posted_event[""event_id""]: self.assertTrue(j < midway) break else: self.fail(""Event %s from events_before not found"" % j) for found_event in channel.json_body[""events_after""]: for j, posted_event in enumerate(events): if found_event[""event_id""] == posted_event[""event_id""]: self.assertTrue(j > midway) break else: self.fail(""Event %s from events_after not found"" % j) "," Test that, as admin, we can find the context of an event without having joined the room. ",17,132,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_context_as_admin(self) -> None: # Create a room. We're not part of it. user_id = self.register_user(""test"", ""test"") user_tok = self.login(""test"", ""test"") room_id = self.helper.create_room_as(user_id, tok=user_tok) # Populate the room with events. events = [] for i in range(30): events.append( self.helper.send_event( room_id, ""com.example.test"", content={""index"": i}, tok=user_tok ) ) # Now let's fetch the context for this room. midway = (len(events) - 1) // 2 channel = self.make_request( ""GET"", ""/_synapse/admin/v1/rooms/%s/context/%s"" % (room_id, events[midway][""event_id""]), access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual( channel.json_body[""event""][""event_id""], events[midway][""event_id""] ) for found_event in channel.json_body[""events_before""]: for j, posted_event in enumerate(events): if found_event[""event_id""] == posted_event[""event_id""]: self.assertTrue(j < midway) break else: self.fail(""Event %s from events_before not found"" % j) for found_event in channel.json_body[""events_after""]: for j, posted_event in enumerate(events): if found_event[""event_id""] == posted_event[""event_id""]: self.assertTrue(j > midway) break else: self.fail(""Event %s from events_after not found"" % j) ``` ###Assistant : Test that, as admin, we can find the context of an event without having joined the room. " 1744,"def marginal_std(self, t): return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) "," Compute sigma_t of a given continuous-time label t in [0, T]. ",11,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def marginal_std(self, t): return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) ``` ###Assistant : Compute sigma_t of a given continuous-time label t in [0, T]. " 1745,"def for_request(cls, request): attr_name = cls.get_cache_attr_name() if hasattr(request, attr_name): return getattr(request, attr_name) site = Site.find_for_request(request) site_settings = cls.for_site(site) # to allow more efficient page url generation site_settings._request = request setattr(request, attr_name, site_settings) return site_settings "," Get or create an instance of this model for the request, and cache the result on the request for faster repeat access. ",22,34,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def for_request(cls, request): attr_name = cls.get_cache_attr_name() if hasattr(request, attr_name): return getattr(request, attr_name) site = Site.find_for_request(request) site_settings = cls.for_site(site) # to allow more efficient page url generation site_settings._request = request setattr(request, attr_name, site_settings) return site_settings ``` ###Assistant : Get or create an instance of this model for the request, and cache the result on the request for faster repeat access. " 1746,"def _enable_ocsp_stapling(self, ssl_vhost, unused_options): min_apache_ver = (2, 3, 3) if self.get_version() < min_apache_ver: raise errors.PluginError( ""Unable to set OCSP directives.\n"" ""Apache version is below 2.3.3."") if ""socache_shmcb_module"" not in self.parser.modules: self.enable_mod(""socache_shmcb"") # Check if there's an existing SSLUseStapling directive on. use_stapling_aug_path = self.parser.find_dir(""SSLUseStapling"", ""on"", start=ssl_vhost.path) if not use_stapling_aug_path: self.parser.add_dir(ssl_vhost.path, ""SSLUseStapling"", ""on"") ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep)) # Check if there's an existing SSLStaplingCache directive. stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache', None, ssl_vhost_aug_path) # We'll simply delete the directive, so that we'll have a # consistent OCSP cache path. if stapling_cache_aug_path: self.parser.aug.remove( re.sub(r""/\w*$"", """", stapling_cache_aug_path[0])) self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path, ""SSLStaplingCache"", [""shmcb:/var/run/apache2/stapling_cache(128000)""]) msg = ""OCSP Stapling was enabled on SSL Vhost: %s.\n""%( ssl_vhost.filep) self.save_notes += msg self.save() logger.info(msg) ","Enables OCSP Stapling In OCSP, each client (e.g. browser) would have to query the OCSP Responder to validate that the site certificate was not revoked. Enabling OCSP Stapling, would allow the web-server to query the OCSP Responder, and staple its response to the offered certificate during TLS. i.e. clients would not have to query the OCSP responder. OCSP Stapling enablement on Apache implicitly depends on SSLCertificateChainFile being set by other code. .. note:: This function saves the configuration :param ssl_vhost: Destination of traffic, an ssl enabled vhost :type ssl_vhost: :class:`~certbot_apache._internal.obj.VirtualHost` :param unused_options: Not currently used :type unused_options: Not Available :returns: Success, general_vhost (HTTP vhost) :rtype: (bool, :class:`~certbot_apache._internal.obj.VirtualHost`) ",107,108,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _enable_ocsp_stapling(self, ssl_vhost, unused_options): min_apache_ver = (2, 3, 3) if self.get_version() < min_apache_ver: raise errors.PluginError( ""Unable to set OCSP directives.\n"" ""Apache version is below 2.3.3."") if ""socache_shmcb_module"" not in self.parser.modules: self.enable_mod(""socache_shmcb"") # Check if there's an existing SSLUseStapling directive on. use_stapling_aug_path = self.parser.find_dir(""SSLUseStapling"", ""on"", start=ssl_vhost.path) if not use_stapling_aug_path: self.parser.add_dir(ssl_vhost.path, ""SSLUseStapling"", ""on"") ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep)) # Check if there's an existing SSLStaplingCache directive. 
stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache', None, ssl_vhost_aug_path) # We'll simply delete the directive, so that we'll have a # consistent OCSP cache path. if stapling_cache_aug_path: self.parser.aug.remove( re.sub(r""/\w*$"", """", stapling_cache_aug_path[0])) self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path, ""SSLStaplingCache"", [""shmcb:/var/run/apache2/stapling_cache(128000)""]) msg = ""OCSP Stapling was enabled on SSL Vhost: %s.\n""%( ssl_vhost.filep) self.save_notes += msg self.save() logger.info(msg) ``` ###Assistant : Enables OCSP Stapling In OCSP, each client (e.g. browser) would have to query the OCSP Responder to validate that the site certificate was not revoked. Enabling OCSP Stapling, would allow the web-server to query the OCSP Responder, and staple its response to the offered certificate during TLS. i.e. clients would not have to query the OCSP responder. OCSP Stapling enablement on Apache implicitly depends on SSLCertificateChainFile being set by other code. .. note:: This function saves the configuration :param ssl_vhost: Destination of traffic, an ssl enabled vhost :type ssl_vhost: :class:`~certbot_apache._internal.obj.VirtualHost` :param unused_options: Not currently used :type unused_options: Not Available :returns: Success, general_vhost (HTTP vhost) :rtype: (bool, :class:`~certbot_apache._internal.obj.VirtualHost`) " 1747,"def make_regional_gl_entries(gl_entries, doc): country = frappe.get_cached_value(""Company"", doc.company, ""country"") if country != ""United Arab Emirates"": return gl_entries if doc.reverse_charge == ""Y"": tax_accounts = get_tax_accounts(doc.company) for tax in doc.get(""taxes""): if tax.category not in (""Total"", ""Valuation and Total""): continue gl_entries = make_gl_entry(tax, gl_entries, doc, tax_accounts) return gl_entries ",Hooked to make_regional_gl_entries in Purchase Invoice.It appends the region specific general ledger entries to the list of GL Entries.,19,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_regional_gl_entries(gl_entries, doc): country = frappe.get_cached_value(""Company"", doc.company, ""country"") if country != ""United Arab Emirates"": return gl_entries if doc.reverse_charge == ""Y"": tax_accounts = get_tax_accounts(doc.company) for tax in doc.get(""taxes""): if tax.category not in (""Total"", ""Valuation and Total""): continue gl_entries = make_gl_entry(tax, gl_entries, doc, tax_accounts) return gl_entries ``` ###Assistant : Hooked to make_regional_gl_entries in Purchase Invoice.It appends the region specific general ledger entries to the list of GL Entries. " 1748,"def private_param(param): return pytest.param( *param, marks=pytest.mark.skipif( not _run_private_tests, reason=""Skipping: this test is marked private, set RUN_PRIVATE=1 in your environment to run"", ), ) ","Wrap param to mark it as private, meaning it requires credentials to run. Private tests are skipped by default. Set the RUN_PRIVATE environment variable to a truth value to run them. ",31,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def private_param(param): return pytest.param( *param, marks=pytest.mark.skipif( not _run_private_tests, reason=""Skipping: this test is marked private, set RUN_PRIVATE=1 in your environment to run"", ), ) ``` ###Assistant : Wrap param to mark it as private, meaning it requires credentials to run. Private tests are skipped by default. 
Set the RUN_PRIVATE environment variable to a truth value to run them. " 1749,"def minimal_d_separator(G, u, v): if not nx.is_directed_acyclic_graph(G): raise nx.NetworkXError(""graph should be directed acyclic"") union_uv = {u, v} if any(n not in G.nodes for n in union_uv): raise nx.NodeNotFound(""one or more specified nodes not found in the graph"") # first construct the set of ancestors of X and Y x_anc = nx.ancestors(G, u) y_anc = nx.ancestors(G, v) D_anc_xy = x_anc.union(y_anc) D_anc_xy.update((u, v)) # second, construct the moralization of the subgraph of Anc(X,Y) moral_G = nx.moral_graph(G.subgraph(D_anc_xy)) # find a separating set Z' in moral_G Z_prime = set(G.predecessors(u)).union(set(G.predecessors(v))) # perform BFS on the graph from 'x' to mark Z_dprime = _bfs_with_marks(moral_G, u, Z_prime) Z = _bfs_with_marks(moral_G, v, Z_dprime) return Z @not_implemented_for(""undirected"")","Compute a minimal d-separating set between 'u' and 'v'. A d-separating set in a DAG is a set of nodes that blocks all paths between the two nodes, 'u' and 'v'. This function constructs a d-separating set that is ""minimal"", meaning it is the smallest d-separating set for 'u' and 'v'. This is not necessarily unique. For more details, see Notes. Parameters ---------- G : graph A networkx DAG. u : node A node in the graph, G. v : node A node in the graph, G. Raises ------ NetworkXError Raises a :exc:`NetworkXError` if the input graph is not a DAG. NodeNotFound If any of the input nodes are not found in the graph, a :exc:`NodeNotFound` exception is raised. References ---------- .. [1] Tian, J., & Paz, A. (1998). Finding Minimal D-separators. Notes ----- This function only finds ``a`` minimal d-separator. It does not guarantee uniqueness, since in a DAG there may be more than one minimal d-separator between two nodes. Moreover, this only checks for minimal separators between two nodes, not two sets. Finding minimal d-separators between two sets of nodes is not supported. Uses the algorithm presented in [1]_. The complexity of the algorithm is :math:`O(|E_{An}^m|)`, where :math:`|E_{An}^m|` stands for the number of edges in the moralized graph of the sub-graph consisting of only the ancestors of 'u' and 'v'. For full details, see [1]_. The algorithm works by constructing the moral graph consisting of just the ancestors of `u` and `v`. Then it constructs a candidate for a separating set ``Z'`` from the predecessors of `u` and `v`. Then BFS is run starting from `u` and marking nodes found from ``Z'`` and calling those nodes ``Z''``. Then BFS is run again starting from `v` and marking nodes if they are present in ``Z''``. Those marked nodes are the returned minimal d-separating set. https://en.wikipedia.org/wiki/Bayesian_network#d-separation ",306,108,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def minimal_d_separator(G, u, v): if not nx.is_directed_acyclic_graph(G): raise nx.NetworkXError(""graph should be directed acyclic"") union_uv = {u, v} if any(n not in G.nodes for n in union_uv): raise nx.NodeNotFound(""one or more specified nodes not found in the graph"") # first construct the set of ancestors of X and Y x_anc = nx.ancestors(G, u) y_anc = nx.ancestors(G, v) D_anc_xy = x_anc.union(y_anc) D_anc_xy.update((u, v)) # second, construct the moralization of the subgraph of Anc(X,Y) moral_G = nx.moral_graph(G.subgraph(D_anc_xy)) # find a separating set Z' in moral_G Z_prime = set(G.predecessors(u)).union(set(G.predecessors(v))) # perform BFS on the graph from 'x' to mark Z_dprime = _bfs_with_marks(moral_G, u, Z_prime) Z = _bfs_with_marks(moral_G, v, Z_dprime) return Z @not_implemented_for(""undirected"") ``` ###Assistant : Compute a minimal d-separating set between 'u' and 'v'. A d-separating set in a DAG is a set of nodes that blocks all paths between the two nodes, 'u' and 'v'. This function constructs a d-separating set that is ""minimal"", meaning it is the smallest d-separating set for 'u' and 'v'. This is not necessarily unique. For more details, see Notes. Parameters ---------- G : graph A networkx DAG. u : node A node in the graph, G. v : node A node in the graph, G. Raises ------ NetworkXError Raises a :exc:`NetworkXError` if the input graph is not a DAG. NodeNotFound If any of the input nodes are not found in the graph, a :exc:`NodeNotFound` exception is raised. References ---------- .. [1] Tian, J., & Paz, A. (1998). Finding Minimal D-separators. Notes ----- This function only finds ``a`` minimal d-separator. It does not guarantee uniqueness, since in a DAG there may be more than one minimal d-separator between two nodes. Moreover, this only checks for minimal separators between two nodes, not two sets. Finding minimal d-separators between two sets of nodes is not supported. Uses the algorithm presented in [1]_. The complexity of the algorithm is :math:`O(|E_{An}^m|)`, where :math:`|E_{An}^m|` stands for the number of edges in the moralized graph of the sub-graph consisting of only the ancestors of 'u' and 'v'. For full details, see [1]_. The algorithm works by constructing the moral graph consisting of just the ancestors of `u` and `v`. Then it constructs a candidate for a separating set ``Z'`` from the predecessors of `u` and `v`. Then BFS is run starting from `u` and marking nodes found from ``Z'`` and calling those nodes ``Z''``. Then BFS is run again starting from `v` and marking nodes if they are present in ``Z''``. Those marked nodes are the returned minimal d-separating set. https://en.wikipedia.org/wiki/Bayesian_network#d-separation " 1750,"def _change_alignment_for_a_line(self, alignment, line_no): self.lines[1][line_no] = alignment if self.lines[1][line_no] == ""center"": self[line_no].move_to( np.array([self.get_center()[0], self[line_no].get_center()[1], 0]), ) elif self.lines[1][line_no] == ""right"": self[line_no].move_to( np.array( [ self.get_right()[0] - self[line_no].width / 2, self[line_no].get_center()[1], 0, ], ), ) elif self.lines[1][line_no] == ""left"": self[line_no].move_to( np.array( [ self.get_left()[0] + self[line_no].width / 2, self[line_no].get_center()[1], 0, ], ), ) ","Function to change one line's alignment to a specific value. Parameters ---------- alignment : :class:`str` Defines the alignment of paragraph. Possible values are ""left"", ""right"", ""center"". 
line_no : :class:`int` Defines the line number for which we want to set given alignment. ",41,50,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _change_alignment_for_a_line(self, alignment, line_no): self.lines[1][line_no] = alignment if self.lines[1][line_no] == ""center"": self[line_no].move_to( np.array([self.get_center()[0], self[line_no].get_center()[1], 0]), ) elif self.lines[1][line_no] == ""right"": self[line_no].move_to( np.array( [ self.get_right()[0] - self[line_no].width / 2, self[line_no].get_center()[1], 0, ], ), ) elif self.lines[1][line_no] == ""left"": self[line_no].move_to( np.array( [ self.get_left()[0] + self[line_no].width / 2, self[line_no].get_center()[1], 0, ], ), ) ``` ###Assistant : Function to change one line's alignment to a specific value. Parameters ---------- alignment : :class:`str` Defines the alignment of paragraph. Possible values are ""left"", ""right"", ""center"". line_no : :class:`int` Defines the line number for which we want to set given alignment. " 1751,"def test_rejoin_forgotten_by_user(self) -> None: self.helper.join(self.room_id, user=self.bob, tok=self.bob_token) self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token) self.get_success(self.handler.forget(self.alice_ID, self.room_id)) self.assertTrue( self.get_success(self.store.did_forget(self.alice, self.room_id)) ) # the server has not forgotten the room self.assertFalse( self.get_success(self.store.is_locally_forgotten_room(self.room_id)) ) self.helper.join(self.room_id, user=self.alice, tok=self.alice_token) # TODO: A join to a room does not invalidate the forgotten cache # see https://github.com/matrix-org/synapse/issues/13262 self.store.did_forget.invalidate_all() self.assertFalse( self.get_success(self.store.did_forget(self.alice, self.room_id)) ) ","Test that a user that has forgotten a room can do a re-join. The room was not forgotten from the local server. One local user is still member of the room.",31,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_rejoin_forgotten_by_user(self) -> None: self.helper.join(self.room_id, user=self.bob, tok=self.bob_token) self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token) self.get_success(self.handler.forget(self.alice_ID, self.room_id)) self.assertTrue( self.get_success(self.store.did_forget(self.alice, self.room_id)) ) # the server has not forgotten the room self.assertFalse( self.get_success(self.store.is_locally_forgotten_room(self.room_id)) ) self.helper.join(self.room_id, user=self.alice, tok=self.alice_token) # TODO: A join to a room does not invalidate the forgotten cache # see https://github.com/matrix-org/synapse/issues/13262 self.store.did_forget.invalidate_all() self.assertFalse( self.get_success(self.store.did_forget(self.alice, self.room_id)) ) ``` ###Assistant : Test that a user that has forgotten a room can do a re-join. The room was not forgotten from the local server. One local user is still member of the room. " 1752,"def std_call(func): if os.name == ""nt"": return lwingdal[func] else: return lgdal[func] # #### Version-information functions. #### # Return GDAL library version information with the given key. _version_info = std_call(""GDALVersionInfo"") _version_info.argtypes = [c_char_p] _version_info.restype = c_char_p "," Return the correct STDCALL function for certain OSR routines on Win32 platforms. 
",12,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def std_call(func): if os.name == ""nt"": return lwingdal[func] else: return lgdal[func] # #### Version-information functions. #### # Return GDAL library version information with the given key. _version_info = std_call(""GDALVersionInfo"") _version_info.argtypes = [c_char_p] _version_info.restype = c_char_p ``` ###Assistant : Return the correct STDCALL function for certain OSR routines on Win32 platforms. " 1753,"def __getitem__(self, key): use_func = key.startswith(self.prefix) if use_func: key = key[len(self.prefix) :] value = super().__getitem__(key) if use_func: return self.func(value) return value "," Retrieve the real value after stripping the prefix string (if present). If the prefix is present, pass the value through self.func before returning, otherwise return the raw value. ",28,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __getitem__(self, key): use_func = key.startswith(self.prefix) if use_func: key = key[len(self.prefix) :] value = super().__getitem__(key) if use_func: return self.func(value) return value ``` ###Assistant : Retrieve the real value after stripping the prefix string (if present). If the prefix is present, pass the value through self.func before returning, otherwise return the raw value. " 1754,"def not_enough_open_files() -> bool: try: import resource except ImportError: # resource limits is not a concept on all systems, notably Windows return False soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE) return soft_limit < 512 or hard_limit < 512 "," The current process does not currently allow enough open files for this test. You can increase the number of open files with `ulimit -n 512`. ",25,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def not_enough_open_files() -> bool: try: import resource except ImportError: # resource limits is not a concept on all systems, notably Windows return False soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE) return soft_limit < 512 or hard_limit < 512 ``` ###Assistant : The current process does not currently allow enough open files for this test. You can increase the number of open files with `ulimit -n 512`. " 1755,"def configure(self, request): # Save ordering preference if request.user.is_authenticated: table_name = self.__class__.__name__ if self.prefixed_order_by_field in request.GET: # If an ordering has been specified as a query parameter, save it as the # user's preferred ordering for this table. ordering = request.GET.getlist(self.prefixed_order_by_field) request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True) elif ordering := request.user.config.get(f'tables.{table_name}.ordering'): # If no ordering has been specified, set the preferred ordering (if any). self.order_by = ordering # Paginate the table results paginate = { 'paginator_class': EnhancedPaginator, 'per_page': get_paginate_count(request) } tables.RequestConfig(request, paginate).configure(self) "," Configure the table for a specific request context. This performs pagination and records the user's preferred ordering logic. ",18,79,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def configure(self, request): # Save ordering preference if request.user.is_authenticated: table_name = self.__class__.__name__ if self.prefixed_order_by_field in request.GET: # If an ordering has been specified as a query parameter, save it as the # user's preferred ordering for this table. ordering = request.GET.getlist(self.prefixed_order_by_field) request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True) elif ordering := request.user.config.get(f'tables.{table_name}.ordering'): # If no ordering has been specified, set the preferred ordering (if any). self.order_by = ordering # Paginate the table results paginate = { 'paginator_class': EnhancedPaginator, 'per_page': get_paginate_count(request) } tables.RequestConfig(request, paginate).configure(self) ``` ###Assistant : Configure the table for a specific request context. This performs pagination and records the user's preferred ordering logic. " 1756,"def _shard_arg(arg, devices, arg_indices): if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices: # The shard_arg_handlers allow an extensible set of types to be sharded, but # inline handling for ShardedDeviceArray as a special case for performance # NOTE: we compare indices instead of sharding_spec because # pmap_benchmark.pmap_shard_args_benchmark indicates this is faster. return [ buf if buf.device() == d else buf.copy_to_device(d) for d, buf in zip(devices, arg.device_buffers) ] else: arg = xla.canonicalize_dtype(arg) return shard_arg_handlers[type(arg)](arg, devices, arg_indices) @profiler.annotate_function","Returns a list of size len(devices) containing per-device buffers. For the C++ pmap path, we fallback to Python (this function) to shard arguments that are not supported by the C++ `ShardArg`. Arrgs: arg: The Python argument. devices: The list of devices to shard over. arg_indices: A list of `len(devices)` indices to use to shard the argument. ",56,75,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _shard_arg(arg, devices, arg_indices): if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices: # The shard_arg_handlers allow an extensible set of types to be sharded, but # inline handling for ShardedDeviceArray as a special case for performance # NOTE: we compare indices instead of sharding_spec because # pmap_benchmark.pmap_shard_args_benchmark indicates this is faster. return [ buf if buf.device() == d else buf.copy_to_device(d) for d, buf in zip(devices, arg.device_buffers) ] else: arg = xla.canonicalize_dtype(arg) return shard_arg_handlers[type(arg)](arg, devices, arg_indices) @profiler.annotate_function ``` ###Assistant : Returns a list of size len(devices) containing per-device buffers. For the C++ pmap path, we fallback to Python (this function) to shard arguments that are not supported by the C++ `ShardArg`. Arrgs: arg: The Python argument. devices: The list of devices to shard over. arg_indices: A list of `len(devices)` indices to use to shard the argument. 
" 1757,"def test_rect(self): n3x3 = coord_net_spec(ks=3, stride=1, pad=0) n5x5 = coord_net_spec(ks=5, stride=2, pad=10) n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10]) ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data) ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data) ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data) self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5) self.assertEquals(a_3x3, a_3x5[0]) self.assertEquals(b_3x3, b_3x5[0]) self.assertEquals(a_5x5, a_3x5[1]) self.assertEquals(b_5x5, b_3x5[1]) "," Anisotropic mapping is equivalent to its isotropic parts. ",8,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_rect(self): n3x3 = coord_net_spec(ks=3, stride=1, pad=0) n5x5 = coord_net_spec(ks=5, stride=2, pad=10) n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10]) ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data) ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data) ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data) self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5) self.assertEquals(a_3x3, a_3x5[0]) self.assertEquals(b_3x3, b_3x5[0]) self.assertEquals(a_5x5, a_3x5[1]) self.assertEquals(b_5x5, b_3x5[1]) ``` ###Assistant : Anisotropic mapping is equivalent to its isotropic parts. " 1758,"def test_get_page_url_when_for_settings_fetched_via_for_site(self): self._create_importantpages_object() settings = ImportantPages.for_site(self.default_site) # Force site root paths query beforehand self.default_site.root_page._get_site_root_paths() for page_fk_field, expected_result in ( (""sign_up_page"", ""http://localhost/""), (""general_terms_page"", ""http://localhost/""), (""privacy_policy_page"", ""http://other/""), ): with self.subTest(page_fk_field=page_fk_field): # only the first request for each URL will trigger queries. # 2 are triggered instead of 1 here, because tests use the # database cache backed, and the cache is queried each time # to fetch site root paths (because there's no 'request' to # store them on) with self.assertNumQueries(2): self.assertEqual( settings.get_page_url(page_fk_field), expected_result ) # when called directly self.assertEqual( settings.get_page_url(page_fk_field), expected_result ) # when called indirectly via shortcut self.assertEqual( getattr(settings.page_url, page_fk_field), expected_result ) ","ImportantPages.for_site() cannot make the settings object request-aware, so things are a little less efficient, and the URLs returned will not be site-relative",22,102,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_page_url_when_for_settings_fetched_via_for_site(self): self._create_importantpages_object() settings = ImportantPages.for_site(self.default_site) # Force site root paths query beforehand self.default_site.root_page._get_site_root_paths() for page_fk_field, expected_result in ( (""sign_up_page"", ""http://localhost/""), (""general_terms_page"", ""http://localhost/""), (""privacy_policy_page"", ""http://other/""), ): with self.subTest(page_fk_field=page_fk_field): # only the first request for each URL will trigger queries. 
# 2 are triggered instead of 1 here, because tests use the # database cache backed, and the cache is queried each time # to fetch site root paths (because there's no 'request' to # store them on) with self.assertNumQueries(2): self.assertEqual( settings.get_page_url(page_fk_field), expected_result ) # when called directly self.assertEqual( settings.get_page_url(page_fk_field), expected_result ) # when called indirectly via shortcut self.assertEqual( getattr(settings.page_url, page_fk_field), expected_result ) ``` ###Assistant : ImportantPages.for_site() cannot make the settings object request-aware, so things are a little less efficient, and the URLs returned will not be site-relative " 1759,"def test_send_receipts_with_backoff(self): mock_send_transaction = ( self.hs.get_federation_transport_client().send_transaction ) mock_send_transaction.return_value = make_awaitable({}) sender = self.hs.get_federation_sender() receipt = ReadReceipt( ""room_id"", ""m.read"", ""user_id"", [""event_id""], {""ts"": 1234} ) self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt))) self.pump() # expect a call to send_transaction mock_send_transaction.assert_called_once() json_cb = mock_send_transaction.call_args[0][1] data = json_cb() self.assertEqual( data[""edus""], [ { ""edu_type"": EduTypes.RECEIPT, ""content"": { ""room_id"": { ""m.read"": { ""user_id"": { ""event_ids"": [""event_id""], ""data"": {""ts"": 1234}, } } } }, } ], ) mock_send_transaction.reset_mock() # send the second RR receipt = ReadReceipt( ""room_id"", ""m.read"", ""user_id"", [""other_id""], {""ts"": 1234} ) self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt))) self.pump() mock_send_transaction.assert_not_called() self.reactor.advance(19) mock_send_transaction.assert_not_called() self.reactor.advance(10) mock_send_transaction.assert_called_once() json_cb = mock_send_transaction.call_args[0][1] data = json_cb() self.assertEqual( data[""edus""], [ { ""edu_type"": EduTypes.RECEIPT, ""content"": { ""room_id"": { ""m.read"": { ""user_id"": { ""event_ids"": [""other_id""], ""data"": {""ts"": 1234}, } } } }, } ], ) ","Send two receipts in quick succession; the second should be flushed, but only after 20ms",15,119,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_send_receipts_with_backoff(self): mock_send_transaction = ( self.hs.get_federation_transport_client().send_transaction ) mock_send_transaction.return_value = make_awaitable({}) sender = self.hs.get_federation_sender() receipt = ReadReceipt( ""room_id"", ""m.read"", ""user_id"", [""event_id""], {""ts"": 1234} ) self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt))) self.pump() # expect a call to send_transaction mock_send_transaction.assert_called_once() json_cb = mock_send_transaction.call_args[0][1] data = json_cb() self.assertEqual( data[""edus""], [ { ""edu_type"": EduTypes.RECEIPT, ""content"": { ""room_id"": { ""m.read"": { ""user_id"": { ""event_ids"": [""event_id""], ""data"": {""ts"": 1234}, } } } }, } ], ) mock_send_transaction.reset_mock() # send the second RR receipt = ReadReceipt( ""room_id"", ""m.read"", ""user_id"", [""other_id""], {""ts"": 1234} ) self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt))) self.pump() mock_send_transaction.assert_not_called() self.reactor.advance(19) mock_send_transaction.assert_not_called() self.reactor.advance(10) mock_send_transaction.assert_called_once() json_cb = mock_send_transaction.call_args[0][1] data = json_cb() self.assertEqual( data[""edus""], [ { ""edu_type"": EduTypes.RECEIPT, ""content"": { ""room_id"": { ""m.read"": { ""user_id"": { ""event_ids"": [""other_id""], ""data"": {""ts"": 1234}, } } } }, } ], ) ``` ###Assistant : Send two receipts in quick succession; the second should be flushed, but only after 20ms " 1760,"def process_frame(self, processable_frame, processing_task): frame = processable_frame.frame token = None cache = self.cache sourcemaps = self.sourcemaps all_errors = [] sourcemap_applied = False # can't demangle if there's no filename or line number present if not frame.get(""abs_path"") or not frame.get(""lineno""): return # also can't demangle node's internal modules # therefore we only process user-land frames (starting with /) # or those created by bundle/webpack internals if self.data.get(""platform"") == ""node"" and not frame.get(""abs_path"").startswith( (""/"", ""app:"", ""webpack:"") ): return errors = cache.get_errors(frame[""abs_path""]) if errors: all_errors.extend(errors) # This might fail but that's okay, we try with a different path a # bit later down the road. source = self.get_sourceview(frame[""abs_path""]) in_app = None new_frame = dict(frame) raw_frame = dict(frame) sourcemap_url, sourcemap_view = sourcemaps.get_link(frame[""abs_path""]) self.sourcemaps_touched.add(sourcemap_url) if sourcemap_view and frame.get(""colno"") is None: all_errors.append( {""type"": EventError.JS_NO_COLUMN, ""url"": http.expose_url(frame[""abs_path""])} ) elif sourcemap_view: if is_data_uri(sourcemap_url): sourcemap_label = frame[""abs_path""] else: sourcemap_label = sourcemap_url sourcemap_label = http.expose_url(sourcemap_label) if frame.get(""function""): minified_function_name = frame[""function""] minified_source = self.get_sourceview(frame[""abs_path""]) else: minified_function_name = minified_source = None try: # Errors are 1-indexed in the frames, so we need to -1 to get # zero-indexed value from tokens. 
assert frame[""lineno""] > 0, ""line numbers are 1-indexed"" token = sourcemap_view.lookup( frame[""lineno""] - 1, frame[""colno""] - 1, minified_function_name, minified_source ) except Exception: token = None all_errors.append( { ""type"": EventError.JS_INVALID_SOURCEMAP_LOCATION, ""column"": frame.get(""colno""), ""row"": frame.get(""lineno""), ""source"": frame[""abs_path""], ""sourcemap"": sourcemap_label, } ) # persist the token so that we can find it later processable_frame.data[""token""] = token # Store original data in annotation new_frame[""data""] = dict(frame.get(""data"") or {}, sourcemap=sourcemap_label) sourcemap_applied = True if token is not None: abs_path = non_standard_url_join(sourcemap_url, token.src) logger.debug( ""Mapping compressed source %r to mapping in %r"", frame[""abs_path""], abs_path ) source = self.get_sourceview(abs_path) if source is None: errors = cache.get_errors(abs_path) if errors: all_errors.extend(errors) else: all_errors.append( {""type"": EventError.JS_MISSING_SOURCE, ""url"": http.expose_url(abs_path)} ) # the tokens are zero indexed, so offset correctly new_frame[""lineno""] = token.src_line + 1 new_frame[""colno""] = token.src_col + 1 # Try to use the function name we got from symbolic original_function_name = token.function_name # In the ideal case we can use the function name from the # frame and the location to resolve the original name # through the heuristics in our sourcemap library. if original_function_name is None: last_token = None # Find the previous token for function name handling as a # fallback. if ( processable_frame.previous_frame and processable_frame.previous_frame.processor is self ): last_token = processable_frame.previous_frame.data.get(""token"") if last_token: original_function_name = last_token.name if original_function_name is not None: new_frame[""function""] = original_function_name filename = token.src # special case webpack support # abs_path will always be the full path with webpack:/// prefix. # filename will be relative to that if abs_path.startswith(""webpack:""): filename = abs_path # webpack seems to use ~ to imply ""relative to resolver root"" # which is generally seen for third party deps # (i.e. node_modules) if ""/~/"" in filename: filename = ""~/"" + abs_path.split(""/~/"", 1)[-1] elif WEBPACK_NAMESPACE_RE.match(filename): filename = re.sub(WEBPACK_NAMESPACE_RE, ""./"", abs_path) else: filename = filename.split(""webpack:///"", 1)[-1] # As noted above: # * [js/node] '~/' means they're coming from node_modules, so these are not app dependencies # * [node] sames goes for `./node_modules/` and '../node_modules/', which is used when bundling node apps # * [node] and webpack, which includes it's own code to bootstrap all modules and its internals # eg. 
webpack:///webpack/bootstrap, webpack:///external if ( filename.startswith(""~/"") or ""/node_modules/"" in filename or not filename.startswith(""./"") ): in_app = False # And conversely, local dependencies start with './' elif filename.startswith(""./""): in_app = True # We want to explicitly generate a webpack module name new_frame[""module""] = generate_module(filename) # while you could technically use a subpath of 'node_modules' for your libraries, # it would be an extremely complicated decision and we've not seen anyone do it # so instead we assume if node_modules is in the path its part of the vendored code elif ""/node_modules/"" in abs_path: in_app = False if abs_path.startswith(""app:""): if filename and NODE_MODULES_RE.search(filename): in_app = False else: in_app = True new_frame[""abs_path""] = abs_path new_frame[""filename""] = filename if not frame.get(""module"") and abs_path.startswith( (""http:"", ""https:"", ""webpack:"", ""app:"") ): new_frame[""module""] = generate_module(abs_path) elif sourcemap_url: new_frame[""data""] = dict( new_frame.get(""data"") or {}, sourcemap=http.expose_url(sourcemap_url) ) # TODO: theoretically a minified source could point to # another mapped, minified source changed_frame = self.expand_frame(new_frame, source=source) # If we did not manage to match but we do have a line or column # we want to report an error here. if not new_frame.get(""context_line"") and source and new_frame.get(""colno"") is not None: all_errors.append( { ""type"": EventError.JS_INVALID_SOURCEMAP_LOCATION, ""column"": new_frame[""colno""], ""row"": new_frame[""lineno""], ""source"": new_frame[""abs_path""], } ) changed_raw = sourcemap_applied and self.expand_frame(raw_frame) if sourcemap_applied or all_errors or changed_frame or changed_raw: # In case we are done processing, we iterate over all errors that we got # and we filter out all `JS_MISSING_SOURCE` errors since we consider if we have # a `context_line` we have a symbolicated frame and we don't need to show the error has_context_line = bool(new_frame.get(""context_line"")) if has_context_line: all_errors[:] = [ x for x in all_errors if x.get(""type"") is not EventError.JS_MISSING_SOURCE ] if in_app is not None: new_frame[""in_app""] = in_app raw_frame[""in_app""] = in_app # Run new processor only for frames that were actually modified in any way. if should_run_smcache(self) and new_frame != raw_frame: smcache_rv = self.smcache_processor.process_frame(processable_frame, None) set_path(new_frame, ""data"", ""smcache_frame"", value=smcache_rv[0][0]) new_frames = [new_frame] raw_frames = [raw_frame] if changed_raw else None return new_frames, raw_frames, all_errors "," Attempt to demangle the given frame. ",6,857,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def process_frame(self, processable_frame, processing_task): frame = processable_frame.frame token = None cache = self.cache sourcemaps = self.sourcemaps all_errors = [] sourcemap_applied = False # can't demangle if there's no filename or line number present if not frame.get(""abs_path"") or not frame.get(""lineno""): return # also can't demangle node's internal modules # therefore we only process user-land frames (starting with /) # or those created by bundle/webpack internals if self.data.get(""platform"") == ""node"" and not frame.get(""abs_path"").startswith( (""/"", ""app:"", ""webpack:"") ): return errors = cache.get_errors(frame[""abs_path""]) if errors: all_errors.extend(errors) # This might fail but that's okay, we try with a different path a # bit later down the road. source = self.get_sourceview(frame[""abs_path""]) in_app = None new_frame = dict(frame) raw_frame = dict(frame) sourcemap_url, sourcemap_view = sourcemaps.get_link(frame[""abs_path""]) self.sourcemaps_touched.add(sourcemap_url) if sourcemap_view and frame.get(""colno"") is None: all_errors.append( {""type"": EventError.JS_NO_COLUMN, ""url"": http.expose_url(frame[""abs_path""])} ) elif sourcemap_view: if is_data_uri(sourcemap_url): sourcemap_label = frame[""abs_path""] else: sourcemap_label = sourcemap_url sourcemap_label = http.expose_url(sourcemap_label) if frame.get(""function""): minified_function_name = frame[""function""] minified_source = self.get_sourceview(frame[""abs_path""]) else: minified_function_name = minified_source = None try: # Errors are 1-indexed in the frames, so we need to -1 to get # zero-indexed value from tokens. assert frame[""lineno""] > 0, ""line numbers are 1-indexed"" token = sourcemap_view.lookup( frame[""lineno""] - 1, frame[""colno""] - 1, minified_function_name, minified_source ) except Exception: token = None all_errors.append( { ""type"": EventError.JS_INVALID_SOURCEMAP_LOCATION, ""column"": frame.get(""colno""), ""row"": frame.get(""lineno""), ""source"": frame[""abs_path""], ""sourcemap"": sourcemap_label, } ) # persist the token so that we can find it later processable_frame.data[""token""] = token # Store original data in annotation new_frame[""data""] = dict(frame.get(""data"") or {}, sourcemap=sourcemap_label) sourcemap_applied = True if token is not None: abs_path = non_standard_url_join(sourcemap_url, token.src) logger.debug( ""Mapping compressed source %r to mapping in %r"", frame[""abs_path""], abs_path ) source = self.get_sourceview(abs_path) if source is None: errors = cache.get_errors(abs_path) if errors: all_errors.extend(errors) else: all_errors.append( {""type"": EventError.JS_MISSING_SOURCE, ""url"": http.expose_url(abs_path)} ) # the tokens are zero indexed, so offset correctly new_frame[""lineno""] = token.src_line + 1 new_frame[""colno""] = token.src_col + 1 # Try to use the function name we got from symbolic original_function_name = token.function_name # In the ideal case we can use the function name from the # frame and the location to resolve the original name # through the heuristics in our sourcemap library. if original_function_name is None: last_token = None # Find the previous token for function name handling as a # fallback. 
if ( processable_frame.previous_frame and processable_frame.previous_frame.processor is self ): last_token = processable_frame.previous_frame.data.get(""token"") if last_token: original_function_name = last_token.name if original_function_name is not None: new_frame[""function""] = original_function_name filename = token.src # special case webpack support # abs_path will always be the full path with webpack:/// prefix. # filename will be relative to that if abs_path.startswith(""webpack:""): filename = abs_path # webpack seems to use ~ to imply ""relative to resolver root"" # which is generally seen for third party deps # (i.e. node_modules) if ""/~/"" in filename: filename = ""~/"" + abs_path.split(""/~/"", 1)[-1] elif WEBPACK_NAMESPACE_RE.match(filename): filename = re.sub(WEBPACK_NAMESPACE_RE, ""./"", abs_path) else: filename = filename.split(""webpack:///"", 1)[-1] # As noted above: # * [js/node] '~/' means they're coming from node_modules, so these are not app dependencies # * [node] sames goes for `./node_modules/` and '../node_modules/', which is used when bundling node apps # * [node] and webpack, which includes it's own code to bootstrap all modules and its internals # eg. webpack:///webpack/bootstrap, webpack:///external if ( filename.startswith(""~/"") or ""/node_modules/"" in filename or not filename.startswith(""./"") ): in_app = False # And conversely, local dependencies start with './' elif filename.startswith(""./""): in_app = True # We want to explicitly generate a webpack module name new_frame[""module""] = generate_module(filename) # while you could technically use a subpath of 'node_modules' for your libraries, # it would be an extremely complicated decision and we've not seen anyone do it # so instead we assume if node_modules is in the path its part of the vendored code elif ""/node_modules/"" in abs_path: in_app = False if abs_path.startswith(""app:""): if filename and NODE_MODULES_RE.search(filename): in_app = False else: in_app = True new_frame[""abs_path""] = abs_path new_frame[""filename""] = filename if not frame.get(""module"") and abs_path.startswith( (""http:"", ""https:"", ""webpack:"", ""app:"") ): new_frame[""module""] = generate_module(abs_path) elif sourcemap_url: new_frame[""data""] = dict( new_frame.get(""data"") or {}, sourcemap=http.expose_url(sourcemap_url) ) # TODO: theoretically a minified source could point to # another mapped, minified source changed_frame = self.expand_frame(new_frame, source=source) # If we did not manage to match but we do have a line or column # we want to report an error here. 
if not new_frame.get(""context_line"") and source and new_frame.get(""colno"") is not None: all_errors.append( { ""type"": EventError.JS_INVALID_SOURCEMAP_LOCATION, ""column"": new_frame[""colno""], ""row"": new_frame[""lineno""], ""source"": new_frame[""abs_path""], } ) changed_raw = sourcemap_applied and self.expand_frame(raw_frame) if sourcemap_applied or all_errors or changed_frame or changed_raw: # In case we are done processing, we iterate over all errors that we got # and we filter out all `JS_MISSING_SOURCE` errors since we consider if we have # a `context_line` we have a symbolicated frame and we don't need to show the error has_context_line = bool(new_frame.get(""context_line"")) if has_context_line: all_errors[:] = [ x for x in all_errors if x.get(""type"") is not EventError.JS_MISSING_SOURCE ] if in_app is not None: new_frame[""in_app""] = in_app raw_frame[""in_app""] = in_app # Run new processor only for frames that were actually modified in any way. if should_run_smcache(self) and new_frame != raw_frame: smcache_rv = self.smcache_processor.process_frame(processable_frame, None) set_path(new_frame, ""data"", ""smcache_frame"", value=smcache_rv[0][0]) new_frames = [new_frame] raw_frames = [raw_frame] if changed_raw else None return new_frames, raw_frames, all_errors ``` ###Assistant : Attempt to demangle the given frame. " 1761,"def validate_csv(headers, fields, required_fields): # Validate provided column headers is_update = False for field, to_field in headers.items(): if field == ""id"": is_update = True continue if field not in fields: raise forms.ValidationError(f'Unexpected column header ""{field}"" found.') if to_field and not hasattr(fields[field], 'to_field_name'): raise forms.ValidationError(f'Column ""{field}"" is not a related object; cannot use dots') if to_field and not hasattr(fields[field].queryset.model, to_field): raise forms.ValidationError(f'Invalid related object attribute for column ""{field}"": {to_field}') # Validate required fields (if not an update) if not is_update: for f in required_fields: if f not in headers: raise forms.ValidationError(f'Required column header ""{f}"" not found.') "," Validate that parsed csv data conforms to the object's available fields. Raise validation errors if parsed csv data contains invalid headers or does not contain required headers. ",27,95,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_csv(headers, fields, required_fields): # Validate provided column headers is_update = False for field, to_field in headers.items(): if field == ""id"": is_update = True continue if field not in fields: raise forms.ValidationError(f'Unexpected column header ""{field}"" found.') if to_field and not hasattr(fields[field], 'to_field_name'): raise forms.ValidationError(f'Column ""{field}"" is not a related object; cannot use dots') if to_field and not hasattr(fields[field].queryset.model, to_field): raise forms.ValidationError(f'Invalid related object attribute for column ""{field}"": {to_field}') # Validate required fields (if not an update) if not is_update: for f in required_fields: if f not in headers: raise forms.ValidationError(f'Required column header ""{f}"" not found.') ``` ###Assistant : Validate that parsed csv data conforms to the object's available fields. Raise validation errors if parsed csv data contains invalid headers or does not contain required headers. 
" 1762,"def bernoulli_poly(n, x=None, polys=False): return appell_poly(n, [[1], [1, QQ(-1,2)]], QQ(1,2), lambda p, i: p * QQ(1<<(i-1), 1-(1< lval assert drawdown == result assert pytest.approx(drawdown_rel) == result_rel "," Test case from issue https://github.com/freqtrade/freqtrade/issues/6655 [1000, 500, 1000, 11000, 10000] # absolute results [1000, 50%, 0%, 0%, ~9%] # Relative drawdowns ",21,91,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_calculate_max_drawdown_abs(values, relative, result, result_rel): dates = [Arrow(2020, 1, 1).shift(days=i) for i in range(len(values))] df = DataFrame(zip(values, dates), columns=['profit_abs', 'open_date']) # sort by profit and reset index df = df.sort_values('profit_abs').reset_index(drop=True) df1 = df.copy() drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown( df, date_col='open_date', starting_balance=1000, relative=relative) # Ensure df has not been altered. assert df.equals(df1) assert isinstance(drawdown, float) assert isinstance(drawdown_rel, float) # High must be before low assert hdate < ldate # High value must be higher than low value assert hval > lval assert drawdown == result assert pytest.approx(drawdown_rel) == result_rel ``` ###Assistant : Test case from issue https://github.com/freqtrade/freqtrade/issues/6655 [1000, 500, 1000, 11000, 10000] # absolute results [1000, 50%, 0%, 0%, ~9%] # Relative drawdowns " 1764,"def testOnCheckpointUnavailableAttribute(self): checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1) no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {}) with patch.object(logger, ""error"") as log_error_mock: checkpoint_manager.on_checkpoint(no_attr_checkpoint) log_error_mock.assert_called_once() # The newest checkpoint should still be set despite this error. self.assertEqual( checkpoint_manager.newest_persistent_checkpoint, no_attr_checkpoint ) "," Tests that an error is logged when the associated result of the checkpoint has no checkpoint score attribute. ",18,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def testOnCheckpointUnavailableAttribute(self): checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1) no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {}) with patch.object(logger, ""error"") as log_error_mock: checkpoint_manager.on_checkpoint(no_attr_checkpoint) log_error_mock.assert_called_once() # The newest checkpoint should still be set despite this error. self.assertEqual( checkpoint_manager.newest_persistent_checkpoint, no_attr_checkpoint ) ``` ###Assistant : Tests that an error is logged when the associated result of the checkpoint has no checkpoint score attribute. " 1765,"def subscription_app_status_changed_webhook(subscription_webhook): return subscription_webhook( APP_STATUS_CHANGED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.APP_STATUS_CHANGED, ) CATEGORY_CREATED_SUBSCRIPTION_QUERY = @pytest.fixture"," subscription{ event{ ...on CategoryCreated{ category{ id } } } } ",10,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def subscription_app_status_changed_webhook(subscription_webhook): return subscription_webhook( APP_STATUS_CHANGED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.APP_STATUS_CHANGED, ) CATEGORY_CREATED_SUBSCRIPTION_QUERY = @pytest.fixture ``` ###Assistant : subscription{ event{ ...on CategoryCreated{ category{ id } } } } " 1766,"def binary_op(self, op, right_frame, join_type=""outer""): left_parts, right_parts, joined_index, row_lengths = self._copartition( 0, right_frame, join_type, sort=True ) # unwrap list returned by `copartition`. right_parts = right_parts[0] new_frame = self._partition_mgr_cls.binary_operation( 1, left_parts, lambda l, r: op(l, r), right_parts ) new_columns = self.columns.join(right_frame.columns, how=join_type) return self.__constructor__( new_frame, joined_index, new_columns, row_lengths, column_widths=self._column_widths_cache, ) "," Perform an operation that requires joining with another Modin DataFrame. Parameters ---------- op : callable Function to apply after the join. right_frame : PandasDataframe Modin DataFrame to join with. join_type : str, default: ""outer"" Type of join to apply. Returns ------- PandasDataframe New Modin DataFrame. ",45,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def binary_op(self, op, right_frame, join_type=""outer""): left_parts, right_parts, joined_index, row_lengths = self._copartition( 0, right_frame, join_type, sort=True ) # unwrap list returned by `copartition`. right_parts = right_parts[0] new_frame = self._partition_mgr_cls.binary_operation( 1, left_parts, lambda l, r: op(l, r), right_parts ) new_columns = self.columns.join(right_frame.columns, how=join_type) return self.__constructor__( new_frame, joined_index, new_columns, row_lengths, column_widths=self._column_widths_cache, ) ``` ###Assistant : Perform an operation that requires joining with another Modin DataFrame. Parameters ---------- op : callable Function to apply after the join. right_frame : PandasDataframe Modin DataFrame to join with. join_type : str, default: ""outer"" Type of join to apply. Returns ------- PandasDataframe New Modin DataFrame. " 1767,"def rc_file(fname, *, use_default_template=True): # Deprecation warnings were already handled in rc_params_from_file, no need # to reemit them here. with _api.suppress_matplotlib_deprecation_warning(): from .style.core import STYLE_BLACKLIST rc_from_file = rc_params_from_file( fname, use_default_template=use_default_template) rcParams.update({k: rc_from_file[k] for k in rc_from_file if k not in STYLE_BLACKLIST}) @contextlib.contextmanager"," Update `.rcParams` from file. Style-blacklisted `.rcParams` (defined in ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated. Parameters ---------- fname : str or path-like A file with Matplotlib rc settings. use_default_template : bool If True, initialize with default parameters before updating with those in the given file. If False, the current configuration persists and only the parameters specified in the file are updated. ",58,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rc_file(fname, *, use_default_template=True): # Deprecation warnings were already handled in rc_params_from_file, no need # to reemit them here. 
with _api.suppress_matplotlib_deprecation_warning(): from .style.core import STYLE_BLACKLIST rc_from_file = rc_params_from_file( fname, use_default_template=use_default_template) rcParams.update({k: rc_from_file[k] for k in rc_from_file if k not in STYLE_BLACKLIST}) @contextlib.contextmanager ``` ###Assistant : Update `.rcParams` from file. Style-blacklisted `.rcParams` (defined in ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated. Parameters ---------- fname : str or path-like A file with Matplotlib rc settings. use_default_template : bool If True, initialize with default parameters before updating with those in the given file. If False, the current configuration persists and only the parameters specified in the file are updated. " 1768,"def create_gloo_context(rank, world_size): context = pygloo.rendezvous.Context(rank, world_size) return context ","Create a GLOO context using GLOO APIs. Args: rank: the rank of this process. world_size: the number of processes of this collective group. Returns: context (pygloo.Context): a GLOO context. ",29,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_gloo_context(rank, world_size): context = pygloo.rendezvous.Context(rank, world_size) return context ``` ###Assistant : Create a GLOO context using GLOO APIs. Args: rank: the rank of this process. world_size: the number of processes of this collective group. Returns: context (pygloo.Context): a GLOO context. " 1769,"def require_bitsandbytes(test_case): if not is_bitsandbytes_available(): return unittest.skip(""test requires bnb"")(test_case) else: return test_case "," Decorator for bits and bytes (bnb) dependency ",7,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def require_bitsandbytes(test_case): if not is_bitsandbytes_available(): return unittest.skip(""test requires bnb"")(test_case) else: return test_case ``` ###Assistant : Decorator for bits and bytes (bnb) dependency " 1770,"def copy_func(f) -> Callable: g = types.FunctionType( f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__, ) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ return g ","Copies the contents and attributes of the entered function. Based on https://stackoverflow.com/a/13503277 Parameters ---------- f: Callable Function to be copied Returns ------- g: Callable New function ",26,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def copy_func(f) -> Callable: g = types.FunctionType( f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__, ) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ return g ``` ###Assistant : Copies the contents and attributes of the entered function. 
Based on https://stackoverflow.com/a/13503277 Parameters ---------- f: Callable Function to be copied Returns ------- g: Callable New function " 1771,"def evaluate(self, expr, context): if isinstance(expr, string_types): if expr[0] in '\'""': result = expr[1:-1] else: if expr not in context: raise SyntaxError('unknown variable: %s' % expr) result = context[expr] else: assert isinstance(expr, dict) op = expr['op'] if op not in self.operations: raise NotImplementedError('op not implemented: %s' % op) elhs = expr['lhs'] erhs = expr['rhs'] if _is_literal(expr['lhs']) and _is_literal(expr['rhs']): raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs)) lhs = self.evaluate(elhs, context) rhs = self.evaluate(erhs, context) if ((elhs == 'python_version' or erhs == 'python_version') and op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')): lhs = NV(lhs) rhs = NV(rhs) elif elhs == 'python_version' and op in ('in', 'not in'): lhs = NV(lhs) rhs = _get_versions(rhs) result = self.operations[op](lhs, rhs) return result "," Evaluate a marker expression returned by the :func:`parse_requirement` function in the specified context. ",13,123,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def evaluate(self, expr, context): if isinstance(expr, string_types): if expr[0] in '\'""': result = expr[1:-1] else: if expr not in context: raise SyntaxError('unknown variable: %s' % expr) result = context[expr] else: assert isinstance(expr, dict) op = expr['op'] if op not in self.operations: raise NotImplementedError('op not implemented: %s' % op) elhs = expr['lhs'] erhs = expr['rhs'] if _is_literal(expr['lhs']) and _is_literal(expr['rhs']): raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs)) lhs = self.evaluate(elhs, context) rhs = self.evaluate(erhs, context) if ((elhs == 'python_version' or erhs == 'python_version') and op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')): lhs = NV(lhs) rhs = NV(rhs) elif elhs == 'python_version' and op in ('in', 'not in'): lhs = NV(lhs) rhs = _get_versions(rhs) result = self.operations[op](lhs, rhs) return result ``` ###Assistant : Evaluate a marker expression returned by the :func:`parse_requirement` function in the specified context. " 1772,"def track_tf_optimizer(tf_optimizer): if tf.executing_eagerly(): return optimizers = _GRAPH_TF_OPTIMIZERS[None] optimizers.add(tf_optimizer) @keras_export(""keras.__internal__.backend.track_variable"", v1=[])",Tracks the given TF optimizer for initialization of its variables.,10,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def track_tf_optimizer(tf_optimizer): if tf.executing_eagerly(): return optimizers = _GRAPH_TF_OPTIMIZERS[None] optimizers.add(tf_optimizer) @keras_export(""keras.__internal__.backend.track_variable"", v1=[]) ``` ###Assistant : Tracks the given TF optimizer for initialization of its variables. " 1773,"def _galois_group_degree_5(T, max_tries=30, randomize=False): r from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.named_groups import ( CyclicGroup, DihedralGroup, AlternatingGroup, SymmetricGroup ) # The ideas here are all the same as in the degree-4 method. # The specific resolvents we use, and how we interpret the results, are # adapted to the degree-5 case. X = symbols('X0 X1 X2 X3 X4') # For the first resolvent, we have G = S5, # and stabilizer H = M20 = < (01234), (1234) >. 
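# F1 below is a polynomial form invariant under M20; its resolvent is evaluated for T, and an integer root indicates the Galois group lies in a conjugate of M20 (otherwise the group is S5 or A5, separated by the square-discriminant check).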
F1 = (X[0]**2*(X[1]*X[4] + X[2]*X[3]) + X[1]**2*(X[2]*X[0] + X[3]*X[4]) + X[2]**2*(X[3]*X[1] + X[4]*X[0]) + X[3]**2*(X[4]*X[2] + X[0]*X[1]) + X[4]**2*(X[0]*X[3] + X[1]*X[2])) s1 = [ Permutation(4), Permutation(4)(0, 1), Permutation(4)(0, 2), Permutation(4)(0, 3), Permutation(4)(0, 4), Permutation(4)(1, 4) ] R1 = Resolvent(F1, X, s1) # For the second resolvent, we'll have G = D5, H = C5. F2_pre = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[4]**2 + X[4]*X[0]**2 s2_pre = [ Permutation(4), Permutation(4)(0, 1)(2, 4) ] history = set() for i in range(max_tries): if i > 0: _, T = tschirnhausen_transformation(T, max_tries=max_tries, history=history, fixed_order=not randomize) R_dup, _, i0 = R1.eval_for_poly(T, find_integer_root=True) if not dup_sqf_p(R_dup, ZZ): continue sq_disc = has_square_disc(T) if i0 is None: return (AlternatingGroup(5), True) if sq_disc else (SymmetricGroup(5), False) if not sq_disc: return (M20(), False) sigma = s1[i0] F2 = F2_pre.subs(zip(X, sigma(X)), simultaneous=True) s2 = [sigma*tau*sigma for tau in s2_pre] R2 = Resolvent(F2, X, s2) R_dup, _, _ = R2.eval_for_poly(T) d = dup_discriminant(R_dup, ZZ) if d == 0: continue if is_square(d): return (CyclicGroup(5), True) else: return (DihedralGroup(5), True) raise MaxTriesException "," Compute the Galois group of a polynomial of degree 5, following Alg 6.3.9 of Cohen. References ========== .. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*. ",28,247,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _galois_group_degree_5(T, max_tries=30, randomize=False): r from sympy.combinatorics.permutations import Permutation from sympy.combinatorics.named_groups import ( CyclicGroup, DihedralGroup, AlternatingGroup, SymmetricGroup ) # The ideas here are all the same as in the degree-4 method. # The specific resolvents we use, and how we interpret the results, are # adapted to the degree-5 case. X = symbols('X0 X1 X2 X3 X4') # For the first resolvent, we have G = S5, # and stabilizer H = M20 = < (01234), (1234) >. F1 = (X[0]**2*(X[1]*X[4] + X[2]*X[3]) + X[1]**2*(X[2]*X[0] + X[3]*X[4]) + X[2]**2*(X[3]*X[1] + X[4]*X[0]) + X[3]**2*(X[4]*X[2] + X[0]*X[1]) + X[4]**2*(X[0]*X[3] + X[1]*X[2])) s1 = [ Permutation(4), Permutation(4)(0, 1), Permutation(4)(0, 2), Permutation(4)(0, 3), Permutation(4)(0, 4), Permutation(4)(1, 4) ] R1 = Resolvent(F1, X, s1) # For the second resolvent, we'll have G = D5, H = C5. F2_pre = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[4]**2 + X[4]*X[0]**2 s2_pre = [ Permutation(4), Permutation(4)(0, 1)(2, 4) ] history = set() for i in range(max_tries): if i > 0: _, T = tschirnhausen_transformation(T, max_tries=max_tries, history=history, fixed_order=not randomize) R_dup, _, i0 = R1.eval_for_poly(T, find_integer_root=True) if not dup_sqf_p(R_dup, ZZ): continue sq_disc = has_square_disc(T) if i0 is None: return (AlternatingGroup(5), True) if sq_disc else (SymmetricGroup(5), False) if not sq_disc: return (M20(), False) sigma = s1[i0] F2 = F2_pre.subs(zip(X, sigma(X)), simultaneous=True) s2 = [sigma*tau*sigma for tau in s2_pre] R2 = Resolvent(F2, X, s2) R_dup, _, _ = R2.eval_for_poly(T) d = dup_discriminant(R_dup, ZZ) if d == 0: continue if is_square(d): return (CyclicGroup(5), True) else: return (DihedralGroup(5), True) raise MaxTriesException ``` ###Assistant : Compute the Galois group of a polynomial of degree 5, following Alg 6.3.9 of Cohen. References ========== .. [1] Cohen, H. 
*A Course in Computational Algebraic Number Theory*. " 1774,"def error(self, message): self.print_usage(_sys.stderr) args = {'prog': self.prog, 'message': message} self.exit(2, _('%(prog)s: error: %(message)s\n') % args) ","error(message: string) Prints a usage message incorporating the message to stderr and exits. If you override this in a subclass, it should not return -- it should either exit or raise an exception. ",33,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def error(self, message): self.print_usage(_sys.stderr) args = {'prog': self.prog, 'message': message} self.exit(2, _('%(prog)s: error: %(message)s\n') % args) ``` ###Assistant : error(message: string) Prints a usage message incorporating the message to stderr and exits. If you override this in a subclass, it should not return -- it should either exit or raise an exception. " 1775,"def preprocess_input(x, data_format=None): # pylint: disable=unused-argument return x @keras_export(""keras.applications.regnet.decode_predictions"")","A placeholder method for backward compatibility. The preprocessing logic has been included in the regnet model implementation. Users are no longer required to call this method to normalize the input data. This method does nothing and only kept as a placeholder to align the API surface between old and new version of model. Args: x: A floating point `numpy.array` or a `tf.Tensor`. data_format: Optional data format of the image tensor/array. Defaults to None, in which case the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it defaults to ""channels_last"").{mode} Returns: Unchanged `numpy.array` or `tf.Tensor`. ",95,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def preprocess_input(x, data_format=None): # pylint: disable=unused-argument return x @keras_export(""keras.applications.regnet.decode_predictions"") ``` ###Assistant : A placeholder method for backward compatibility. The preprocessing logic has been included in the regnet model implementation. Users are no longer required to call this method to normalize the input data. This method does nothing and only kept as a placeholder to align the API surface between old and new version of model. Args: x: A floating point `numpy.array` or a `tf.Tensor`. data_format: Optional data format of the image tensor/array. Defaults to None, in which case the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it defaults to ""channels_last"").{mode} Returns: Unchanged `numpy.array` or `tf.Tensor`. " 1776,"def test_edit_post(self): # Send request response = self.client.post( reverse(""wagtaildocs:edit_multiple"", args=(self.doc.id,)), { ""doc-%d-%s"" % (self.doc.id, field): data for field, data in self.edit_post_data.items() }, ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response[""Content-Type""], ""application/json"") # Check JSON response_json = json.loads(response.content.decode()) self.assertIn(""doc_id"", response_json) self.assertNotIn(""form"", response_json) self.assertIn(""success"", response_json) self.assertEqual(response_json[""doc_id""], self.doc.id) self.assertTrue(response_json[""success""]) self.check_doc_after_edit() "," This tests that a POST request to the edit view edits the document ",13,46,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_edit_post(self): # Send request response = self.client.post( reverse(""wagtaildocs:edit_multiple"", args=(self.doc.id,)), { ""doc-%d-%s"" % (self.doc.id, field): data for field, data in self.edit_post_data.items() }, ) # Check response self.assertEqual(response.status_code, 200) self.assertEqual(response[""Content-Type""], ""application/json"") # Check JSON response_json = json.loads(response.content.decode()) self.assertIn(""doc_id"", response_json) self.assertNotIn(""form"", response_json) self.assertIn(""success"", response_json) self.assertEqual(response_json[""doc_id""], self.doc.id) self.assertTrue(response_json[""success""]) self.check_doc_after_edit() ``` ###Assistant : This tests that a POST request to the edit view edits the document " 1777,"def test_message_hiding(qtbot, view): with qtbot.wait_signal(view._clear_timer.timeout): view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test')) assert not view._messages ",Messages should be hidden after the timer times out.,9,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_message_hiding(qtbot, view): with qtbot.wait_signal(view._clear_timer.timeout): view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test')) assert not view._messages ``` ###Assistant : Messages should be hidden after the timer times out. " 1778,"def test_avatar_allowed_mime_type_per_room(self): self._setup_local_files( { ""good"": {""mimetype"": ""image/png""}, ""bad"": {""mimetype"": ""application/octet-stream""}, } ) room_id = self.helper.create_room_as(tok=self.owner_tok) channel = self.make_request( ""PUT"", f""/rooms/{room_id}/state/m.room.member/{self.owner}"", content={""membership"": ""join"", ""avatar_url"": ""mxc://test/bad""}, access_token=self.owner_tok, ) self.assertEqual(channel.code, 403, channel.result) self.assertEqual( channel.json_body[""errcode""], Codes.FORBIDDEN, channel.json_body ) channel = self.make_request( ""PUT"", f""/rooms/{room_id}/state/m.room.member/{self.owner}"", content={""membership"": ""join"", ""avatar_url"": ""mxc://test/good""}, access_token=self.owner_tok, ) self.assertEqual(channel.code, 200, channel.result) ","Tests that the MIME type whitelist for avatars is enforced when updating a per-room profile. ",15,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_avatar_allowed_mime_type_per_room(self): self._setup_local_files( { ""good"": {""mimetype"": ""image/png""}, ""bad"": {""mimetype"": ""application/octet-stream""}, } ) room_id = self.helper.create_room_as(tok=self.owner_tok) channel = self.make_request( ""PUT"", f""/rooms/{room_id}/state/m.room.member/{self.owner}"", content={""membership"": ""join"", ""avatar_url"": ""mxc://test/bad""}, access_token=self.owner_tok, ) self.assertEqual(channel.code, 403, channel.result) self.assertEqual( channel.json_body[""errcode""], Codes.FORBIDDEN, channel.json_body ) channel = self.make_request( ""PUT"", f""/rooms/{room_id}/state/m.room.member/{self.owner}"", content={""membership"": ""join"", ""avatar_url"": ""mxc://test/good""}, access_token=self.owner_tok, ) self.assertEqual(channel.code, 200, channel.result) ``` ###Assistant : Tests that the MIME type whitelist for avatars is enforced when updating a per-room profile. 
" 1779,"def container_name(self) -> t.Optional[str]: return self.state.get('container_name') ","Return the stored container name, if any, otherwise None.",9,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def container_name(self) -> t.Optional[str]: return self.state.get('container_name') ``` ###Assistant : Return the stored container name, if any, otherwise None. " 1780,"def get_scorer(scoring): if isinstance(scoring, str): try: scorer = copy.deepcopy(_SCORERS[scoring]) except KeyError: raise ValueError( ""%r is not a valid scoring value. "" ""Use sklearn.metrics.get_scorer_names() "" ""to get valid options."" % scoring ) else: scorer = scoring return scorer ","Get a scorer from string. Read more in the :ref:`User Guide `. :func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names of all available scorers. Parameters ---------- scoring : str or callable Scoring method as string. If callable it is returned as is. Returns ------- scorer : callable The scorer. Notes ----- When passed a string, this function always returns a copy of the scorer object. Calling `get_scorer` twice for the same scorer results in two separate scorer objects. ",78,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_scorer(scoring): if isinstance(scoring, str): try: scorer = copy.deepcopy(_SCORERS[scoring]) except KeyError: raise ValueError( ""%r is not a valid scoring value. "" ""Use sklearn.metrics.get_scorer_names() "" ""to get valid options."" % scoring ) else: scorer = scoring return scorer ``` ###Assistant : Get a scorer from string. Read more in the :ref:`User Guide `. :func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names of all available scorers. Parameters ---------- scoring : str or callable Scoring method as string. If callable it is returned as is. Returns ------- scorer : callable The scorer. Notes ----- When passed a string, this function always returns a copy of the scorer object. Calling `get_scorer` twice for the same scorer results in two separate scorer objects. " 1781,"def fit(self, X, y): X, y = self._validate_data( X, y, ensure_min_samples=2, dtype=[np.float64, np.float32] ) self.classes_ = unique_labels(y) n_samples, _ = X.shape n_classes = len(self.classes_) if n_samples == n_classes: raise ValueError( ""The number of samples must be more than the number of classes."" ) if self.priors is None: # estimate priors from sample _, y_t = np.unique(y, return_inverse=True) # non-negative ints self.priors_ = np.bincount(y_t) / float(len(y)) else: self.priors_ = np.asarray(self.priors) if (self.priors_ < 0).any(): raise ValueError(""priors must be non-negative"") if not np.isclose(self.priors_.sum(), 1.0): warnings.warn(""The priors do not sum to 1. 
Renormalizing"", UserWarning) self.priors_ = self.priors_ / self.priors_.sum() # Maximum number of components no matter what n_components is # specified: max_components = min(len(self.classes_) - 1, X.shape[1]) if self.n_components is None: self._max_components = max_components else: if self.n_components > max_components: raise ValueError( ""n_components cannot be larger than min(n_features, n_classes - 1)."" ) self._max_components = self.n_components if self.solver == ""svd"": if self.shrinkage is not None: raise NotImplementedError(""shrinkage not supported"") if self.covariance_estimator is not None: raise ValueError( ""covariance estimator "" ""is not supported "" ""with svd solver. Try another solver"" ) self._solve_svd(X, y) elif self.solver == ""lsqr"": self._solve_lsqr( X, y, shrinkage=self.shrinkage, covariance_estimator=self.covariance_estimator, ) elif self.solver == ""eigen"": self._solve_eigen( X, y, shrinkage=self.shrinkage, covariance_estimator=self.covariance_estimator, ) else: raise ValueError( ""unknown solver {} (valid solvers are 'svd', "" ""'lsqr', and 'eigen')."".format(self.solver) ) if self.classes_.size == 2: # treat binary case as a special case self.coef_ = np.array( self.coef_[1, :] - self.coef_[0, :], ndmin=2, dtype=X.dtype ) self.intercept_ = np.array( self.intercept_[1] - self.intercept_[0], ndmin=1, dtype=X.dtype ) self._n_features_out = self._max_components return self ","Fit the Linear Discriminant Analysis model. .. versionchanged:: 0.19 *store_covariance* has been moved to main constructor. .. versionchanged:: 0.19 *tol* has been moved to main constructor. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. Returns ------- self : object Fitted estimator. ",52,249,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit(self, X, y): X, y = self._validate_data( X, y, ensure_min_samples=2, dtype=[np.float64, np.float32] ) self.classes_ = unique_labels(y) n_samples, _ = X.shape n_classes = len(self.classes_) if n_samples == n_classes: raise ValueError( ""The number of samples must be more than the number of classes."" ) if self.priors is None: # estimate priors from sample _, y_t = np.unique(y, return_inverse=True) # non-negative ints self.priors_ = np.bincount(y_t) / float(len(y)) else: self.priors_ = np.asarray(self.priors) if (self.priors_ < 0).any(): raise ValueError(""priors must be non-negative"") if not np.isclose(self.priors_.sum(), 1.0): warnings.warn(""The priors do not sum to 1. Renormalizing"", UserWarning) self.priors_ = self.priors_ / self.priors_.sum() # Maximum number of components no matter what n_components is # specified: max_components = min(len(self.classes_) - 1, X.shape[1]) if self.n_components is None: self._max_components = max_components else: if self.n_components > max_components: raise ValueError( ""n_components cannot be larger than min(n_features, n_classes - 1)."" ) self._max_components = self.n_components if self.solver == ""svd"": if self.shrinkage is not None: raise NotImplementedError(""shrinkage not supported"") if self.covariance_estimator is not None: raise ValueError( ""covariance estimator "" ""is not supported "" ""with svd solver. 
Try another solver"" ) self._solve_svd(X, y) elif self.solver == ""lsqr"": self._solve_lsqr( X, y, shrinkage=self.shrinkage, covariance_estimator=self.covariance_estimator, ) elif self.solver == ""eigen"": self._solve_eigen( X, y, shrinkage=self.shrinkage, covariance_estimator=self.covariance_estimator, ) else: raise ValueError( ""unknown solver {} (valid solvers are 'svd', "" ""'lsqr', and 'eigen')."".format(self.solver) ) if self.classes_.size == 2: # treat binary case as a special case self.coef_ = np.array( self.coef_[1, :] - self.coef_[0, :], ndmin=2, dtype=X.dtype ) self.intercept_ = np.array( self.intercept_[1] - self.intercept_[0], ndmin=1, dtype=X.dtype ) self._n_features_out = self._max_components return self ``` ###Assistant : Fit the Linear Discriminant Analysis model. .. versionchanged:: 0.19 *store_covariance* has been moved to main constructor. .. versionchanged:: 0.19 *tol* has been moved to main constructor. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. Returns ------- self : object Fitted estimator. " 1782,"def test_pr_opened_with_multiple_reviewers(self) -> None: expected_topic = ""sandbox / PR #6 sample_file: Add sample_file.txt."" expected_message = self.check_webhook( ""pull_request_opened_with_multiple_reviewers"", expected_topic, expected_message ) ","[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\nAdd a simple text file for further testing purposes.\n~~~",25,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pr_opened_with_multiple_reviewers(self) -> None: expected_topic = ""sandbox / PR #6 sample_file: Add sample_file.txt."" expected_message = self.check_webhook( ""pull_request_opened_with_multiple_reviewers"", expected_topic, expected_message ) ``` ###Assistant : [hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\nAdd a simple text file for further testing purposes.\n~~~ " 1783,"def make_grouping_by_key(schema, source, default=None): return map_grouping(lambda s: source.get(s, default), schema) "," Create a grouping from a schema by using the schema's scalar values to look up items in the provided source object. :param schema: A grouping of potential keys in source :param source: Dict-like object to use to look up scalar grouping value using scalar grouping values as keys :param default: Default scalar value to use if grouping scalar key is not present in source :return: grouping ",66,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_grouping_by_key(schema, source, default=None): return map_grouping(lambda s: source.get(s, default), schema) ``` ###Assistant : Create a grouping from a schema by using the schema's scalar values to look up items in the provided source object. 
:param schema: A grouping of potential keys in source :param source: Dict-like object to use to look up scalar grouping value using scalar grouping values as keys :param default: Default scalar value to use if grouping scalar key is not present in source :return: grouping " 1784,"def taxicab_distance(self, p): s, p = Point._normalize_dimension(self, Point(p)) return Add(*(abs(a - b) for a, b in zip(s, p))) ","The Taxicab Distance from self to point p. Returns the sum of the horizontal and vertical distances to point p. Parameters ========== p : Point Returns ======= taxicab_distance : The sum of the horizontal and vertical distances to point p. See Also ======== sympy.geometry.point.Point.distance Examples ======== >>> from sympy import Point >>> p1, p2 = Point(1, 1), Point(4, 5) >>> p1.taxicab_distance(p2) 7 ",62,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def taxicab_distance(self, p): s, p = Point._normalize_dimension(self, Point(p)) return Add(*(abs(a - b) for a, b in zip(s, p))) ``` ###Assistant : The Taxicab Distance from self to point p. Returns the sum of the horizontal and vertical distances to point p. Parameters ========== p : Point Returns ======= taxicab_distance : The sum of the horizontal and vertical distances to point p. See Also ======== sympy.geometry.point.Point.distance Examples ======== >>> from sympy import Point >>> p1, p2 = Point(1, 1), Point(4, 5) >>> p1.taxicab_distance(p2) 7 " 1785,"def cosine_similarity(y_true, y_pred, axis=-1): y_true = tf.linalg.l2_normalize(y_true, axis=axis) y_pred = tf.linalg.l2_normalize(y_pred, axis=axis) return tf.reduce_sum(y_true * y_pred, axis=axis) ","Computes the cosine similarity between labels and predictions. Args: y_true: The ground truth values. y_pred: The prediction values. axis: (Optional) Defaults to -1. The dimension along which the cosine similarity is computed. Returns: Cosine similarity value. ",36,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cosine_similarity(y_true, y_pred, axis=-1): y_true = tf.linalg.l2_normalize(y_true, axis=axis) y_pred = tf.linalg.l2_normalize(y_pred, axis=axis) return tf.reduce_sum(y_true * y_pred, axis=axis) ``` ###Assistant : Computes the cosine similarity between labels and predictions. Args: y_true: The ground truth values. y_pred: The prediction values. axis: (Optional) Defaults to -1. The dimension along which the cosine similarity is computed. Returns: Cosine similarity value. " 1786,"def rotate(self, theta): a = math.cos(theta) b = math.sin(theta) mtx = self._mtx # Operating and assigning one scalar at a time is much faster. (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist() # mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx mtx[0, 0] = a * xx - b * yx mtx[0, 1] = a * xy - b * yy mtx[0, 2] = a * x0 - b * y0 mtx[1, 0] = b * xx + a * yx mtx[1, 1] = b * xy + a * yy mtx[1, 2] = b * x0 + a * y0 self.invalidate() return self "," Add a rotation (in radians) to this transform in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. ",28,110,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rotate(self, theta): a = math.cos(theta) b = math.sin(theta) mtx = self._mtx # Operating and assigning one scalar at a time is much faster. 
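# Unpack the current 3x3 affine matrix row by row, then write out the product of the rotation matrix [[a, -b, 0], [b, a, 0], [0, 0, 1]] with it, one element at a time.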
(xx, xy, x0), (yx, yy, y0), _ = mtx.tolist() # mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx mtx[0, 0] = a * xx - b * yx mtx[0, 1] = a * xy - b * yy mtx[0, 2] = a * x0 - b * y0 mtx[1, 0] = b * xx + a * yx mtx[1, 1] = b * xy + a * yy mtx[1, 2] = b * x0 + a * y0 self.invalidate() return self ``` ###Assistant : Add a rotation (in radians) to this transform in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. " 1787,"def call_load(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog=""load"", description=, ) parser.add_argument( ""-c"", ""--coin"", help=""Coin to get. Must be coin symbol (e.g., btc, eth)"", dest=""coin"", type=str, required=""-h"" not in other_args, ) parser.add_argument( ""-s"", ""--start"", type=valid_date, default=(datetime.now() - timedelta(days=1100)).strftime(""%Y-%m-%d""), dest=""start"", help=""The starting date (format YYYY-MM-DD) of the crypto"", ) parser.add_argument( ""--exchange"", help=""Exchange to search"", dest=""exchange"", type=str, default=""binance"", choices=self.exchanges, ) parser.add_argument( ""-e"", ""--end"", type=valid_date, default=datetime.now().strftime(""%Y-%m-%d""), dest=""end"", help=""The ending date (format YYYY-MM-DD) of the crypto"", ) parser.add_argument( ""-i"", ""--interval"", action=""store"", dest=""interval"", type=str, default=""1440"", choices=[""1"", ""5"", ""15"", ""30"", ""60"", ""240"", ""1440"", ""10080"", ""43200""], help=""The interval of the crypto"", ) parser.add_argument( ""--vs"", help=""Quote currency (what to view coin vs). e.g., usdc, usdt, ... if source is ccxt, usd, eur, ... otherwise"", # noqa dest=""vs"", default=""usdt"", type=str, ) if other_args and ""-"" not in other_args[0][0]: other_args.insert(0, ""-c"") ns_parser = self.parse_known_args_and_warn( parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: if ns_parser.source in (""YahooFinance"", ""CoinGecko""): if ns_parser.vs == ""usdt"": ns_parser.vs = ""usd"" (self.current_df) = cryptocurrency_helpers.load( symbol=ns_parser.coin.lower(), vs_currency=ns_parser.vs, end_date=ns_parser.end.strftime(""%Y-%m-%d""), start_date=ns_parser.start.strftime(""%Y-%m-%d""), interval=ns_parser.interval, source=ns_parser.source, exchange=ns_parser.exchange, ) if not self.current_df.empty: self.vs = ns_parser.vs self.exchange = ns_parser.exchange self.source = ns_parser.source self.current_interval = ns_parser.interval self.current_currency = ns_parser.vs self.symbol = ns_parser.coin.lower() cryptocurrency_helpers.show_quick_performance( self.current_df, self.symbol, self.current_currency, ns_parser.source, ns_parser.exchange, self.current_interval, ) export_data( ns_parser.export, os.path.dirname(os.path.abspath(__file__)), ""load"", self.current_df.copy(), ) ","Process load command.Load crypto currency to perform analysis on. Yahoo Finance is used as default source. Other sources can be used such as 'ccxt' or 'cg' with --source. If you select 'ccxt', you can then select any exchange with --exchange. You can also select a specific interval with --interval.",49,198,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def call_load(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog=""load"", description=, ) parser.add_argument( ""-c"", ""--coin"", help=""Coin to get. Must be coin symbol (e.g., btc, eth)"", dest=""coin"", type=str, required=""-h"" not in other_args, ) parser.add_argument( ""-s"", ""--start"", type=valid_date, default=(datetime.now() - timedelta(days=1100)).strftime(""%Y-%m-%d""), dest=""start"", help=""The starting date (format YYYY-MM-DD) of the crypto"", ) parser.add_argument( ""--exchange"", help=""Exchange to search"", dest=""exchange"", type=str, default=""binance"", choices=self.exchanges, ) parser.add_argument( ""-e"", ""--end"", type=valid_date, default=datetime.now().strftime(""%Y-%m-%d""), dest=""end"", help=""The ending date (format YYYY-MM-DD) of the crypto"", ) parser.add_argument( ""-i"", ""--interval"", action=""store"", dest=""interval"", type=str, default=""1440"", choices=[""1"", ""5"", ""15"", ""30"", ""60"", ""240"", ""1440"", ""10080"", ""43200""], help=""The interval of the crypto"", ) parser.add_argument( ""--vs"", help=""Quote currency (what to view coin vs). e.g., usdc, usdt, ... if source is ccxt, usd, eur, ... otherwise"", # noqa dest=""vs"", default=""usdt"", type=str, ) if other_args and ""-"" not in other_args[0][0]: other_args.insert(0, ""-c"") ns_parser = self.parse_known_args_and_warn( parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: if ns_parser.source in (""YahooFinance"", ""CoinGecko""): if ns_parser.vs == ""usdt"": ns_parser.vs = ""usd"" (self.current_df) = cryptocurrency_helpers.load( symbol=ns_parser.coin.lower(), vs_currency=ns_parser.vs, end_date=ns_parser.end.strftime(""%Y-%m-%d""), start_date=ns_parser.start.strftime(""%Y-%m-%d""), interval=ns_parser.interval, source=ns_parser.source, exchange=ns_parser.exchange, ) if not self.current_df.empty: self.vs = ns_parser.vs self.exchange = ns_parser.exchange self.source = ns_parser.source self.current_interval = ns_parser.interval self.current_currency = ns_parser.vs self.symbol = ns_parser.coin.lower() cryptocurrency_helpers.show_quick_performance( self.current_df, self.symbol, self.current_currency, ns_parser.source, ns_parser.exchange, self.current_interval, ) export_data( ns_parser.export, os.path.dirname(os.path.abspath(__file__)), ""load"", self.current_df.copy(), ) ``` ###Assistant : Process load command.Load crypto currency to perform analysis on. Yahoo Finance is used as default source. Other sources can be used such as 'ccxt' or 'cg' with --source. If you select 'ccxt', you can then select any exchange with --exchange. You can also select a specific interval with --interval. " 1788,"def render_markdown(value): schemes = '|'.join(get_config().ALLOWED_URL_SCHEMES) # Strip HTML tags value = strip_tags(value) # Sanitize Markdown links pattern = fr'\[([^\]]+)\]\((?!({schemes})).*:(.+)\)' value = re.sub(pattern, '[\\1](\\3)', value, flags=re.IGNORECASE) # Sanitize Markdown reference links pattern = fr'\[(.+)\]:\s*(?!({schemes}))\w*:(.+)' value = re.sub(pattern, '[\\1]: \\3', value, flags=re.IGNORECASE) # Render Markdown html = markdown(value, extensions=['fenced_code', 'tables', StrikethroughExtension()]) # If the string is not empty wrap it in rendered-markdown to style tables if html: html = f'
    <div class=""rendered-markdown"">{html}</div>
    ' return mark_safe(html) @register.filter('json')"," Render a string as Markdown. This filter is invoked as ""markdown"": {{ md_source_text|markdown }} ",14,72,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def render_markdown(value): schemes = '|'.join(get_config().ALLOWED_URL_SCHEMES) # Strip HTML tags value = strip_tags(value) # Sanitize Markdown links pattern = fr'\[([^\]]+)\]\((?!({schemes})).*:(.+)\)' value = re.sub(pattern, '[\\1](\\3)', value, flags=re.IGNORECASE) # Sanitize Markdown reference links pattern = fr'\[(.+)\]:\s*(?!({schemes}))\w*:(.+)' value = re.sub(pattern, '[\\1]: \\3', value, flags=re.IGNORECASE) # Render Markdown html = markdown(value, extensions=['fenced_code', 'tables', StrikethroughExtension()]) # If the string is not empty wrap it in rendered-markdown to style tables if html: html = f'
    <div class=""rendered-markdown"">{html}</div>
    ' return mark_safe(html) @register.filter('json') ``` ###Assistant : Render a string as Markdown. This filter is invoked as ""markdown"": {{ md_source_text|markdown }} " 1789,"def _try_breadth_first(tasks, user): tasks = tasks.annotate(annotations_count=Count('annotations')) max_annotations_count = tasks.aggregate(Max('annotations_count'))['annotations_count__max'] if max_annotations_count == 0: # there is no any labeled tasks found return # find any task with maximal amount of created annotations not_solved_tasks_labeling_started = tasks.annotate( reach_max_annotations_count=Case( When(annotations_count=max_annotations_count, then=Value(True)), default=Value(False), output_field=BooleanField(), ) ) not_solved_tasks_labeling_with_max_annotations = not_solved_tasks_labeling_started.filter( reach_max_annotations_count=True ) if not_solved_tasks_labeling_with_max_annotations.exists(): # try to complete tasks that are already in progress return _get_random_unlocked(not_solved_tasks_labeling_with_max_annotations, user) ","Try to find tasks with maximum amount of annotations, since we are trying to label tasks as fast as possible ",20,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _try_breadth_first(tasks, user): tasks = tasks.annotate(annotations_count=Count('annotations')) max_annotations_count = tasks.aggregate(Max('annotations_count'))['annotations_count__max'] if max_annotations_count == 0: # there is no any labeled tasks found return # find any task with maximal amount of created annotations not_solved_tasks_labeling_started = tasks.annotate( reach_max_annotations_count=Case( When(annotations_count=max_annotations_count, then=Value(True)), default=Value(False), output_field=BooleanField(), ) ) not_solved_tasks_labeling_with_max_annotations = not_solved_tasks_labeling_started.filter( reach_max_annotations_count=True ) if not_solved_tasks_labeling_with_max_annotations.exists(): # try to complete tasks that are already in progress return _get_random_unlocked(not_solved_tasks_labeling_with_max_annotations, user) ``` ###Assistant : Try to find tasks with maximum amount of annotations, since we are trying to label tasks as fast as possible " 1790,"def test_pad_batch_dynamic_max(self): view_requirements = { ""state_in_0"": ViewRequirement( ""state_out_0"", shift=[-1], used_for_training=False, used_for_compute_actions=True, batch_repeat_value=1, ) } max_seq_len = 20 num_seqs = np.random.randint(1, 20) seq_lens = np.random.randint(1, max_seq_len, size=(num_seqs)) max_len = np.max(seq_lens) sum_seq_lens = np.sum(seq_lens) s1 = SampleBatch( { ""a"": np.arange(sum_seq_lens), ""b"": np.arange(sum_seq_lens), ""seq_lens"": seq_lens, ""state_in_0"": [[0]] * num_seqs, }, _max_seq_len=max_seq_len, ) pad_batch_to_sequences_of_same_size( s1, max_seq_len=max_seq_len, feature_keys=[""a"", ""b""], view_requirements=view_requirements, ) check(s1.max_seq_len, max_len) check(s1[""a""].shape[0], max_len * num_seqs) check(s1[""b""].shape[0], max_len * num_seqs) ",Test pad_batch_to_sequences_of_same_size when dynamic_max = True,6,66,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_pad_batch_dynamic_max(self): view_requirements = { ""state_in_0"": ViewRequirement( ""state_out_0"", shift=[-1], used_for_training=False, used_for_compute_actions=True, batch_repeat_value=1, ) } max_seq_len = 20 num_seqs = np.random.randint(1, 20) seq_lens = np.random.randint(1, max_seq_len, size=(num_seqs)) max_len = np.max(seq_lens) sum_seq_lens = np.sum(seq_lens) s1 = SampleBatch( { ""a"": np.arange(sum_seq_lens), ""b"": np.arange(sum_seq_lens), ""seq_lens"": seq_lens, ""state_in_0"": [[0]] * num_seqs, }, _max_seq_len=max_seq_len, ) pad_batch_to_sequences_of_same_size( s1, max_seq_len=max_seq_len, feature_keys=[""a"", ""b""], view_requirements=view_requirements, ) check(s1.max_seq_len, max_len) check(s1[""a""].shape[0], max_len * num_seqs) check(s1[""b""].shape[0], max_len * num_seqs) ``` ###Assistant : Test pad_batch_to_sequences_of_same_size when dynamic_max = True " 1791,"def sensors_fans(): ret = collections.defaultdict(list) basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*') if not basenames: # CentOS has an intermediate /device directory: # https://github.com/giampaolo/psutil/issues/971 basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*') basenames = sorted(set([x.split('_')[0] for x in basenames])) for base in basenames: try: current = int(bcat(base + '_input')) except (IOError, OSError) as err: debug(err) continue unit_name = cat(os.path.join(os.path.dirname(base), 'name')) label = cat(base + '_label', fallback='') ret[unit_name].append(_common.sfan(label, current)) return dict(ret) ","Return hardware fans info (for CPU and other peripherals) as a dict including hardware label and current speed. Implementation notes: - /sys/class/hwmon looks like the most recent interface to retrieve this info, and this implementation relies on it only (old distros will probably use something else) - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon ",54,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sensors_fans(): ret = collections.defaultdict(list) basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*') if not basenames: # CentOS has an intermediate /device directory: # https://github.com/giampaolo/psutil/issues/971 basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*') basenames = sorted(set([x.split('_')[0] for x in basenames])) for base in basenames: try: current = int(bcat(base + '_input')) except (IOError, OSError) as err: debug(err) continue unit_name = cat(os.path.join(os.path.dirname(base), 'name')) label = cat(base + '_label', fallback='') ret[unit_name].append(_common.sfan(label, current)) return dict(ret) ``` ###Assistant : Return hardware fans info (for CPU and other peripherals) as a dict including hardware label and current speed. Implementation notes: - /sys/class/hwmon looks like the most recent interface to retrieve this info, and this implementation relies on it only (old distros will probably use something else) - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon " 1792,"def factory(cls, loader): cls.__check_eager_loader(loader) return lambda *args, **kwargs: cls(loader(*args, **kwargs)) ",Construct a callable which returns the eager loader made lazy.,10,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def factory(cls, loader): cls.__check_eager_loader(loader) return lambda *args, **kwargs: cls(loader(*args, **kwargs)) ``` ###Assistant : Construct a callable which returns the eager loader made lazy. " 1793,"def test_table_block_caption_render(self): value = { ""table_caption"": ""caption"", ""first_row_is_table_header"": False, ""first_col_is_header"": False, ""data"": [ [""Test 1"", ""Test 2"", ""Test 3""], [None, None, None], [None, None, None], ], } block = TableBlock() result = block.render(value) expected = self.assertHTMLEqual(result, expected) self.assertIn(""Test 2"", result) "," Test a generic render with caption.
    <caption>caption</caption>
    <tr><td>Test 1</td><td>Test 2</td><td>Test 3</td></tr>
    ",17,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_table_block_caption_render(self): value = { ""table_caption"": ""caption"", ""first_row_is_table_header"": False, ""first_col_is_header"": False, ""data"": [ [""Test 1"", ""Test 2"", ""Test 3""], [None, None, None], [None, None, None], ], } block = TableBlock() result = block.render(value) expected = self.assertHTMLEqual(result, expected) self.assertIn(""Test 2"", result) ``` ###Assistant : Test a generic render with caption.
    <caption>caption</caption>
    <tr><td>Test 1</td><td>Test 2</td><td>Test 3</td></tr>
    " 1794,"def stream_config_without_start_date(): return { ""client_id"": ""fake_client_id"", ""client_secret"": ""fake_client_secret"", ""refresh_token"": ""fake_refresh_token"", ""is_sandbox"": False, ""wait_timeout"": 15, } ",Generates streams settings for REST logic without start_date,8,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def stream_config_without_start_date(): return { ""client_id"": ""fake_client_id"", ""client_secret"": ""fake_client_secret"", ""refresh_token"": ""fake_refresh_token"", ""is_sandbox"": False, ""wait_timeout"": 15, } ``` ###Assistant : Generates streams settings for REST logic without start_date " 1795,"async def relay(self): while True: message = await self.queue.get() try: await self.send(message) self.queue.task_done() except RuntimeError: # The connection was closed, just exit the task return "," Relay messages from the channel's queue and send them out. This is started as a task. ",16,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def relay(self): while True: message = await self.queue.get() try: await self.send(message) self.queue.task_done() except RuntimeError: # The connection was closed, just exit the task return ``` ###Assistant : Relay messages from the channel's queue and send them out. This is started as a task. " 1796,"def q_sample(self, x_start, t, noise=None): if noise is None: # noise = th.randn_like(x_start) noise = paddle.randn(x_start.shape, x_start.dtype) assert noise.shape == x_start.shape return (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) "," Diffuse the data for a given number of diffusion steps. In other words, sample from q(x_t | x_0). :param x_start: the initial data batch. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :param noise: if specified, the split-out normal noise. :return: A noisy version of x_start. ",52,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def q_sample(self, x_start, t, noise=None): if noise is None: # noise = th.randn_like(x_start) noise = paddle.randn(x_start.shape, x_start.dtype) assert noise.shape == x_start.shape return (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) ``` ###Assistant : Diffuse the data for a given number of diffusion steps. In other words, sample from q(x_t | x_0). :param x_start: the initial data batch. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :param noise: if specified, the split-out normal noise. :return: A noisy version of x_start. " 1797,"def get_views(self): query = f""SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')"" result = self.run_native_query(query) return result "," List all views in PostgreSQL without the system views information_schema and pg_catalog ",12,19,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_views(self): query = f""SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')"" result = self.run_native_query(query) return result ``` ###Assistant : List all views in PostgreSQL without the system views information_schema and pg_catalog " 1798,"def verify_ogr_field(self, ogr_field, model_field): if isinstance(ogr_field, OFTString) and isinstance( model_field, (models.CharField, models.TextField) ): if self.encoding and ogr_field.value is not None: # The encoding for OGR data sources may be specified here # (e.g., 'cp437' for Census Bureau boundary files). val = force_str(ogr_field.value, self.encoding) else: val = ogr_field.value if ( model_field.max_length and val is not None and len(val) > model_field.max_length ): raise InvalidString( ""%s model field maximum string length is %s, given %s characters."" % (model_field.name, model_field.max_length, len(val)) ) elif isinstance(ogr_field, OFTReal) and isinstance( model_field, models.DecimalField ): try: # Creating an instance of the Decimal value to use. d = Decimal(str(ogr_field.value)) except DecimalInvalidOperation: raise InvalidDecimal( ""Could not construct decimal from: %s"" % ogr_field.value ) # Getting the decimal value as a tuple. dtup = d.as_tuple() digits = dtup[1] d_idx = dtup[2] # index where the decimal is # Maximum amount of precision, or digits to the left of the decimal. max_prec = model_field.max_digits - model_field.decimal_places # Getting the digits to the left of the decimal place for the # given decimal. if d_idx < 0: n_prec = len(digits[:d_idx]) else: n_prec = len(digits) + d_idx # If we have more than the maximum digits allowed, then throw an # InvalidDecimal exception. if n_prec > max_prec: raise InvalidDecimal( ""A DecimalField with max_digits %d, decimal_places %d must "" ""round to an absolute value less than 10^%d."" % (model_field.max_digits, model_field.decimal_places, max_prec) ) val = d elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance( model_field, models.IntegerField ): # Attempt to convert any OFTReal and OFTString value to an OFTInteger. try: val = int(ogr_field.value) except ValueError: raise InvalidInteger( ""Could not construct integer from: %s"" % ogr_field.value ) else: val = ogr_field.value return val "," Verify if the OGR Field contents are acceptable to the model field. If they are, return the verified value, otherwise raise an exception. ",23,274,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def verify_ogr_field(self, ogr_field, model_field): if isinstance(ogr_field, OFTString) and isinstance( model_field, (models.CharField, models.TextField) ): if self.encoding and ogr_field.value is not None: # The encoding for OGR data sources may be specified here # (e.g., 'cp437' for Census Bureau boundary files). val = force_str(ogr_field.value, self.encoding) else: val = ogr_field.value if ( model_field.max_length and val is not None and len(val) > model_field.max_length ): raise InvalidString( ""%s model field maximum string length is %s, given %s characters."" % (model_field.name, model_field.max_length, len(val)) ) elif isinstance(ogr_field, OFTReal) and isinstance( model_field, models.DecimalField ): try: # Creating an instance of the Decimal value to use. 
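# The digit bookkeeping below counts the digits to the left of the decimal point and rejects values whose precision exceeds what the model field allows (max_digits - decimal_places).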
d = Decimal(str(ogr_field.value)) except DecimalInvalidOperation: raise InvalidDecimal( ""Could not construct decimal from: %s"" % ogr_field.value ) # Getting the decimal value as a tuple. dtup = d.as_tuple() digits = dtup[1] d_idx = dtup[2] # index where the decimal is # Maximum amount of precision, or digits to the left of the decimal. max_prec = model_field.max_digits - model_field.decimal_places # Getting the digits to the left of the decimal place for the # given decimal. if d_idx < 0: n_prec = len(digits[:d_idx]) else: n_prec = len(digits) + d_idx # If we have more than the maximum digits allowed, then throw an # InvalidDecimal exception. if n_prec > max_prec: raise InvalidDecimal( ""A DecimalField with max_digits %d, decimal_places %d must "" ""round to an absolute value less than 10^%d."" % (model_field.max_digits, model_field.decimal_places, max_prec) ) val = d elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance( model_field, models.IntegerField ): # Attempt to convert any OFTReal and OFTString value to an OFTInteger. try: val = int(ogr_field.value) except ValueError: raise InvalidInteger( ""Could not construct integer from: %s"" % ogr_field.value ) else: val = ogr_field.value return val ``` ###Assistant : Verify if the OGR Field contents are acceptable to the model field. If they are, return the verified value, otherwise raise an exception. " 1799,"def get_streamer(): if 'JINA_STREAMER_ARGS' in os.environ: args_dict = json.loads(os.environ['JINA_STREAMER_ARGS']) return GatewayStreamer(**args_dict) else: raise OSError('JINA_STREAMER_ARGS environment variable is not set') "," Return a streamer object based on the current environment context. The streamer object is contructed using runtime arguments stored in the `JINA_STREAMER_ARGS` environment variable. If this method is used outside a Jina context (process not controlled/orchestrated by jina), this method will raise an error. The streamer object does not have tracing/instrumentation capabilities. :return: Returns an instance of `GatewayStreamer` ",58,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_streamer(): if 'JINA_STREAMER_ARGS' in os.environ: args_dict = json.loads(os.environ['JINA_STREAMER_ARGS']) return GatewayStreamer(**args_dict) else: raise OSError('JINA_STREAMER_ARGS environment variable is not set') ``` ###Assistant : Return a streamer object based on the current environment context. The streamer object is contructed using runtime arguments stored in the `JINA_STREAMER_ARGS` environment variable. If this method is used outside a Jina context (process not controlled/orchestrated by jina), this method will raise an error. The streamer object does not have tracing/instrumentation capabilities. :return: Returns an instance of `GatewayStreamer` " 1800,"def enter_positions(self) -> int: trades_created = 0 whitelist = copy.deepcopy(self.active_pair_whitelist) if not whitelist: logger.info(""Active pair whitelist is empty."") return trades_created # Remove pairs for currently opened trades from the whitelist for trade in Trade.get_open_trades(): if trade.pair in whitelist: whitelist.remove(trade.pair) logger.debug('Ignoring %s in pair whitelist', trade.pair) if not whitelist: logger.info(""No currency pair in active pair whitelist, "" ""but checking to exit open trades."") return trades_created if PairLocks.is_global_lock(side='*'): # This only checks for total locks (both sides). 
# per-side locks will be evaluated by `is_pair_locked` within create_trade, # once the direction for the trade is clear. lock = PairLocks.get_pair_longest_lock('*') if lock: self.log_once(f""Global pairlock active until "" f""{lock.lock_end_time.strftime(constants.DATETIME_PRINT_FORMAT)}. "" f""Not creating new trades, reason: {lock.reason}."", logger.info) else: self.log_once(""Global pairlock active. Not creating new trades."", logger.info) return trades_created # Create entity and execute trade for each pair from whitelist for pair in whitelist: try: trades_created += self.create_trade(pair) except DependencyException as exception: logger.warning('Unable to create trade for %s: %s', pair, exception) if not trades_created: logger.debug(""Found no enter signals for whitelisted currencies. Trying again..."") return trades_created "," Tries to execute entry orders for new trades (positions) ",9,170,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def enter_positions(self) -> int: trades_created = 0 whitelist = copy.deepcopy(self.active_pair_whitelist) if not whitelist: logger.info(""Active pair whitelist is empty."") return trades_created # Remove pairs for currently opened trades from the whitelist for trade in Trade.get_open_trades(): if trade.pair in whitelist: whitelist.remove(trade.pair) logger.debug('Ignoring %s in pair whitelist', trade.pair) if not whitelist: logger.info(""No currency pair in active pair whitelist, "" ""but checking to exit open trades."") return trades_created if PairLocks.is_global_lock(side='*'): # This only checks for total locks (both sides). # per-side locks will be evaluated by `is_pair_locked` within create_trade, # once the direction for the trade is clear. lock = PairLocks.get_pair_longest_lock('*') if lock: self.log_once(f""Global pairlock active until "" f""{lock.lock_end_time.strftime(constants.DATETIME_PRINT_FORMAT)}. "" f""Not creating new trades, reason: {lock.reason}."", logger.info) else: self.log_once(""Global pairlock active. Not creating new trades."", logger.info) return trades_created # Create entity and execute trade for each pair from whitelist for pair in whitelist: try: trades_created += self.create_trade(pair) except DependencyException as exception: logger.warning('Unable to create trade for %s: %s', pair, exception) if not trades_created: logger.debug(""Found no enter signals for whitelisted currencies. Trying again..."") return trades_created ``` ###Assistant : Tries to execute entry orders for new trades (positions) " 1801,"def validate_js_path(registered_paths, package_name, path_in_package_dist): if package_name not in registered_paths: raise exceptions.DependencyException( f ) if path_in_package_dist not in registered_paths[package_name]: raise exceptions.DependencyException( f ) "," Error loading dependency. ""{package_name}"" is not a registered library. Registered libraries are: {list(registered_paths.keys())} ""{package_name}"" is registered but the path requested is not valid. The path requested: ""{path_in_package_dist}"" List of registered paths: {registered_paths} ",32,22,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def validate_js_path(registered_paths, package_name, path_in_package_dist): if package_name not in registered_paths: raise exceptions.DependencyException( f ) if path_in_package_dist not in registered_paths[package_name]: raise exceptions.DependencyException( f ) ``` ###Assistant : Error loading dependency. ""{package_name}"" is not a registered library. Registered libraries are: {list(registered_paths.keys())} ""{package_name}"" is registered but the path requested is not valid. The path requested: ""{path_in_package_dist}"" List of registered paths: {registered_paths} " 1802,"def get_all_node_ids() -> List[Tuple[str, str]]: node_ids = [] # Sort on NodeID to ensure the ordering is deterministic across the cluster. for node in sorted(ray.nodes(), key=lambda entry: entry[""NodeID""]): # print(node) if node[""Alive""]: node_ids.append((node[""NodeID""], node[""NodeName""])) return node_ids ","Get IDs for all live nodes in the cluster. Returns a list of (node_id: str, ip_address: str). The node_id can be passed into the Ray SchedulingPolicy API. ",27,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_all_node_ids() -> List[Tuple[str, str]]: node_ids = [] # Sort on NodeID to ensure the ordering is deterministic across the cluster. for node in sorted(ray.nodes(), key=lambda entry: entry[""NodeID""]): # print(node) if node[""Alive""]: node_ids.append((node[""NodeID""], node[""NodeName""])) return node_ids ``` ###Assistant : Get IDs for all live nodes in the cluster. Returns a list of (node_id: str, ip_address: str). The node_id can be passed into the Ray SchedulingPolicy API. " 1803,"def _ignore_comments(self, block): comment_spans = False while True: comment_start = block.find(b""#"") # look for next comment if comment_start == -1: # no comment found break comment_end = self._find_comment_end(block, comment_start) if comment_end != -1: # comment ends in this block block = ( block[:comment_start] + block[comment_end + 1 :] ) # delete comment else: # last comment continues to next block(s) block = block[:comment_start] comment_spans = True break return block, comment_spans "," Deletes comments from block. If comment does not end in this block, raises a flag. ",15,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _ignore_comments(self, block): comment_spans = False while True: comment_start = block.find(b""#"") # look for next comment if comment_start == -1: # no comment found break comment_end = self._find_comment_end(block, comment_start) if comment_end != -1: # comment ends in this block block = ( block[:comment_start] + block[comment_end + 1 :] ) # delete comment else: # last comment continues to next block(s) block = block[:comment_start] comment_spans = True break return block, comment_spans ``` ###Assistant : Deletes comments from block. If comment does not end in this block, raises a flag. " 1804,"def test_torch_auto_gpu_to_cpu(ray_start_4_cpus_2_gpus): num_workers = 2 assert os.environ[""CUDA_VISIBLE_DEVICES""] == """" ",Tests if GPU tensors are auto converted to CPU on driver.,11,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_torch_auto_gpu_to_cpu(ray_start_4_cpus_2_gpus): num_workers = 2 assert os.environ[""CUDA_VISIBLE_DEVICES""] == """" ``` ###Assistant : Tests if GPU tensors are auto converted to CPU on driver. " 1805,"def formatyear(self, theyear, width=3): v = [] a = v.append width = max(width, 1) a('' % self.cssclass_year) a('\n') a('' % ( width, self.cssclass_year_head, theyear)) for i in range(January, January+12, width): # months in this row months = range(i, min(i+width, 13)) a('') for m in months: a('') a('') a('
    %s
    ') a(self.formatmonth(theyear, m, withyear=False)) a('
    ') return ''.join(v) "," Return a formatted year as a table of tables. ",9,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def formatyear(self, theyear, width=3): v = [] a = v.append width = max(width, 1) a('' % self.cssclass_year) a('\n') a('' % ( width, self.cssclass_year_head, theyear)) for i in range(January, January+12, width): # months in this row months = range(i, min(i+width, 13)) a('') for m in months: a('') a('') a('
    %s
    ') a(self.formatmonth(theyear, m, withyear=False)) a('
    ') return ''.join(v) ``` ###Assistant : Return a formatted year as a table of tables. " 1806,"def predict(self, inputs): training = False user = inputs[""user""] input_seq = inputs[""input_seq""] candidate = inputs[""candidate""] mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1) seq_embeddings, positional_embeddings = self.embedding(input_seq) # (1, s, h) u0_latent = self.user_embedding_layer(user) u0_latent = u0_latent * (self.user_embedding_dim ** 0.5) # (1, 1, h) u0_latent = tf.squeeze(u0_latent, axis=0) # (1, h) test_user_emb = tf.tile(u0_latent, [1 + self.num_neg_test, 1]) # (101, h) u_latent = self.user_embedding_layer(user) u_latent = u_latent * (self.user_embedding_dim ** 0.5) # (b, 1, h) u_latent = tf.tile(u_latent, [1, tf.shape(input_seq)[1], 1]) # (b, s, h) seq_embeddings = tf.reshape( tf.concat([seq_embeddings, u_latent], 2), [tf.shape(input_seq)[0], -1, self.hidden_units], ) seq_embeddings += positional_embeddings # (b, s, h1 + h2) seq_embeddings *= mask seq_attention = seq_embeddings seq_attention = self.encoder(seq_attention, training, mask) seq_attention = self.layer_normalization(seq_attention) # (b, s, h1+h2) seq_emb = tf.reshape( seq_attention, [tf.shape(input_seq)[0] * self.seq_max_len, self.hidden_units], ) # (b*s1, h1+h2) candidate_emb = self.item_embedding_layer(candidate) # (b, s2, h2) candidate_emb = tf.squeeze(candidate_emb, axis=0) # (s2, h2) candidate_emb = tf.reshape( tf.concat([candidate_emb, test_user_emb], 1), [-1, self.hidden_units] ) # (b*s2, h1+h2) candidate_emb = tf.transpose(candidate_emb, perm=[1, 0]) # (h1+h2, b*s2) test_logits = tf.matmul(seq_emb, candidate_emb) # (b*s1, b*s2) test_logits = tf.reshape( test_logits, [tf.shape(input_seq)[0], self.seq_max_len, 1 + self.num_neg_test], ) # (1, s, 101) test_logits = test_logits[:, -1, :] # (1, 101) return test_logits "," Model prediction for candidate (negative) items ",6,198,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def predict(self, inputs): training = False user = inputs[""user""] input_seq = inputs[""input_seq""] candidate = inputs[""candidate""] mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1) seq_embeddings, positional_embeddings = self.embedding(input_seq) # (1, s, h) u0_latent = self.user_embedding_layer(user) u0_latent = u0_latent * (self.user_embedding_dim ** 0.5) # (1, 1, h) u0_latent = tf.squeeze(u0_latent, axis=0) # (1, h) test_user_emb = tf.tile(u0_latent, [1 + self.num_neg_test, 1]) # (101, h) u_latent = self.user_embedding_layer(user) u_latent = u_latent * (self.user_embedding_dim ** 0.5) # (b, 1, h) u_latent = tf.tile(u_latent, [1, tf.shape(input_seq)[1], 1]) # (b, s, h) seq_embeddings = tf.reshape( tf.concat([seq_embeddings, u_latent], 2), [tf.shape(input_seq)[0], -1, self.hidden_units], ) seq_embeddings += positional_embeddings # (b, s, h1 + h2) seq_embeddings *= mask seq_attention = seq_embeddings seq_attention = self.encoder(seq_attention, training, mask) seq_attention = self.layer_normalization(seq_attention) # (b, s, h1+h2) seq_emb = tf.reshape( seq_attention, [tf.shape(input_seq)[0] * self.seq_max_len, self.hidden_units], ) # (b*s1, h1+h2) candidate_emb = self.item_embedding_layer(candidate) # (b, s2, h2) candidate_emb = tf.squeeze(candidate_emb, axis=0) # (s2, h2) candidate_emb = tf.reshape( tf.concat([candidate_emb, test_user_emb], 1), [-1, self.hidden_units] ) # (b*s2, h1+h2) candidate_emb = tf.transpose(candidate_emb, perm=[1, 0]) # (h1+h2, b*s2) test_logits = tf.matmul(seq_emb, candidate_emb) # (b*s1, b*s2) test_logits = tf.reshape( test_logits, [tf.shape(input_seq)[0], self.seq_max_len, 1 + self.num_neg_test], ) # (1, s, 101) test_logits = test_logits[:, -1, :] # (1, 101) return test_logits ``` ###Assistant : Model prediction for candidate (negative) items " 1807,"def tobitmap(self, name=""image""): self.load() if self.mode != ""1"": msg = ""not a bitmap"" raise ValueError(msg) data = self.tobytes(""xbm"") return b"""".join( [ f""#define {name}_width {self.size[0]}\n"".encode(""ascii""), f""#define {name}_height {self.size[1]}\n"".encode(""ascii""), f""static char {name}_bits[] = {{\n"".encode(""ascii""), data, b""};"", ] ) "," Returns the image converted to an X11 bitmap. .. note:: This method only works for mode ""1"" images. :param name: The name prefix to use for the bitmap variables. :returns: A string containing an X11 bitmap. :raises ValueError: If the mode is not ""1"" ",44,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tobitmap(self, name=""image""): self.load() if self.mode != ""1"": msg = ""not a bitmap"" raise ValueError(msg) data = self.tobytes(""xbm"") return b"""".join( [ f""#define {name}_width {self.size[0]}\n"".encode(""ascii""), f""#define {name}_height {self.size[1]}\n"".encode(""ascii""), f""static char {name}_bits[] = {{\n"".encode(""ascii""), data, b""};"", ] ) ``` ###Assistant : Returns the image converted to an X11 bitmap. .. note:: This method only works for mode ""1"" images. :param name: The name prefix to use for the bitmap variables. :returns: A string containing an X11 bitmap. 
:raises ValueError: If the mode is not ""1"" " 1808,"def add_edges_from(self, ebunch_to_add, **attr): for e in ebunch_to_add: ne = len(e) if ne == 3: u, v, dd = e elif ne == 2: u, v = e dd = {} else: raise NetworkXError(f""Edge tuple {e} must be a 2-tuple or 3-tuple."") if u not in self._succ: if u is None: raise ValueError(""None cannot be a node"") self._succ[u] = self.adjlist_inner_dict_factory() self._pred[u] = self.adjlist_inner_dict_factory() self._node[u] = self.node_attr_dict_factory() if v not in self._succ: if v is None: raise ValueError(""None cannot be a node"") self._succ[v] = self.adjlist_inner_dict_factory() self._pred[v] = self.adjlist_inner_dict_factory() self._node[v] = self.node_attr_dict_factory() datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) datadict.update(attr) datadict.update(dd) self._succ[u][v] = datadict self._pred[v][u] = datadict ","Add all the edges in ebunch_to_add. Parameters ---------- ebunch_to_add : container of edges Each edge given in the container will be added to the graph. The edges must be given as 2-tuples (u, v) or 3-tuples (u, v, d) where d is a dictionary containing edge data. attr : keyword arguments, optional Edge data (or labels or objects) can be assigned using keyword arguments. See Also -------- add_edge : add a single edge add_weighted_edges_from : convenient way to add weighted edges Notes ----- Adding the same edge twice has no effect but any edge data will be updated when each duplicate edge is added. Edge attributes specified in an ebunch take precedence over attributes specified via keyword arguments. When adding edges from an iterator over the graph you are changing, a `RuntimeError` can be raised with message: `RuntimeError: dictionary changed size during iteration`. This happens when the graph's underlying dictionary is modified during iteration. To avoid this error, evaluate the iterator into a separate object, e.g. by using `list(iterator_of_edges)`, and pass this object to `G.add_edges_from`. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples >>> e = zip(range(0, 3), range(1, 4)) >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 Associate data to edges >>> G.add_edges_from([(1, 2), (2, 3)], weight=3) >>> G.add_edges_from([(3, 4), (1, 4)], label=""WN2898"") Evaluate an iterator over a graph if using it to modify the same graph >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) >>> # Grow graph by one new node, adding edges to all existing nodes. >>> # wrong way - will raise RuntimeError >>> # G.add_edges_from(((5, n) for n in G.nodes)) >>> # right way - note that there will be no self-edge for node 5 >>> G.add_edges_from(list((5, n) for n in G.nodes)) ",305,102,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def add_edges_from(self, ebunch_to_add, **attr): for e in ebunch_to_add: ne = len(e) if ne == 3: u, v, dd = e elif ne == 2: u, v = e dd = {} else: raise NetworkXError(f""Edge tuple {e} must be a 2-tuple or 3-tuple."") if u not in self._succ: if u is None: raise ValueError(""None cannot be a node"") self._succ[u] = self.adjlist_inner_dict_factory() self._pred[u] = self.adjlist_inner_dict_factory() self._node[u] = self.node_attr_dict_factory() if v not in self._succ: if v is None: raise ValueError(""None cannot be a node"") self._succ[v] = self.adjlist_inner_dict_factory() self._pred[v] = self.adjlist_inner_dict_factory() self._node[v] = self.node_attr_dict_factory() datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) datadict.update(attr) datadict.update(dd) self._succ[u][v] = datadict self._pred[v][u] = datadict ``` ###Assistant : Add all the edges in ebunch_to_add. Parameters ---------- ebunch_to_add : container of edges Each edge given in the container will be added to the graph. The edges must be given as 2-tuples (u, v) or 3-tuples (u, v, d) where d is a dictionary containing edge data. attr : keyword arguments, optional Edge data (or labels or objects) can be assigned using keyword arguments. See Also -------- add_edge : add a single edge add_weighted_edges_from : convenient way to add weighted edges Notes ----- Adding the same edge twice has no effect but any edge data will be updated when each duplicate edge is added. Edge attributes specified in an ebunch take precedence over attributes specified via keyword arguments. When adding edges from an iterator over the graph you are changing, a `RuntimeError` can be raised with message: `RuntimeError: dictionary changed size during iteration`. This happens when the graph's underlying dictionary is modified during iteration. To avoid this error, evaluate the iterator into a separate object, e.g. by using `list(iterator_of_edges)`, and pass this object to `G.add_edges_from`. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples >>> e = zip(range(0, 3), range(1, 4)) >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 Associate data to edges >>> G.add_edges_from([(1, 2), (2, 3)], weight=3) >>> G.add_edges_from([(3, 4), (1, 4)], label=""WN2898"") Evaluate an iterator over a graph if using it to modify the same graph >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) >>> # Grow graph by one new node, adding edges to all existing nodes. >>> # wrong way - will raise RuntimeError >>> # G.add_edges_from(((5, n) for n in G.nodes)) >>> # right way - note that there will be no self-edge for node 5 >>> G.add_edges_from(list((5, n) for n in G.nodes)) " 1809,"def _select_device(self) -> None: if os.path.exists(plaidml.settings.user_settings): # pylint:disable=no-member self._log(""debug"", ""Setting PlaidML devices from user_settings"") else: self._select_largest_gpu() "," If the plaidml user configuration settings exist, then set the default GPU from the settings file, Otherwise set the GPU to be the one with most VRAM. ",27,16,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _select_device(self) -> None: if os.path.exists(plaidml.settings.user_settings): # pylint:disable=no-member self._log(""debug"", ""Setting PlaidML devices from user_settings"") else: self._select_largest_gpu() ``` ###Assistant : If the plaidml user configuration settings exist, then set the default GPU from the settings file, Otherwise set the GPU to be the one with most VRAM. " 1810,"def test_readlink_non_canonical(file, source): intermediate = source.parent / ""intermediate.lnk"" intermediate.symlink_to(source) target = source.parent / ""symlink.lnk"" target.symlink_to(intermediate) try: result = file.readlink(path=target) assert result == str(intermediate) finally: intermediate.unlink() target.unlink() "," Test readlink where there are nested symlinks and canonicalize=False Should resolve to the first symlink ",15,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_readlink_non_canonical(file, source): intermediate = source.parent / ""intermediate.lnk"" intermediate.symlink_to(source) target = source.parent / ""symlink.lnk"" target.symlink_to(intermediate) try: result = file.readlink(path=target) assert result == str(intermediate) finally: intermediate.unlink() target.unlink() ``` ###Assistant : Test readlink where there are nested symlinks and canonicalize=False Should resolve to the first symlink " 1811,"def select_proxy(url, proxies): proxies = proxies or {} urlparts = urlparse(url) if urlparts.hostname is None: return proxies.get(urlparts.scheme, proxies.get(""all"")) proxy_keys = [ urlparts.scheme + ""://"" + urlparts.hostname, urlparts.scheme, ""all://"" + urlparts.hostname, ""all"", ] proxy = None for proxy_key in proxy_keys: if proxy_key in proxies: proxy = proxies[proxy_key] break return proxy ","Select a proxy for the url, if applicable. :param url: The url being for the request :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs ",29,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def select_proxy(url, proxies): proxies = proxies or {} urlparts = urlparse(url) if urlparts.hostname is None: return proxies.get(urlparts.scheme, proxies.get(""all"")) proxy_keys = [ urlparts.scheme + ""://"" + urlparts.hostname, urlparts.scheme, ""all://"" + urlparts.hostname, ""all"", ] proxy = None for proxy_key in proxy_keys: if proxy_key in proxies: proxy = proxies[proxy_key] break return proxy ``` ###Assistant : Select a proxy for the url, if applicable. :param url: The url being for the request :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs " 1812,"def eof_received(self): try: if self._loop.get_debug(): logger.debug(""%r received EOF"", self) self._wakeup_waiter(ConnectionResetError) if not self._in_handshake: keep_open = self._app_protocol.eof_received() if keep_open: logger.warning('returning true from eof_received() ' 'has no effect when using ssl') finally: self._transport.close() ","Called when the other end of the low-level stream is half-closed. If this returns a false value (including None), the transport will close itself. If it returns a true value, closing the transport is up to the protocol. ",38,31,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def eof_received(self): try: if self._loop.get_debug(): logger.debug(""%r received EOF"", self) self._wakeup_waiter(ConnectionResetError) if not self._in_handshake: keep_open = self._app_protocol.eof_received() if keep_open: logger.warning('returning true from eof_received() ' 'has no effect when using ssl') finally: self._transport.close() ``` ###Assistant : Called when the other end of the low-level stream is half-closed. If this returns a false value (including None), the transport will close itself. If it returns a true value, closing the transport is up to the protocol. " 1813,"def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs): if ie is not None: kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key() if video_id is not None: kwargs['id'] = video_id if video_title is not None: kwargs['title'] = video_title return { **kwargs, '_type': 'url_transparent' if url_transparent else 'url', 'url': url, } ",Returns a URL that points to a page that should be processed,12,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs): if ie is not None: kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key() if video_id is not None: kwargs['id'] = video_id if video_title is not None: kwargs['title'] = video_title return { **kwargs, '_type': 'url_transparent' if url_transparent else 'url', 'url': url, } ``` ###Assistant : Returns a URL that points to a page that should be processed " 1814,"def _shade_colors(color, normals, lightsource=None): if lightsource is None: # chosen for backwards-compatibility lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712) with np.errstate(invalid=""ignore""): shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True)) @ lightsource.direction) mask = ~np.isnan(shade) if mask.any(): # convert dot product to allowed shading fractions in_norm = mcolors.Normalize(-1, 1) out_norm = mcolors.Normalize(0.3, 1).inverse "," Shade *color* using normal vectors given by *normals*, assuming a *lightsource* (using default position if not given). *color* can also be an array of the same length as *normals*. ",29,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _shade_colors(color, normals, lightsource=None): if lightsource is None: # chosen for backwards-compatibility lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712) with np.errstate(invalid=""ignore""): shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True)) @ lightsource.direction) mask = ~np.isnan(shade) if mask.any(): # convert dot product to allowed shading fractions in_norm = mcolors.Normalize(-1, 1) out_norm = mcolors.Normalize(0.3, 1).inverse ``` ###Assistant : Shade *color* using normal vectors given by *normals*, assuming a *lightsource* (using default position if not given). *color* can also be an array of the same length as *normals*. " 1815,"def score(self, X, y, **fit_params): check_is_fitted(self) return self.estimator_.score(self.transform(X), y, **fit_params) ","Reduce X to the selected features and return the score of the estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The target values. 
**fit_params : dict Parameters to pass to the `score` method of the underlying estimator. .. versionadded:: 1.0 Returns ------- score : float Score of the underlying base estimator computed with the selected features returned by `rfe.transform(X)` and `y`. ",72,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def score(self, X, y, **fit_params): check_is_fitted(self) return self.estimator_.score(self.transform(X), y, **fit_params) ``` ###Assistant : Reduce X to the selected features and return the score of the estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The target values. **fit_params : dict Parameters to pass to the `score` method of the underlying estimator. .. versionadded:: 1.0 Returns ------- score : float Score of the underlying base estimator computed with the selected features returned by `rfe.transform(X)` and `y`. " 1816,"def reset(self, pos): self.value = pos self.velocity = 0 if self.history: val = self.history[-1][1] self.history = [(time(), val)] ","(internal) Reset the value and the velocity to the `pos`. Mostly used when the bounds are checked. ",17,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reset(self, pos): self.value = pos self.velocity = 0 if self.history: val = self.history[-1][1] self.history = [(time(), val)] ``` ###Assistant : (internal) Reset the value and the velocity to the `pos`. Mostly used when the bounds are checked. " 1817,"def test_from_fan_speed(fan_speed, expected_result): assert _from_fan_speed(fan_speed) == expected_result @pytest.mark.parametrize( ""percentage, expected_result"", [ (1, 2), (100, 50), (50, 26), ], )",Test that we can convert fan speed to percentage value.,10,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_from_fan_speed(fan_speed, expected_result): assert _from_fan_speed(fan_speed) == expected_result @pytest.mark.parametrize( ""percentage, expected_result"", [ (1, 2), (100, 50), (50, 26), ], ) ``` ###Assistant : Test that we can convert fan speed to percentage value. " 1818,"def task_runner(): from sentry.testutils.helpers.task_runner import TaskRunner return TaskRunner @pytest.fixture","Context manager that ensures Celery tasks run directly inline where invoked. While this context manager is active any Celery tasks created will run immediately at the callsite rather than being sent to RabbitMQ and handled by a worker. ",38,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def task_runner(): from sentry.testutils.helpers.task_runner import TaskRunner return TaskRunner @pytest.fixture ``` ###Assistant : Context manager that ensures Celery tasks run directly inline where invoked. While this context manager is active any Celery tasks created will run immediately at the callsite rather than being sent to RabbitMQ and handled by a worker. " 1819,"def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS): return """".join(secrets.choice(allowed_chars) for i in range(length)) "," Return a securely generated random string. 
The bit length of the returned value can be calculated with the formula: log_2(len(allowed_chars)^length) For example, with default `allowed_chars` (26+26+10), this gives: * length: 12, bit length =~ 71 bits * length: 22, bit length =~ 131 bits ",44,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS): return """".join(secrets.choice(allowed_chars) for i in range(length)) ``` ###Assistant : Return a securely generated random string. The bit length of the returned value can be calculated with the formula: log_2(len(allowed_chars)^length) For example, with default `allowed_chars` (26+26+10), this gives: * length: 12, bit length =~ 71 bits * length: 22, bit length =~ 131 bits " 1820,"def test_command_reply(tplaybook): tplaybook >> TEvent() tplaybook << TCommand() tplaybook >> tutils.reply() assert tplaybook assert tplaybook.actual[1] == tplaybook.actual[2].command ",CommandReplies can use relative offsets to point to the matching command.,11,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_command_reply(tplaybook): tplaybook >> TEvent() tplaybook << TCommand() tplaybook >> tutils.reply() assert tplaybook assert tplaybook.actual[1] == tplaybook.actual[2].command ``` ###Assistant : CommandReplies can use relative offsets to point to the matching command. " 1821,"def _mirrored_cross_device_ops(all_reduce_alg, num_packs): if all_reduce_alg is None: return None mirrored_all_reduce_options = { ""nccl"": tf.distribute.NcclAllReduce, ""hierarchical_copy"": tf.distribute.HierarchicalCopyAllReduce, } if all_reduce_alg not in mirrored_all_reduce_options: raise ValueError( ""When used with `mirrored`, valid values for all_reduce_alg are "" ""[`nccl`, `hierarchical_copy`]. Supplied value: {}"".format( all_reduce_alg ) ) cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg] return cross_device_ops_class(num_packs=num_packs) ","Return a CrossDeviceOps based on all_reduce_alg and num_packs. Args: all_reduce_alg: a string specifying which cross device op to pick, or None. num_packs: an integer specifying number of packs for the cross device op. Returns: tf.distribute.CrossDeviceOps object or None. Raises: ValueError: if `all_reduce_alg` not in [None, ""nccl"", ""hierarchical_copy""]. ",47,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _mirrored_cross_device_ops(all_reduce_alg, num_packs): if all_reduce_alg is None: return None mirrored_all_reduce_options = { ""nccl"": tf.distribute.NcclAllReduce, ""hierarchical_copy"": tf.distribute.HierarchicalCopyAllReduce, } if all_reduce_alg not in mirrored_all_reduce_options: raise ValueError( ""When used with `mirrored`, valid values for all_reduce_alg are "" ""[`nccl`, `hierarchical_copy`]. Supplied value: {}"".format( all_reduce_alg ) ) cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg] return cross_device_ops_class(num_packs=num_packs) ``` ###Assistant : Return a CrossDeviceOps based on all_reduce_alg and num_packs. Args: all_reduce_alg: a string specifying which cross device op to pick, or None. num_packs: an integer specifying number of packs for the cross device op. Returns: tf.distribute.CrossDeviceOps object or None. Raises: ValueError: if `all_reduce_alg` not in [None, ""nccl"", ""hierarchical_copy""]. 
" 1822,"def test_delete_queue(self, mock_sb_admin_client): hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id) hook.delete_queue(self.queue_name) expected_calls = [mock.call().__enter__().delete_queue(self.queue_name)] mock_sb_admin_client.assert_has_calls(expected_calls) "," Test Delete queue functionality by passing queue name, assert the function with values, mock the azure service bus function `delete_queue` ",20,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_delete_queue(self, mock_sb_admin_client): hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id) hook.delete_queue(self.queue_name) expected_calls = [mock.call().__enter__().delete_queue(self.queue_name)] mock_sb_admin_client.assert_has_calls(expected_calls) ``` ###Assistant : Test Delete queue functionality by passing queue name, assert the function with values, mock the azure service bus function `delete_queue` " 1823,"def get_content_disposition(self): value = self.get('content-disposition') if value is None: return None c_d = _splitparam(value)[0].lower() return c_d # I.e. def walk(self): ... from email.iterators import walk ","Return the message's content-disposition if it exists, or None. The return values can be either 'inline', 'attachment' or None according to the rfc2183. ",23,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_content_disposition(self): value = self.get('content-disposition') if value is None: return None c_d = _splitparam(value)[0].lower() return c_d # I.e. def walk(self): ... from email.iterators import walk ``` ###Assistant : Return the message's content-disposition if it exists, or None. The return values can be either 'inline', 'attachment' or None according to the rfc2183. " 1824,"def _get_or_create(self, s, name=None, dtype=None, broadcastable=None): # Defaults if name is None: name = s.name if dtype is None: dtype = 'floatX' if broadcastable is None: broadcastable = () key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable) if key in self.cache: return self.cache[key] value = aet.tensor(name=name, dtype=dtype, shape=broadcastable) self.cache[key] = value return value "," Get the Aesara variable for a SymPy symbol from the cache, or create it if it does not exist. ",19,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_or_create(self, s, name=None, dtype=None, broadcastable=None): # Defaults if name is None: name = s.name if dtype is None: dtype = 'floatX' if broadcastable is None: broadcastable = () key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable) if key in self.cache: return self.cache[key] value = aet.tensor(name=name, dtype=dtype, shape=broadcastable) self.cache[key] = value return value ``` ###Assistant : Get the Aesara variable for a SymPy symbol from the cache, or create it if it does not exist. " 1825,"def defer(self, *fields): self._not_support_combined_queries(""defer"") if self._fields is not None: raise TypeError(""Cannot call defer() after .values() or .values_list()"") clone = self._chain() if fields == (None,): clone.query.clear_deferred_loading() else: clone.query.add_deferred_loading(fields) return clone "," Defer the loading of data for certain fields until they are accessed. Add the set of deferred fields to any existing set of deferred fields. 
The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed. ",46,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def defer(self, *fields): self._not_support_combined_queries(""defer"") if self._fields is not None: raise TypeError(""Cannot call defer() after .values() or .values_list()"") clone = self._chain() if fields == (None,): clone.query.clear_deferred_loading() else: clone.query.add_deferred_loading(fields) return clone ``` ###Assistant : Defer the loading of data for certain fields until they are accessed. Add the set of deferred fields to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed. " 1826,"def match_seq(self, nodes, results=None): if len(nodes) != 1: return False return self.match(nodes[0], results) "," Does this pattern exactly match a sequence of nodes? Default implementation for non-wildcard patterns. ",14,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def match_seq(self, nodes, results=None): if len(nodes) != 1: return False return self.match(nodes[0], results) ``` ###Assistant : Does this pattern exactly match a sequence of nodes? Default implementation for non-wildcard patterns. " 1827,"def team_ids_with_membership(self) -> FrozenSet[int]: return frozenset(team.id for team in self._team_memberships.keys()) ","Return the IDs of teams in which the user has actual membership. This represents the set of all teams for which `has_team_membership` returns true. Use that method where possible and use this property only when you need to iterate or query for all such teams. Compare to accessible_team_ids, which is equal to this property in the typical case but represents a superset of IDs in case of superuser access. ",69,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def team_ids_with_membership(self) -> FrozenSet[int]: return frozenset(team.id for team in self._team_memberships.keys()) ``` ###Assistant : Return the IDs of teams in which the user has actual membership. This represents the set of all teams for which `has_team_membership` returns true. Use that method where possible and use this property only when you need to iterate or query for all such teams. Compare to accessible_team_ids, which is equal to this property in the typical case but represents a superset of IDs in case of superuser access. " 1828,"def test_gumbel_softmax(self): for fw, sess in framework_iterator(frameworks=(""tf2"", ""tf""), session=True): batch_size = 1000 num_categories = 5 input_space = Box(-1.0, 1.0, shape=(batch_size, num_categories)) input_space.seed(42) # Batch of size=n and deterministic. inputs = input_space.sample() gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0) expected = softmax(inputs) # Sample n times, expect always mean value (deterministic draw). out = gumbel_softmax.deterministic_sample() check(out, expected) # Batch of size=n and non-deterministic -> expect roughly that # the max-likelihood (argmax) ints are output (most of the time). 
inputs = input_space.sample() gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0) expected_mean = np.mean(np.argmax(inputs, -1)).astype(np.float32) outs = gumbel_softmax.sample() if sess: outs = sess.run(outs) check(np.mean(np.argmax(outs, -1)), expected_mean, rtol=0.08) ",Tests the GumbelSoftmax ActionDistribution (tf + eager only).,8,99,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_gumbel_softmax(self): for fw, sess in framework_iterator(frameworks=(""tf2"", ""tf""), session=True): batch_size = 1000 num_categories = 5 input_space = Box(-1.0, 1.0, shape=(batch_size, num_categories)) input_space.seed(42) # Batch of size=n and deterministic. inputs = input_space.sample() gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0) expected = softmax(inputs) # Sample n times, expect always mean value (deterministic draw). out = gumbel_softmax.deterministic_sample() check(out, expected) # Batch of size=n and non-deterministic -> expect roughly that # the max-likelihood (argmax) ints are output (most of the time). inputs = input_space.sample() gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0) expected_mean = np.mean(np.argmax(inputs, -1)).astype(np.float32) outs = gumbel_softmax.sample() if sess: outs = sess.run(outs) check(np.mean(np.argmax(outs, -1)), expected_mean, rtol=0.08) ``` ###Assistant : Tests the GumbelSoftmax ActionDistribution (tf + eager only). " 1829,"def get_confusion_matrix(gt_label, pred_label, num_classes): index = (gt_label * num_classes + pred_label).astype('int32') label_count = np.bincount(index) confusion_matrix = np.zeros((num_classes, num_classes)) for i_label in range(num_classes): for i_pred_label in range(num_classes): cur_index = i_label * num_classes + i_pred_label if cur_index < len(label_count): confusion_matrix[i_label, i_pred_label] = label_count[cur_index] return confusion_matrix "," Calculate the confusion matrix by given label and pred :param gt_label: the ground truth label :param pred_label: the pred label :param num_classes: the number of classes :return: the confusion matrix ",30,43,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_confusion_matrix(gt_label, pred_label, num_classes): index = (gt_label * num_classes + pred_label).astype('int32') label_count = np.bincount(index) confusion_matrix = np.zeros((num_classes, num_classes)) for i_label in range(num_classes): for i_pred_label in range(num_classes): cur_index = i_label * num_classes + i_pred_label if cur_index < len(label_count): confusion_matrix[i_label, i_pred_label] = label_count[cur_index] return confusion_matrix ``` ###Assistant : Calculate the confusion matrix by given label and pred :param gt_label: the ground truth label :param pred_label: the pred label :param num_classes: the number of classes :return: the confusion matrix " 1830,"def make_pad_mask(lengths, xs=None, length_dim=-1): if length_dim == 0: raise ValueError('length_dim cannot be 0: {}'.format(length_dim)) if not isinstance(lengths, list): lengths = lengths.tolist() bs = int(len(lengths)) if xs is None: maxlen = int(max(lengths)) else: maxlen = xs.size(length_dim) seq_range = torch.arange(0, maxlen, dtype=torch.int64) seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen) seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1) mask = seq_range_expand >= seq_length_expand if xs is not None: assert xs.size(0) == bs, (xs.size(0), bs) if length_dim < 0: length_dim = xs.dim() + length_dim # ind = (:, None, ..., None, :, , None, ..., None) ind = tuple(slice(None) if i in (0, length_dim) else None for i in range(xs.dim())) mask = mask[ind].expand_as(xs).to(xs.device) return mask ","Make mask tensor containing indices of padded part. Args: lengths (LongTensor or List): Batch of lengths (B,). xs (Tensor, optional): The reference tensor. If set, masks will be the same shape as this tensor. length_dim (int, optional): Dimension indicator of the above tensor. See the example. Returns: Tensor: Mask tensor containing indices of padded part. dtype=torch.uint8 in PyTorch 1.2- dtype=torch.bool in PyTorch 1.2+ (including 1.2) Examples: With only lengths. >>> lengths = [5, 3, 2] >>> make_non_pad_mask(lengths) masks = [[0, 0, 0, 0 ,0], [0, 0, 0, 1, 1], [0, 0, 1, 1, 1]] With the reference tensor. >>> xs = torch.zeros((3, 2, 4)) >>> make_pad_mask(lengths, xs) tensor([[[0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 1], [0, 0, 0, 1]], [[0, 0, 1, 1], [0, 0, 1, 1]]], dtype=torch.uint8) >>> xs = torch.zeros((3, 2, 6)) >>> make_pad_mask(lengths, xs) tensor([[[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1]], [[0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]], [[0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8) With the reference tensor and dimension indicator. 
>>> xs = torch.zeros((3, 6, 6)) >>> make_pad_mask(lengths, xs, 1) tensor([[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1]], [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]], [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8) >>> make_pad_mask(lengths, xs, 2) tensor([[[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1]], [[0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]], [[0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8) ",417,103,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_pad_mask(lengths, xs=None, length_dim=-1): if length_dim == 0: raise ValueError('length_dim cannot be 0: {}'.format(length_dim)) if not isinstance(lengths, list): lengths = lengths.tolist() bs = int(len(lengths)) if xs is None: maxlen = int(max(lengths)) else: maxlen = xs.size(length_dim) seq_range = torch.arange(0, maxlen, dtype=torch.int64) seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen) seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1) mask = seq_range_expand >= seq_length_expand if xs is not None: assert xs.size(0) == bs, (xs.size(0), bs) if length_dim < 0: length_dim = xs.dim() + length_dim # ind = (:, None, ..., None, :, , None, ..., None) ind = tuple(slice(None) if i in (0, length_dim) else None for i in range(xs.dim())) mask = mask[ind].expand_as(xs).to(xs.device) return mask ``` ###Assistant : Make mask tensor containing indices of padded part. Args: lengths (LongTensor or List): Batch of lengths (B,). xs (Tensor, optional): The reference tensor. If set, masks will be the same shape as this tensor. length_dim (int, optional): Dimension indicator of the above tensor. See the example. Returns: Tensor: Mask tensor containing indices of padded part. dtype=torch.uint8 in PyTorch 1.2- dtype=torch.bool in PyTorch 1.2+ (including 1.2) Examples: With only lengths. >>> lengths = [5, 3, 2] >>> make_non_pad_mask(lengths) masks = [[0, 0, 0, 0 ,0], [0, 0, 0, 1, 1], [0, 0, 1, 1, 1]] With the reference tensor. >>> xs = torch.zeros((3, 2, 4)) >>> make_pad_mask(lengths, xs) tensor([[[0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 1], [0, 0, 0, 1]], [[0, 0, 1, 1], [0, 0, 1, 1]]], dtype=torch.uint8) >>> xs = torch.zeros((3, 2, 6)) >>> make_pad_mask(lengths, xs) tensor([[[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1]], [[0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]], [[0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8) With the reference tensor and dimension indicator. 
>>> xs = torch.zeros((3, 6, 6)) >>> make_pad_mask(lengths, xs, 1) tensor([[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1]], [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]], [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8) >>> make_pad_mask(lengths, xs, 2) tensor([[[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1]], [[0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]], [[0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8) " 1831,"def test_guess_content_type_from_filename(self) -> None: data, content_type = encode_multipart_formdata({""file"": (""somefile"", b""zulip!"", None)}) result = self.api_post( self.example_user(""hamlet""), ""/api/v1/user_uploads"", data, content_type=content_type ) self.assert_json_success(result) data, content_type = encode_multipart_formdata({""file"": (""somefile.txt"", b""zulip!"", None)}) result = self.api_post( self.example_user(""hamlet""), ""/api/v1/user_uploads"", data, content_type=content_type ) self.assert_json_success(result) # This test will go through the code path for uploading files onto LOCAL storage # when Zulip is in DEVELOPMENT mode."," Test coverage for files without content-type in the metadata; in which case we try to guess the content-type from the filename. ",21,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_guess_content_type_from_filename(self) -> None: data, content_type = encode_multipart_formdata({""file"": (""somefile"", b""zulip!"", None)}) result = self.api_post( self.example_user(""hamlet""), ""/api/v1/user_uploads"", data, content_type=content_type ) self.assert_json_success(result) data, content_type = encode_multipart_formdata({""file"": (""somefile.txt"", b""zulip!"", None)}) result = self.api_post( self.example_user(""hamlet""), ""/api/v1/user_uploads"", data, content_type=content_type ) self.assert_json_success(result) # This test will go through the code path for uploading files onto LOCAL storage # when Zulip is in DEVELOPMENT mode. ``` ###Assistant : Test coverage for files without content-type in the metadata; in which case we try to guess the content-type from the filename. " 1832,"def get_member(name, members): # look first for a generic match - prepend lib and append .so expr = rf'lib{name}\.so' member = get_one_match(expr, members) if member: return member elif AIX_ABI == 64: expr = rf'lib{name}64\.so' member = get_one_match(expr, members) if member: return member # since an exact match with .so as suffix was not found # look for a versioned name # If a versioned name is not found, look for AIX legacy member name member = get_version(name, members) if member: return member else: return get_legacy(members) "," Return an archive member matching the request in name. Name is the library name without any prefix like lib, suffix like .so, or version number. Given a list of members find and return the most appropriate result Priority is given to generic libXXX.so, then a versioned libXXX.so.a.b.c and finally, legacy AIX naming scheme. 
",53,85,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_member(name, members): # look first for a generic match - prepend lib and append .so expr = rf'lib{name}\.so' member = get_one_match(expr, members) if member: return member elif AIX_ABI == 64: expr = rf'lib{name}64\.so' member = get_one_match(expr, members) if member: return member # since an exact match with .so as suffix was not found # look for a versioned name # If a versioned name is not found, look for AIX legacy member name member = get_version(name, members) if member: return member else: return get_legacy(members) ``` ###Assistant : Return an archive member matching the request in name. Name is the library name without any prefix like lib, suffix like .so, or version number. Given a list of members find and return the most appropriate result Priority is given to generic libXXX.so, then a versioned libXXX.so.a.b.c and finally, legacy AIX naming scheme. " 1833,"def check_original_docker_image(): if not os.path.isfile('/.dockerenv') or os.environ.get('PYTHON_BASE_IMAGE') is None: raise pytest.skip( ) ","Adding/removing a user as part of a test is very bad for host os (especially if the user already existed to begin with on the OS), therefore we check if we run inside a the official docker container and only allow to run the test there. This is done by checking /.dockerenv file (always present inside container) and checking for PYTHON_BASE_IMAGE variable. ",62,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_original_docker_image(): if not os.path.isfile('/.dockerenv') or os.environ.get('PYTHON_BASE_IMAGE') is None: raise pytest.skip( ) ``` ###Assistant : Adding/removing a user as part of a test is very bad for host os (especially if the user already existed to begin with on the OS), therefore we check if we run inside a the official docker container and only allow to run the test there. This is done by checking /.dockerenv file (always present inside container) and checking for PYTHON_BASE_IMAGE variable. " 1834,"def _useWizardInterface(): if not conf.wizard: return logger.info(""starting wizard interface"") while not conf.url: message = ""Please enter full target URL (-u): "" conf.url = readInput(message, default=None) message = ""%s data (--data) [Enter for None]: "" % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST) conf.data = readInput(message, default=None) if not (any('=' in _ for _ in (conf.url, conf.data)) or '*' in conf.url): warnMsg = ""no GET and/or %s parameter(s) found for testing "" % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST) warnMsg += ""(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). "" if not conf.crawlDepth and not conf.forms: warnMsg += ""Will search for forms"" conf.forms = True logger.warning(warnMsg) choice = None while choice is None or choice not in ("""", ""1"", ""2"", ""3""): message = ""Injection difficulty (--level/--risk). Please choose:\n"" message += ""[1] Normal (default)\n[2] Medium\n[3] Hard"" choice = readInput(message, default='1') if choice == '2': conf.risk = 2 conf.level = 3 elif choice == '3': conf.risk = 3 conf.level = 5 else: conf.risk = 1 conf.level = 1 if not conf.getAll: choice = None while choice is None or choice not in ("""", ""1"", ""2"", ""3""): message = ""Enumeration (--banner/--current-user/etc). 
Please choose:\n"" message += ""[1] Basic (default)\n[2] Intermediate\n[3] All"" choice = readInput(message, default='1') if choice == '2': options = WIZARD.INTERMEDIATE elif choice == '3': options = WIZARD.ALL else: options = WIZARD.BASIC for _ in options: conf.__setitem__(_, True) logger.debug(""muting sqlmap.. it will do the magic for you"") conf.verbose = 0 conf.batch = True conf.threads = 4 dataToStdout(""\nsqlmap is running, please wait..\n\n"") kb.wizardMode = True "," Presents simple wizard interface for beginner users ",7,253,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _useWizardInterface(): if not conf.wizard: return logger.info(""starting wizard interface"") while not conf.url: message = ""Please enter full target URL (-u): "" conf.url = readInput(message, default=None) message = ""%s data (--data) [Enter for None]: "" % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST) conf.data = readInput(message, default=None) if not (any('=' in _ for _ in (conf.url, conf.data)) or '*' in conf.url): warnMsg = ""no GET and/or %s parameter(s) found for testing "" % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST) warnMsg += ""(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). "" if not conf.crawlDepth and not conf.forms: warnMsg += ""Will search for forms"" conf.forms = True logger.warning(warnMsg) choice = None while choice is None or choice not in ("""", ""1"", ""2"", ""3""): message = ""Injection difficulty (--level/--risk). Please choose:\n"" message += ""[1] Normal (default)\n[2] Medium\n[3] Hard"" choice = readInput(message, default='1') if choice == '2': conf.risk = 2 conf.level = 3 elif choice == '3': conf.risk = 3 conf.level = 5 else: conf.risk = 1 conf.level = 1 if not conf.getAll: choice = None while choice is None or choice not in ("""", ""1"", ""2"", ""3""): message = ""Enumeration (--banner/--current-user/etc). Please choose:\n"" message += ""[1] Basic (default)\n[2] Intermediate\n[3] All"" choice = readInput(message, default='1') if choice == '2': options = WIZARD.INTERMEDIATE elif choice == '3': options = WIZARD.ALL else: options = WIZARD.BASIC for _ in options: conf.__setitem__(_, True) logger.debug(""muting sqlmap.. it will do the magic for you"") conf.verbose = 0 conf.batch = True conf.threads = 4 dataToStdout(""\nsqlmap is running, please wait..\n\n"") kb.wizardMode = True ``` ###Assistant : Presents simple wizard interface for beginner users " 1835,"async def run_migrations_online() -> None: engine = await db_interface.engine() versions_dir = context.get_x_argument(as_dictionary=True).get(""versions_dir"", None) if versions_dir is None: # if version dir is not explicitly provided determine versions location from dialect dialect = get_dialect(engine=engine) if dialect.name == ""postgresql"": versions_dir = Path(context.script.dir / ""postgresql"") elif dialect.name == ""sqlite"": versions_dir = Path(context.script.dir / ""sqlite"") else: raise ValueError(f""No versions dir exists for dialect: {dialect.name}"") context.script.version_locations = [versions_dir] "," Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. ",21,63,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def run_migrations_online() -> None: engine = await db_interface.engine() versions_dir = context.get_x_argument(as_dictionary=True).get(""versions_dir"", None) if versions_dir is None: # if version dir is not explicitly provided determine versions location from dialect dialect = get_dialect(engine=engine) if dialect.name == ""postgresql"": versions_dir = Path(context.script.dir / ""postgresql"") elif dialect.name == ""sqlite"": versions_dir = Path(context.script.dir / ""sqlite"") else: raise ValueError(f""No versions dir exists for dialect: {dialect.name}"") context.script.version_locations = [versions_dir] ``` ###Assistant : Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. " 1836,"def get_mop_query(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( , {""parent"": filters.get(""parent""), ""start"": start, ""page_len"": page_len, ""txt"": ""%%%s%%"" % txt}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs"," select mode_of_payment from `tabPayment Order Reference` where parent = %(parent)s and mode_of_payment like %(txt)s limit %(start)s, %(page_len)s",17,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_mop_query(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( , {""parent"": filters.get(""parent""), ""start"": start, ""page_len"": page_len, ""txt"": ""%%%s%%"" % txt}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs ``` ###Assistant : select mode_of_payment from `tabPayment Order Reference` where parent = %(parent)s and mode_of_payment like %(txt)s limit %(start)s, %(page_len)s " 1837,"def test_doc_pyplot_summary(): pyplot_docs = Path(__file__).parent / '../../../doc/api/pyplot_summary.rst' if not pyplot_docs.exists(): pytest.skip(""Documentation sources not available"") lines = pyplot_docs.read_text() m = re.search(r':nosignatures:\n\n(.*?)\n\n', lines, re.DOTALL) doc_functions = set(line.strip() for line in m.group(1).split('\n')) plot_commands = set(plt.get_plot_commands()) missing = plot_commands.difference(doc_functions) if missing: raise AssertionError( f""The following pyplot functions are not listed in the "" f""documentation. Please add them to doc/api/pyplot_summary.rst: "" f""{missing!r}"") extra = doc_functions.difference(plot_commands) if extra: raise AssertionError( f""The following functions are listed in the pyplot documentation, "" f""but they do not exist in pyplot. "" f""Please remove them from doc/api/pyplot_summary.rst: {extra!r}"") ",Test that pyplot_summary lists all the plot functions.,8,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_doc_pyplot_summary(): pyplot_docs = Path(__file__).parent / '../../../doc/api/pyplot_summary.rst' if not pyplot_docs.exists(): pytest.skip(""Documentation sources not available"") lines = pyplot_docs.read_text() m = re.search(r':nosignatures:\n\n(.*?)\n\n', lines, re.DOTALL) doc_functions = set(line.strip() for line in m.group(1).split('\n')) plot_commands = set(plt.get_plot_commands()) missing = plot_commands.difference(doc_functions) if missing: raise AssertionError( f""The following pyplot functions are not listed in the "" f""documentation. 
Please add them to doc/api/pyplot_summary.rst: "" f""{missing!r}"") extra = doc_functions.difference(plot_commands) if extra: raise AssertionError( f""The following functions are listed in the pyplot documentation, "" f""but they do not exist in pyplot. "" f""Please remove them from doc/api/pyplot_summary.rst: {extra!r}"") ``` ###Assistant : Test that pyplot_summary lists all the plot functions. " 1838,"def validate_child_on_delete(row, parent): if parent.doctype == ""Sales Order"": if flt(row.delivered_qty): frappe.throw( _(""Row #{0}: Cannot delete item {1} which has already been delivered"").format( row.idx, row.item_code ) ) if flt(row.work_order_qty): frappe.throw( _(""Row #{0}: Cannot delete item {1} which has work order assigned to it."").format( row.idx, row.item_code ) ) if flt(row.ordered_qty): frappe.throw( _(""Row #{0}: Cannot delete item {1} which is assigned to customer's purchase order."").format( row.idx, row.item_code ) ) if parent.doctype == ""Purchase Order"" and flt(row.received_qty): frappe.throw( _(""Row #{0}: Cannot delete item {1} which has already been received"").format( row.idx, row.item_code ) ) if flt(row.billed_amt): frappe.throw( _(""Row #{0}: Cannot delete item {1} which has already been billed."").format( row.idx, row.item_code ) ) ",Check if partially transacted item (row) is being deleted.,9,107,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_child_on_delete(row, parent): if parent.doctype == ""Sales Order"": if flt(row.delivered_qty): frappe.throw( _(""Row #{0}: Cannot delete item {1} which has already been delivered"").format( row.idx, row.item_code ) ) if flt(row.work_order_qty): frappe.throw( _(""Row #{0}: Cannot delete item {1} which has work order assigned to it."").format( row.idx, row.item_code ) ) if flt(row.ordered_qty): frappe.throw( _(""Row #{0}: Cannot delete item {1} which is assigned to customer's purchase order."").format( row.idx, row.item_code ) ) if parent.doctype == ""Purchase Order"" and flt(row.received_qty): frappe.throw( _(""Row #{0}: Cannot delete item {1} which has already been received"").format( row.idx, row.item_code ) ) if flt(row.billed_amt): frappe.throw( _(""Row #{0}: Cannot delete item {1} which has already been billed."").format( row.idx, row.item_code ) ) ``` ###Assistant : Check if partially transacted item (row) is being deleted. 
" 1839,"def _get_items(self): postprocess_items = {} # Debug Landmarks if (hasattr(self._args, 'debug_landmarks') and self._args.debug_landmarks): postprocess_items[""DebugLandmarks""] = None # Face Filter post processing if ((hasattr(self._args, ""filter"") and self._args.filter is not None) or (hasattr(self._args, ""nfilter"") and self._args.nfilter is not None)): if hasattr(self._args, ""detector""): detector = self._args.detector.replace(""-"", ""_"").lower() else: detector = ""cv2_dnn"" if hasattr(self._args, ""aligner""): aligner = self._args.aligner.replace(""-"", ""_"").lower() else: aligner = ""cv2_dnn"" face_filter = dict(detector=detector, aligner=aligner, multiprocess=not self._args.singleprocess) filter_lists = {} if hasattr(self._args, ""ref_threshold""): face_filter[""ref_threshold""] = self._args.ref_threshold for filter_type in ('filter', 'nfilter'): filter_args = getattr(self._args, filter_type, None) filter_args = None if not filter_args else filter_args filter_lists[filter_type] = filter_args face_filter[""filter_lists""] = filter_lists postprocess_items[""FaceFilter""] = {""kwargs"": face_filter} logger.debug(""Postprocess Items: %s"", postprocess_items) return postprocess_items "," Check the passed in command line arguments for requested actions, For any requested actions, add the item to the actions list along with any relevant arguments and keyword arguments. Returns ------- dict The name of the action to be performed as the key. Any action specific arguments and keyword arguments as the value. ",53,108,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_items(self): postprocess_items = {} # Debug Landmarks if (hasattr(self._args, 'debug_landmarks') and self._args.debug_landmarks): postprocess_items[""DebugLandmarks""] = None # Face Filter post processing if ((hasattr(self._args, ""filter"") and self._args.filter is not None) or (hasattr(self._args, ""nfilter"") and self._args.nfilter is not None)): if hasattr(self._args, ""detector""): detector = self._args.detector.replace(""-"", ""_"").lower() else: detector = ""cv2_dnn"" if hasattr(self._args, ""aligner""): aligner = self._args.aligner.replace(""-"", ""_"").lower() else: aligner = ""cv2_dnn"" face_filter = dict(detector=detector, aligner=aligner, multiprocess=not self._args.singleprocess) filter_lists = {} if hasattr(self._args, ""ref_threshold""): face_filter[""ref_threshold""] = self._args.ref_threshold for filter_type in ('filter', 'nfilter'): filter_args = getattr(self._args, filter_type, None) filter_args = None if not filter_args else filter_args filter_lists[filter_type] = filter_args face_filter[""filter_lists""] = filter_lists postprocess_items[""FaceFilter""] = {""kwargs"": face_filter} logger.debug(""Postprocess Items: %s"", postprocess_items) return postprocess_items ``` ###Assistant : Check the passed in command line arguments for requested actions, For any requested actions, add the item to the actions list along with any relevant arguments and keyword arguments. Returns ------- dict The name of the action to be performed as the key. Any action specific arguments and keyword arguments as the value. 
" 1840,"def _collapse_addresses_internal(addresses): # First merge to_merge = list(addresses) subnets = {} while to_merge: net = to_merge.pop() supernet = net.supernet() existing = subnets.get(supernet) if existing is None: subnets[supernet] = net elif existing != net: # Merge consecutive subnets del subnets[supernet] to_merge.append(supernet) # Then iterate over resulting networks, skipping subsumed subnets last = None for net in sorted(subnets.values()): if last is not None: # Since they are sorted, last.network_address <= net.network_address # is a given. if last.broadcast_address >= net.broadcast_address: continue yield net last = net ","Loops through the addresses, collapsing concurrent netblocks. Example: ip1 = IPv4Network('192.0.2.0/26') ip2 = IPv4Network('192.0.2.64/26') ip3 = IPv4Network('192.0.2.128/26') ip4 = IPv4Network('192.0.2.192/26') _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> [IPv4Network('192.0.2.0/24')] This shouldn't be called directly; it is called via collapse_addresses([]). Args: addresses: A list of IPv4Network's or IPv6Network's Returns: A list of IPv4Network's or IPv6Network's depending on what we were passed. ",57,83,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _collapse_addresses_internal(addresses): # First merge to_merge = list(addresses) subnets = {} while to_merge: net = to_merge.pop() supernet = net.supernet() existing = subnets.get(supernet) if existing is None: subnets[supernet] = net elif existing != net: # Merge consecutive subnets del subnets[supernet] to_merge.append(supernet) # Then iterate over resulting networks, skipping subsumed subnets last = None for net in sorted(subnets.values()): if last is not None: # Since they are sorted, last.network_address <= net.network_address # is a given. if last.broadcast_address >= net.broadcast_address: continue yield net last = net ``` ###Assistant : Loops through the addresses, collapsing concurrent netblocks. Example: ip1 = IPv4Network('192.0.2.0/26') ip2 = IPv4Network('192.0.2.64/26') ip3 = IPv4Network('192.0.2.128/26') ip4 = IPv4Network('192.0.2.192/26') _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> [IPv4Network('192.0.2.0/24')] This shouldn't be called directly; it is called via collapse_addresses([]). Args: addresses: A list of IPv4Network's or IPv6Network's Returns: A list of IPv4Network's or IPv6Network's depending on what we were passed. " 1841,"def inaxes(self, xy): axes_list = [a for a in self.figure.get_axes() if a.patch.contains_point(xy) and a.get_visible()] if axes_list: axes = cbook._topmost_artist(axes_list) else: axes = None return axes "," Return the topmost visible `~.axes.Axes` containing the point *xy*. Parameters ---------- xy : (float, float) (x, y) pixel positions from left/bottom of the canvas. Returns ------- `~matplotlib.axes.Axes` or None The topmost visible Axes containing the point, or None if there is no Axes at the point. ",46,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inaxes(self, xy): axes_list = [a for a in self.figure.get_axes() if a.patch.contains_point(xy) and a.get_visible()] if axes_list: axes = cbook._topmost_artist(axes_list) else: axes = None return axes ``` ###Assistant : Return the topmost visible `~.axes.Axes` containing the point *xy*. Parameters ---------- xy : (float, float) (x, y) pixel positions from left/bottom of the canvas. 
Returns ------- `~matplotlib.axes.Axes` or None The topmost visible Axes containing the point, or None if there is no Axes at the point. " 1842,"def upgrade(): conn = op.get_bind() is_sqlite = bool(conn.dialect.name == ""sqlite"") is_mssql = bool(conn.dialect.name == ""mssql"") if is_sqlite: op.execute(""PRAGMA foreign_keys=off"") with op.batch_alter_table('dag_run', schema=None) as batch_op: batch_op.add_column(sa.Column('last_scheduling_decision', TIMESTAMP, nullable=True)) batch_op.create_index('idx_last_scheduling_decision', ['last_scheduling_decision'], unique=False) batch_op.add_column(sa.Column('dag_hash', sa.String(32), nullable=True)) with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.add_column(sa.Column('next_dagrun', TIMESTAMP, nullable=True)) batch_op.add_column(sa.Column('next_dagrun_create_after', TIMESTAMP, nullable=True)) # Create with nullable and no default, then ALTER to set values, to avoid table level lock batch_op.add_column(sa.Column('concurrency', sa.Integer(), nullable=True)) batch_op.add_column(sa.Column('has_task_concurrency_limits', sa.Boolean(), nullable=True)) batch_op.create_index('idx_next_dagrun_create_after', ['next_dagrun_create_after'], unique=False) try: from airflow.configuration import conf concurrency = conf.getint('core', 'dag_concurrency', fallback=16) except: # noqa concurrency = 16 # Set it to true here as it makes us take the slow/more complete path, and when it's next parsed by the # DagParser it will get set to correct value. op.execute( f ) with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.alter_column('concurrency', type_=sa.Integer(), nullable=False) batch_op.alter_column('has_task_concurrency_limits', type_=sa.Boolean(), nullable=False) if is_sqlite: op.execute(""PRAGMA foreign_keys=on"") ","Apply Add ``scheduling_decision`` to ``DagRun`` and ``DAG`` UPDATE dag SET concurrency={concurrency}, has_task_concurrency_limits={1 if is_sqlite or is_mssql else sa.true()} where concurrency IS NULL ",22,135,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def upgrade(): conn = op.get_bind() is_sqlite = bool(conn.dialect.name == ""sqlite"") is_mssql = bool(conn.dialect.name == ""mssql"") if is_sqlite: op.execute(""PRAGMA foreign_keys=off"") with op.batch_alter_table('dag_run', schema=None) as batch_op: batch_op.add_column(sa.Column('last_scheduling_decision', TIMESTAMP, nullable=True)) batch_op.create_index('idx_last_scheduling_decision', ['last_scheduling_decision'], unique=False) batch_op.add_column(sa.Column('dag_hash', sa.String(32), nullable=True)) with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.add_column(sa.Column('next_dagrun', TIMESTAMP, nullable=True)) batch_op.add_column(sa.Column('next_dagrun_create_after', TIMESTAMP, nullable=True)) # Create with nullable and no default, then ALTER to set values, to avoid table level lock batch_op.add_column(sa.Column('concurrency', sa.Integer(), nullable=True)) batch_op.add_column(sa.Column('has_task_concurrency_limits', sa.Boolean(), nullable=True)) batch_op.create_index('idx_next_dagrun_create_after', ['next_dagrun_create_after'], unique=False) try: from airflow.configuration import conf concurrency = conf.getint('core', 'dag_concurrency', fallback=16) except: # noqa concurrency = 16 # Set it to true here as it makes us take the slow/more complete path, and when it's next parsed by the # DagParser it will get set to correct value. 
op.execute( f ) with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.alter_column('concurrency', type_=sa.Integer(), nullable=False) batch_op.alter_column('has_task_concurrency_limits', type_=sa.Boolean(), nullable=False) if is_sqlite: op.execute(""PRAGMA foreign_keys=on"") ``` ###Assistant : Apply Add ``scheduling_decision`` to ``DagRun`` and ``DAG`` UPDATE dag SET concurrency={concurrency}, has_task_concurrency_limits={1 if is_sqlite or is_mssql else sa.true()} where concurrency IS NULL " 1843,"def enrich_ledger_entries_with_event_data(self, ledger_entries): # Build up a list of the subset of ledger entries we are expected # to enrich with event metadata. event_id_to_ledger_entry = {} for entry in ledger_entries: maybe_event_id: Optional[str] = entry.get(""event_id"") if maybe_event_id: event_id_to_ledger_entry[maybe_event_id] = entry # Nothing to enrich; short-circuit if len(event_id_to_ledger_entry) == 0: return ledger_entries "," Enriches a list of ledger entries with event metadata (applies only to decrements that have an event_id property set, i.e. automated decrements to the ledger applied by Orb). ",28,50,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def enrich_ledger_entries_with_event_data(self, ledger_entries): # Build up a list of the subset of ledger entries we are expected # to enrich with event metadata. event_id_to_ledger_entry = {} for entry in ledger_entries: maybe_event_id: Optional[str] = entry.get(""event_id"") if maybe_event_id: event_id_to_ledger_entry[maybe_event_id] = entry # Nothing to enrich; short-circuit if len(event_id_to_ledger_entry) == 0: return ledger_entries ``` ###Assistant : Enriches a list of ledger entries with event metadata (applies only to decrements that have an event_id property set, i.e. automated decrements to the ledger applied by Orb). " 1844,"def test_cache() -> None: ledger_store = DictLedgerStore() user_key = b""1322"" ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key) assert ( ledger._cache_constant2epsilon[0] == 0.05372712063485988 ), ""The first value in the cache is incorrect"" assert ( ledger._cache_constant2epsilon[1] == 0.07773597369831031 ), ""Has the DP cache been changed?"" rdp_700k = convert_constants_to_indices(np.array([700_000])) assert ( ledger._cache_constant2epsilon.take(rdp_700k)[0] == 706213.1816144075 ), ""Has the DP cache been changed?"" rdp_50 = convert_constants_to_indices(np.array([50])) assert ( ledger._cache_constant2epsilon.take(rdp_50)[0] == 100.68990516105825 ), ""Has the DP cache been changed?"" assert ( len(ledger._cache_constant2epsilon) >= 1_200_000 ), ""Has the cache been changed?"" ",Ensure the most up to date RDP-to-epsilon cache is being used.,11,81,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_cache() -> None: ledger_store = DictLedgerStore() user_key = b""1322"" ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key) assert ( ledger._cache_constant2epsilon[0] == 0.05372712063485988 ), ""The first value in the cache is incorrect"" assert ( ledger._cache_constant2epsilon[1] == 0.07773597369831031 ), ""Has the DP cache been changed?"" rdp_700k = convert_constants_to_indices(np.array([700_000])) assert ( ledger._cache_constant2epsilon.take(rdp_700k)[0] == 706213.1816144075 ), ""Has the DP cache been changed?"" rdp_50 = convert_constants_to_indices(np.array([50])) assert ( ledger._cache_constant2epsilon.take(rdp_50)[0] == 100.68990516105825 ), ""Has the DP cache been changed?"" assert ( len(ledger._cache_constant2epsilon) >= 1_200_000 ), ""Has the cache been changed?"" ``` ###Assistant : Ensure the most up to date RDP-to-epsilon cache is being used. " 1845,"def get_lexer_for_mimetype(_mime, **options): for modname, name, _, _, mimetypes in LEXERS.values(): if _mime in mimetypes: if name not in _lexer_cache: _load_lexers(modname) return _lexer_cache[name](**options) for cls in find_plugin_lexers(): if _mime in cls.mimetypes: return cls(**options) raise ClassNotFound('no lexer for mimetype %r found' % _mime) ","Get a lexer for a mimetype. Raises ClassNotFound if not found. ",11,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_lexer_for_mimetype(_mime, **options): for modname, name, _, _, mimetypes in LEXERS.values(): if _mime in mimetypes: if name not in _lexer_cache: _load_lexers(modname) return _lexer_cache[name](**options) for cls in find_plugin_lexers(): if _mime in cls.mimetypes: return cls(**options) raise ClassNotFound('no lexer for mimetype %r found' % _mime) ``` ###Assistant : Get a lexer for a mimetype. Raises ClassNotFound if not found. " 1846,"def test_glm_regression(solver, fit_intercept, glm_dataset): model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset params = dict( alpha=alpha, fit_intercept=fit_intercept, # While _GeneralizedLinearRegressor exposes the solver parameter, public # estimators currently do not, and lbfgs is the only solver anyway. # TODO: Expose solver as soon as we have a second solver to choose from. # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) X = X[:, :-1] # remove intercept if fit_intercept: coef = coef_with_intercept intercept = coef[-1] coef = coef[:-1] else: coef = coef_without_intercept intercept = 0 model.fit(X, y) rtol = 5e-5 assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) # Same with sample_weight. model = ( clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) ) assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) @pytest.mark.parametrize(""solver"", SOLVERS) @pytest.mark.parametrize(""fit_intercept"", [True, False])","Test that GLM converges for all solvers to correct solution. We work with a simple constructed data set with known solution. ",21,127,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_glm_regression(solver, fit_intercept, glm_dataset): model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset params = dict( alpha=alpha, fit_intercept=fit_intercept, # While _GeneralizedLinearRegressor exposes the solver parameter, public # estimators currently do not, and lbfgs is the only solver anyway. # TODO: Expose solver as soon as we have a second solver to choose from. # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) X = X[:, :-1] # remove intercept if fit_intercept: coef = coef_with_intercept intercept = coef[-1] coef = coef[:-1] else: coef = coef_without_intercept intercept = 0 model.fit(X, y) rtol = 5e-5 assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) # Same with sample_weight. model = ( clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) ) assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) @pytest.mark.parametrize(""solver"", SOLVERS) @pytest.mark.parametrize(""fit_intercept"", [True, False]) ``` ###Assistant : Test that GLM converges for all solvers to correct solution. We work with a simple constructed data set with known solution. " 1847,"def _cuda_check(self): with Popen(""nvcc -V"", shell=True, stdout=PIPE, stderr=PIPE) as chk: stdout, stderr = chk.communicate() if not stderr: version = re.search(r"".*release (?P\d+\.\d+)"", stdout.decode(locale.getpreferredencoding())) self.cuda_version = version.groupdict().get(""cuda"", None) locate = ""where"" if self._os == ""windows"" else ""which"" path = os.popen(f""{locate} nvcc"").read() if path: path = path.split(""\n"")[0] # Split multiple entries and take first found while True: # Get Cuda root folder path, split = os.path.split(path) if split == ""bin"": break self.cuda_path = path return # Failed to load nvcc, manual check getattr(self, f""_cuda_check_{self._os}"")() "," Obtain the location and version of Cuda and populate :attr:`cuda_version` and :attr:`cuda_path` Initially just calls `nvcc -V` to get the installed version of Cuda currently in use. If this fails, drills down to more OS specific checking methods. ",38,81,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cuda_check(self): with Popen(""nvcc -V"", shell=True, stdout=PIPE, stderr=PIPE) as chk: stdout, stderr = chk.communicate() if not stderr: version = re.search(r"".*release (?P\d+\.\d+)"", stdout.decode(locale.getpreferredencoding())) self.cuda_version = version.groupdict().get(""cuda"", None) locate = ""where"" if self._os == ""windows"" else ""which"" path = os.popen(f""{locate} nvcc"").read() if path: path = path.split(""\n"")[0] # Split multiple entries and take first found while True: # Get Cuda root folder path, split = os.path.split(path) if split == ""bin"": break self.cuda_path = path return # Failed to load nvcc, manual check getattr(self, f""_cuda_check_{self._os}"")() ``` ###Assistant : Obtain the location and version of Cuda and populate :attr:`cuda_version` and :attr:`cuda_path` Initially just calls `nvcc -V` to get the installed version of Cuda currently in use. If this fails, drills down to more OS specific checking methods. 
" 1848,"def test_map_product_same(self, dag_maker, session): outputs = [] with dag_maker(dag_id=""product_same"", session=session) as dag: ",Test a mapped task can refer to the same source multiple times.,12,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_map_product_same(self, dag_maker, session): outputs = [] with dag_maker(dag_id=""product_same"", session=session) as dag: ``` ###Assistant : Test a mapped task can refer to the same source multiple times. " 1849,"def get_deepbooru_tags(pil_image, threshold=0.5): from modules import shared # prevents circular reference create_deepbooru_process(threshold) shared.deepbooru_process_return[""value""] = -1 shared.deepbooru_process_queue.put(pil_image) while shared.deepbooru_process_return[""value""] == -1: time.sleep(0.2) release_process() return ret "," This method is for running only one image at a time for simple use. Used to the img2img interrogate. ",19,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_deepbooru_tags(pil_image, threshold=0.5): from modules import shared # prevents circular reference create_deepbooru_process(threshold) shared.deepbooru_process_return[""value""] = -1 shared.deepbooru_process_queue.put(pil_image) while shared.deepbooru_process_return[""value""] == -1: time.sleep(0.2) release_process() return ret ``` ###Assistant : This method is for running only one image at a time for simple use. Used to the img2img interrogate. " 1850,"def test_update_from_select(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = ret = self.command_executor.execute_command( parse_sql(sql, dialect='mindsdb')) assert ret.error_code is None # 1 select and 2 updates assert mock_handler().query.call_count == 3 # second is update assert mock_handler().query.call_args_list[1][0][0].to_string() == ""update table2 set a1=1, c1='ccc' where (a1 = 1) AND (b1 = 'ccc')"" # @patch('mindsdb.integrations.handlers.postgres_handler.Handler') # def test_union_type_mismatch(self, mock_handler): # self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # # sql = # from mindsdb.api.mysql.mysql_proxy.utilities import ErSqlWrongArguments # with pytest.raises(ErSqlWrongArguments): # self.command_executor.execute_command(parse_sql(sql, dialect='mindsdb')) "," update pg.table2 set a1 = df.a, c1 = df.c from ( SELECT model.a as a, model.b as b, model.p as c FROM pg.tasks as t JOIN mindsdb.task_model as model WHERE t.a=1 ) as df where table2.a1 = df.a and table2.b1 = df.b # SELECT a, b FROM pg.tasks # UNION # SELECT b, a FROM pg.tasks # ",57,101,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_update_from_select(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = ret = self.command_executor.execute_command( parse_sql(sql, dialect='mindsdb')) assert ret.error_code is None # 1 select and 2 updates assert mock_handler().query.call_count == 3 # second is update assert mock_handler().query.call_args_list[1][0][0].to_string() == ""update table2 set a1=1, c1='ccc' where (a1 = 1) AND (b1 = 'ccc')"" # @patch('mindsdb.integrations.handlers.postgres_handler.Handler') # def test_union_type_mismatch(self, mock_handler): # self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # # sql = # from mindsdb.api.mysql.mysql_proxy.utilities import ErSqlWrongArguments # with pytest.raises(ErSqlWrongArguments): # self.command_executor.execute_command(parse_sql(sql, dialect='mindsdb')) ``` ###Assistant : update pg.table2 set a1 = df.a, c1 = df.c from ( SELECT model.a as a, model.b as b, model.p as c FROM pg.tasks as t JOIN mindsdb.task_model as model WHERE t.a=1 ) as df where table2.a1 = df.a and table2.b1 = df.b # SELECT a, b FROM pg.tasks # UNION # SELECT b, a FROM pg.tasks # " 1851,"def get_total_shipments(scorecard): supplier = frappe.get_doc(""Supplier"", scorecard.supplier) # Look up all PO Items with delivery dates between our dates data = frappe.db.sql( , {""supplier"": supplier.name, ""start_date"": scorecard.start_date, ""end_date"": scorecard.end_date}, as_dict=0, )[0][0] if not data: data = 0 return data ","Gets the total number of ordered shipments to arrive in the period (based on Purchase Receipts) SELECT COUNT(po_item.base_amount) FROM `tabPurchase Order Item` po_item, `tabPurchase Order` po WHERE po.supplier = %(supplier)s AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s AND po_item.docstatus = 1 AND po_item.parent = po.name",44,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_total_shipments(scorecard): supplier = frappe.get_doc(""Supplier"", scorecard.supplier) # Look up all PO Items with delivery dates between our dates data = frappe.db.sql( , {""supplier"": supplier.name, ""start_date"": scorecard.start_date, ""end_date"": scorecard.end_date}, as_dict=0, )[0][0] if not data: data = 0 return data ``` ###Assistant : Gets the total number of ordered shipments to arrive in the period (based on Purchase Receipts) SELECT COUNT(po_item.base_amount) FROM `tabPurchase Order Item` po_item, `tabPurchase Order` po WHERE po.supplier = %(supplier)s AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s AND po_item.docstatus = 1 AND po_item.parent = po.name " 1852,"def source(object): print('In file: %s' % inspect.getsourcefile(object)) print(inspect.getsource(object)) "," Prints the source code of a given object. .. deprecated:: 1.3 The ``source()`` function is deprecated. Use ``inspect.getsource()`` or ``??`` in IPython/Jupyter instead. ",23,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def source(object): print('In file: %s' % inspect.getsourcefile(object)) print(inspect.getsource(object)) ``` ###Assistant : Prints the source code of a given object. .. 
deprecated:: 1.3 The ``source()`` function is deprecated. Use ``inspect.getsource()`` or ``??`` in IPython/Jupyter instead. " 1853,"def validate_axis(axis, input_shape): input_shape = tf.TensorShape(input_shape) rank = input_shape.rank if not rank: raise ValueError( f""Input has undefined rank. Received: input_shape={input_shape}"" ) # Convert axis to list and resolve negatives if isinstance(axis, int): axis = [axis] else: axis = list(axis) for idx, x in enumerate(axis): if x < 0: axis[idx] = rank + x # Validate axes for x in axis: if x < 0 or x >= rank: raise ValueError( ""Invalid value for `axis` argument. "" ""Expected 0 <= axis < inputs.rank (with "" f""inputs.rank={rank}). Received: axis={tuple(axis)}"" ) if len(axis) != len(set(axis)): raise ValueError(f""Duplicate axis: {tuple(axis)}"") return axis ","Validate an axis value and returns its standardized form. Args: axis: Value to validate. Can be an integer or a list/tuple of integers. Integers may be negative. input_shape: Reference input shape that the axis/axes refer to. Returns: Normalized form of `axis`, i.e. a list with all-positive values. ",47,98,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_axis(axis, input_shape): input_shape = tf.TensorShape(input_shape) rank = input_shape.rank if not rank: raise ValueError( f""Input has undefined rank. Received: input_shape={input_shape}"" ) # Convert axis to list and resolve negatives if isinstance(axis, int): axis = [axis] else: axis = list(axis) for idx, x in enumerate(axis): if x < 0: axis[idx] = rank + x # Validate axes for x in axis: if x < 0 or x >= rank: raise ValueError( ""Invalid value for `axis` argument. "" ""Expected 0 <= axis < inputs.rank (with "" f""inputs.rank={rank}). Received: axis={tuple(axis)}"" ) if len(axis) != len(set(axis)): raise ValueError(f""Duplicate axis: {tuple(axis)}"") return axis ``` ###Assistant : Validate an axis value and returns its standardized form. Args: axis: Value to validate. Can be an integer or a list/tuple of integers. Integers may be negative. input_shape: Reference input shape that the axis/axes refer to. Returns: Normalized form of `axis`, i.e. a list with all-positive values. " 1854,"def _filter_to(self, it, pred): buf = '' idx = 0 for i, t, v in it: if pred(t): if buf: yield idx, None, buf buf = '' yield i, t, v else: if not buf: idx = i buf += v if buf: yield idx, None, buf "," Keep only the tokens that match `pred`, merge the others together ",11,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _filter_to(self, it, pred): buf = '' idx = 0 for i, t, v in it: if pred(t): if buf: yield idx, None, buf buf = '' yield i, t, v else: if not buf: idx = i buf += v if buf: yield idx, None, buf ``` ###Assistant : Keep only the tokens that match `pred`, merge the others together " 1855,"def entity_registry_enabled_default(self) -> bool: return bool(self._config[CONF_ENABLED_BY_DEFAULT]) ",Return if the entity should be enabled when first added to the entity registry.,14,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def entity_registry_enabled_default(self) -> bool: return bool(self._config[CONF_ENABLED_BY_DEFAULT]) ``` ###Assistant : Return if the entity should be enabled when first added to the entity registry. 
" 1856,"def bisectors(self): # use lines containing sides so containment check during # intersection calculation can be avoided, thus reducing # the processing time for calculating the bisectors s = [Line(l) for l in self.sides] v = self.vertices c = self.incenter l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0]) l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0]) l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0]) return {v[0]: l1, v[1]: l2, v[2]: l3} ","The angle bisectors of the triangle. An angle bisector of a triangle is a straight line through a vertex which cuts the corresponding angle in half. Returns ======= bisectors : dict Each key is a vertex (Point) and each value is the corresponding bisector (Segment). See Also ======== sympy.geometry.point.Point, sympy.geometry.line.Segment Examples ======== >>> from sympy import Point, Triangle, Segment >>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1) >>> t = Triangle(p1, p2, p3) >>> from sympy import sqrt >>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1)) True ",91,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bisectors(self): # use lines containing sides so containment check during # intersection calculation can be avoided, thus reducing # the processing time for calculating the bisectors s = [Line(l) for l in self.sides] v = self.vertices c = self.incenter l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0]) l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0]) l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0]) return {v[0]: l1, v[1]: l2, v[2]: l3} ``` ###Assistant : The angle bisectors of the triangle. An angle bisector of a triangle is a straight line through a vertex which cuts the corresponding angle in half. Returns ======= bisectors : dict Each key is a vertex (Point) and each value is the corresponding bisector (Segment). 
See Also ======== sympy.geometry.point.Point, sympy.geometry.line.Segment Examples ======== >>> from sympy import Point, Triangle, Segment >>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1) >>> t = Triangle(p1, p2, p3) >>> from sympy import sqrt >>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1)) True " 1857,"def query(query, filters={}, top_k_reader=5, top_k_retriever=5) -> Tuple[List[Dict[str, Any]], Dict[str, str]]: url = f""{API_ENDPOINT}/{DOC_REQUEST}"" params = {""filters"": filters, ""Retriever"": {""top_k"": top_k_retriever}, ""Reader"": {""top_k"": top_k_reader}} req = {""query"": query, ""params"": params} response_raw = requests.post(url, json=req) if response_raw.status_code >= 400 and response_raw.status_code != 503: raise Exception(f""{vars(response_raw)}"") response = response_raw.json() if ""errors"" in response: raise Exception("", "".join(response[""errors""])) # Format response results = [] answers = response[""answers""] for answer in answers: if answer.get(""answer"", None): results.append( { ""context"": ""..."" + answer[""context""] + ""..."", ""answer"": answer.get(""answer"", None), ""source"": answer[""meta""][""name""], ""relevance"": round(answer[""score""] * 100, 2), ""document"": [doc for doc in response[""documents""] if doc[""id""] == answer[""document_id""]][0], ""offset_start_in_doc"": answer[""offsets_in_document""][0][""start""], ""_raw"": answer, } ) else: results.append( { ""context"": None, ""answer"": None, ""document"": None, ""relevance"": round(answer[""score""] * 100, 2), ""_raw"": answer, } ) return results, response "," Send a query to the REST API and parse the answer. Returns both a ready-to-use representation of the results and the raw JSON. ",23,124,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def query(query, filters={}, top_k_reader=5, top_k_retriever=5) -> Tuple[List[Dict[str, Any]], Dict[str, str]]: url = f""{API_ENDPOINT}/{DOC_REQUEST}"" params = {""filters"": filters, ""Retriever"": {""top_k"": top_k_retriever}, ""Reader"": {""top_k"": top_k_reader}} req = {""query"": query, ""params"": params} response_raw = requests.post(url, json=req) if response_raw.status_code >= 400 and response_raw.status_code != 503: raise Exception(f""{vars(response_raw)}"") response = response_raw.json() if ""errors"" in response: raise Exception("", "".join(response[""errors""])) # Format response results = [] answers = response[""answers""] for answer in answers: if answer.get(""answer"", None): results.append( { ""context"": ""..."" + answer[""context""] + ""..."", ""answer"": answer.get(""answer"", None), ""source"": answer[""meta""][""name""], ""relevance"": round(answer[""score""] * 100, 2), ""document"": [doc for doc in response[""documents""] if doc[""id""] == answer[""document_id""]][0], ""offset_start_in_doc"": answer[""offsets_in_document""][0][""start""], ""_raw"": answer, } ) else: results.append( { ""context"": None, ""answer"": None, ""document"": None, ""relevance"": round(answer[""score""] * 100, 2), ""_raw"": answer, } ) return results, response ``` ###Assistant : Send a query to the REST API and parse the answer. Returns both a ready-to-use representation of the results and the raw JSON. 
" 1858,"def format_target_temperature(target_temperature): return str(round(float(target_temperature) * 2, 0) / 2).rstrip(""0"").rstrip(""."") ","Format target temperature to be sent to the Daikin unit, rounding to nearest half degree.",15,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def format_target_temperature(target_temperature): return str(round(float(target_temperature) * 2, 0) / 2).rstrip(""0"").rstrip(""."") ``` ###Assistant : Format target temperature to be sent to the Daikin unit, rounding to nearest half degree. " 1859,"def get_data(filters=None): data = [] emirates, amounts_by_emirate = append_vat_on_sales(data, filters) append_vat_on_expenses(data, filters) return data, emirates, amounts_by_emirate ",Returns the list of dictionaries. Each dictionary is a row in the datatable and chart data.,16,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_data(filters=None): data = [] emirates, amounts_by_emirate = append_vat_on_sales(data, filters) append_vat_on_expenses(data, filters) return data, emirates, amounts_by_emirate ``` ###Assistant : Returns the list of dictionaries. Each dictionary is a row in the datatable and chart data. " 1860,"def dict(self, *args, **kwargs): kwargs.setdefault(""exclude_none"", True) return super().dict(*args, **kwargs) ","Exclude `None` fields by default to comply with the OpenAPI spec. ",11,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dict(self, *args, **kwargs): kwargs.setdefault(""exclude_none"", True) return super().dict(*args, **kwargs) ``` ###Assistant : Exclude `None` fields by default to comply with the OpenAPI spec. " 1861,"def extract_column_names(self) -> Dict[str, Tuple[str, str]]: fields = [] for field in self.properties.keys(): if not is_airbyte_column(field): fields.append(field) result = {} field_names = set() for field in fields: field_name = self.name_transformer.normalize_column_name(field, in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True) if field_name_lookup in field_names: # TODO handle column name duplicates or collisions deterministically in this stream for i in range(1, 1000): field_name = self.name_transformer.normalize_column_name(f""{field}_{i}"", in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(f""{field}_{i}"", in_jinja=True) if field_name_lookup not in field_names: break field_names.add(field_name_lookup) result[field] = (field_name, jinja_name) return result "," Generate a mapping of JSON properties to normalized SQL Column names, handling collisions and avoid duplicate names The mapped value to a field property is a tuple where: - the first value is the normalized ""raw"" column name - the second value is the normalized quoted column name to be used in jinja context ",54,83,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def extract_column_names(self) -> Dict[str, Tuple[str, str]]: fields = [] for field in self.properties.keys(): if not is_airbyte_column(field): fields.append(field) result = {} field_names = set() for field in fields: field_name = self.name_transformer.normalize_column_name(field, in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True) if field_name_lookup in field_names: # TODO handle column name duplicates or collisions deterministically in this stream for i in range(1, 1000): field_name = self.name_transformer.normalize_column_name(f""{field}_{i}"", in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(f""{field}_{i}"", in_jinja=True) if field_name_lookup not in field_names: break field_names.add(field_name_lookup) result[field] = (field_name, jinja_name) return result ``` ###Assistant : Generate a mapping of JSON properties to normalized SQL Column names, handling collisions and avoid duplicate names The mapped value to a field property is a tuple where: - the first value is the normalized ""raw"" column name - the second value is the normalized quoted column name to be used in jinja context " 1862,"def _build_paths_from_predecessors(sources, target, pred): if target not in pred: raise nx.NetworkXNoPath(f""Target {target} cannot be reached from given sources"") seen = {target} stack = [[target, 0]] top = 0 while top >= 0: node, i = stack[top] if node in sources: yield [p for p, n in reversed(stack[: top + 1])] if len(pred[node]) > i: stack[top][1] = i + 1 next = pred[node][i] if next in seen: continue else: seen.add(next) top += 1 if top == len(stack): stack.append([next, 0]) else: stack[top][:] = [next, 0] else: seen.discard(node) top -= 1 ","Compute all simple paths to target, given the predecessors found in pred, terminating when any source in sources is found. Parameters ---------- sources : set Starting nodes for path. target : node Ending node for path. pred : dict A dictionary of predecessor lists, keyed by node Returns ------- paths : generator of lists A generator of all paths between source and target. Raises ------ NetworkXNoPath If `target` cannot be reached from `source`. Notes ----- There may be many paths between the sources and target. If there are cycles among the predecessors, this function will not produce all possible paths because doing so would produce infinitely many paths of unbounded length -- instead, we only produce simple paths. See Also -------- shortest_path single_source_shortest_path all_pairs_shortest_path all_shortest_paths bellman_ford_path ",126,88,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _build_paths_from_predecessors(sources, target, pred): if target not in pred: raise nx.NetworkXNoPath(f""Target {target} cannot be reached from given sources"") seen = {target} stack = [[target, 0]] top = 0 while top >= 0: node, i = stack[top] if node in sources: yield [p for p, n in reversed(stack[: top + 1])] if len(pred[node]) > i: stack[top][1] = i + 1 next = pred[node][i] if next in seen: continue else: seen.add(next) top += 1 if top == len(stack): stack.append([next, 0]) else: stack[top][:] = [next, 0] else: seen.discard(node) top -= 1 ``` ###Assistant : Compute all simple paths to target, given the predecessors found in pred, terminating when any source in sources is found. Parameters ---------- sources : set Starting nodes for path. target : node Ending node for path. pred : dict A dictionary of predecessor lists, keyed by node Returns ------- paths : generator of lists A generator of all paths between source and target. Raises ------ NetworkXNoPath If `target` cannot be reached from `source`. Notes ----- There may be many paths between the sources and target. If there are cycles among the predecessors, this function will not produce all possible paths because doing so would produce infinitely many paths of unbounded length -- instead, we only produce simple paths. See Also -------- shortest_path single_source_shortest_path all_pairs_shortest_path all_shortest_paths bellman_ford_path " 1863,"def is_connected(self) -> bool: return self._backend is not None and self._backend.is_connected ",Return True if the client is connected to a device.,10,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_connected(self) -> bool: return self._backend is not None and self._backend.is_connected ``` ###Assistant : Return True if the client is connected to a device. " 1864,"def _create_gnu_long_header(cls, name, type, encoding, errors): name = name.encode(encoding, errors) + NUL info = {} info[""name""] = ""././@LongLink"" info[""type""] = type info[""size""] = len(name) info[""magic""] = GNU_MAGIC # create extended header + name blocks. return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ cls._create_payload(name) ","Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name. ",8,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_gnu_long_header(cls, name, type, encoding, errors): name = name.encode(encoding, errors) + NUL info = {} info[""name""] = ""././@LongLink"" info[""type""] = type info[""size""] = len(name) info[""magic""] = GNU_MAGIC # create extended header + name blocks. return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ cls._create_payload(name) ``` ###Assistant : Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name. " 1865,"def genocchi_poly(n, x=None, polys=False): if n < 0: raise ValueError(""Cannot generate Genocchi polynomial of degree %s"" % (n-1)) poly = DMP(dup_genocchi(int(n), ZZ), ZZ) if x is not None: poly = Poly.new(poly, x) else: poly = PurePoly.new(poly, Dummy('x')) return poly if polys else poly.as_expr() ","Generates the Genocchi polynomial `\operatorname{G}_n(x)`. `\operatorname{G}_n(x)` is twice the difference between the plain and central Bernoulli polynomials, so has degree `n-1`: .. 
math :: \operatorname{G}_n(x) = 2 (\operatorname{B}_n(x) - \operatorname{B}_n^c(x)) The factor of 2 in the definition endows `\operatorname{G}_n(x)` with integer coefficients. Parameters ========== n : int Degree of the polynomial plus one. x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression. ",70,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def genocchi_poly(n, x=None, polys=False): if n < 0: raise ValueError(""Cannot generate Genocchi polynomial of degree %s"" % (n-1)) poly = DMP(dup_genocchi(int(n), ZZ), ZZ) if x is not None: poly = Poly.new(poly, x) else: poly = PurePoly.new(poly, Dummy('x')) return poly if polys else poly.as_expr() ``` ###Assistant : Generates the Genocchi polynomial `\operatorname{G}_n(x)`. `\operatorname{G}_n(x)` is twice the difference between the plain and central Bernoulli polynomials, so has degree `n-1`: .. math :: \operatorname{G}_n(x) = 2 (\operatorname{B}_n(x) - \operatorname{B}_n^c(x)) The factor of 2 in the definition endows `\operatorname{G}_n(x)` with integer coefficients. Parameters ========== n : int Degree of the polynomial plus one. x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression. " 1866,"def _triage_segments(window, nperseg, input_length): # parse window; if array like, then set nperseg = win.shape if isinstance(window, (str, tuple)): # if nperseg not specified if nperseg is None: nperseg = 256 # then change to default if nperseg > input_length: warnings.warn(f'nperseg = {nperseg} is greater than input length ' f' = {input_length}, using nperseg = {nperseg}') nperseg = input_length win = jnp.array(osp_signal.get_window(window, nperseg)) else: win = jnp.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if input_length < win.shape[-1]: raise ValueError('window is longer than input signal') if nperseg is None: nperseg = win.shape[0] elif nperseg is not None: if nperseg != win.shape[0]: raise ValueError(""value specified for nperseg is different"" "" from length of window"") return win, nperseg "," Parses window and nperseg arguments for spectrogram and _spectral_helper. This is a helper function, not meant to be called externally. Parameters ---------- window : string, tuple, or ndarray If window is specified by a string or tuple and nperseg is not specified, nperseg is set to the default of 256 and returns a window of that length. If instead the window is array_like and nperseg is not specified, then nperseg is set to the length of the window. A ValueError is raised if the user supplies both an array_like window and a value for nperseg but nperseg does not equal the length of the window. nperseg : int Length of each segment input_length: int Length of input signal, i.e. x.shape[-1]. Used to test for errors. Returns ------- win : ndarray window. If function was called with string or tuple than this will hold the actual array used as a window. nperseg : int Length of each segment. If window is str or tuple, nperseg is set to 256. If window is array_like, nperseg is set to the length of the 6 window. ",182,118,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _triage_segments(window, nperseg, input_length): # parse window; if array like, then set nperseg = win.shape if isinstance(window, (str, tuple)): # if nperseg not specified if nperseg is None: nperseg = 256 # then change to default if nperseg > input_length: warnings.warn(f'nperseg = {nperseg} is greater than input length ' f' = {input_length}, using nperseg = {nperseg}') nperseg = input_length win = jnp.array(osp_signal.get_window(window, nperseg)) else: win = jnp.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if input_length < win.shape[-1]: raise ValueError('window is longer than input signal') if nperseg is None: nperseg = win.shape[0] elif nperseg is not None: if nperseg != win.shape[0]: raise ValueError(""value specified for nperseg is different"" "" from length of window"") return win, nperseg ``` ###Assistant : Parses window and nperseg arguments for spectrogram and _spectral_helper. This is a helper function, not meant to be called externally. Parameters ---------- window : string, tuple, or ndarray If window is specified by a string or tuple and nperseg is not specified, nperseg is set to the default of 256 and returns a window of that length. If instead the window is array_like and nperseg is not specified, then nperseg is set to the length of the window. A ValueError is raised if the user supplies both an array_like window and a value for nperseg but nperseg does not equal the length of the window. nperseg : int Length of each segment input_length: int Length of input signal, i.e. x.shape[-1]. Used to test for errors. Returns ------- win : ndarray window. If function was called with string or tuple than this will hold the actual array used as a window. nperseg : int Length of each segment. If window is str or tuple, nperseg is set to 256. If window is array_like, nperseg is set to the length of the 6 window. " 1867,"def register_for_auto_class(cls, auto_class=""AutoModel""): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f""{auto_class} is not a valid auto class."") cls._auto_class = auto_class # To update the docstring, we need to copy the method, otherwise we change the original docstring. PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( object=""model"", object_class=""AutoModel"", object_files=""model checkpoint"" ) "," Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. Args: auto_class (`str` or `type`, *optional*, defaults to `""AutoModel""`): The auto class to register this new model with. ",47,57,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def register_for_auto_class(cls, auto_class=""AutoModel""): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f""{auto_class} is not a valid auto class."") cls._auto_class = auto_class # To update the docstring, we need to copy the method, otherwise we change the original docstring. 
PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( object=""model"", object_class=""AutoModel"", object_files=""model checkpoint"" ) ``` ###Assistant : Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. Args: auto_class (`str` or `type`, *optional*, defaults to `""AutoModel""`): The auto class to register this new model with. " 1868,"def is_accelerate_available(): return _accelerate_available # docstyle-ignore FLAX_IMPORT_ERROR = # docstyle-ignore INFLECT_IMPORT_ERROR = # docstyle-ignore PYTORCH_IMPORT_ERROR = # docstyle-ignore ONNX_IMPORT_ERROR = # docstyle-ignore SCIPY_IMPORT_ERROR = # docstyle-ignore TENSORFLOW_IMPORT_ERROR = # docstyle-ignore TRANSFORMERS_IMPORT_ERROR = # docstyle-ignore UNIDECODE_IMPORT_ERROR = BACKENDS_MAPPING = OrderedDict( [ (""flax"", (is_flax_available, FLAX_IMPORT_ERROR)), (""inflect"", (is_inflect_available, INFLECT_IMPORT_ERROR)), (""onnx"", (is_onnx_available, ONNX_IMPORT_ERROR)), (""scipy"", (is_scipy_available, SCIPY_IMPORT_ERROR)), (""tf"", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), (""torch"", (is_torch_available, PYTORCH_IMPORT_ERROR)), (""transformers"", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), (""unidecode"", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), ] ) "," {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/google/flax and follow the ones that match your environment. {0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install inflect` {0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. {0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip install onnxruntime` {0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install scipy` {0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the installation page: https://www.tensorflow.org/install and follow the ones that match your environment. {0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip install transformers` {0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install Unidecode` ",197,66,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def is_accelerate_available(): return _accelerate_available # docstyle-ignore FLAX_IMPORT_ERROR = # docstyle-ignore INFLECT_IMPORT_ERROR = # docstyle-ignore PYTORCH_IMPORT_ERROR = # docstyle-ignore ONNX_IMPORT_ERROR = # docstyle-ignore SCIPY_IMPORT_ERROR = # docstyle-ignore TENSORFLOW_IMPORT_ERROR = # docstyle-ignore TRANSFORMERS_IMPORT_ERROR = # docstyle-ignore UNIDECODE_IMPORT_ERROR = BACKENDS_MAPPING = OrderedDict( [ (""flax"", (is_flax_available, FLAX_IMPORT_ERROR)), (""inflect"", (is_inflect_available, INFLECT_IMPORT_ERROR)), (""onnx"", (is_onnx_available, ONNX_IMPORT_ERROR)), (""scipy"", (is_scipy_available, SCIPY_IMPORT_ERROR)), (""tf"", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), (""torch"", (is_torch_available, PYTORCH_IMPORT_ERROR)), (""transformers"", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), (""unidecode"", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), ] ) ``` ###Assistant : {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/google/flax and follow the ones that match your environment. {0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install inflect` {0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. {0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip install onnxruntime` {0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install scipy` {0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the installation page: https://www.tensorflow.org/install and follow the ones that match your environment. {0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip install transformers` {0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install Unidecode` " 1869,"def set_horizontalalignment(self, align): _api.check_in_list(['center', 'right', 'left'], align=align) self._horizontalalignment = align self.stale = True "," Set the horizontal alignment relative to the anchor point. See also :doc:`/gallery/text_labels_and_annotations/text_alignment`. Parameters ---------- align : {'left', 'center', 'right'} ",19,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_horizontalalignment(self, align): _api.check_in_list(['center', 'right', 'left'], align=align) self._horizontalalignment = align self.stale = True ``` ###Assistant : Set the horizontal alignment relative to the anchor point. See also :doc:`/gallery/text_labels_and_annotations/text_alignment`. Parameters ---------- align : {'left', 'center', 'right'} " 1870,"def _wrap_awaitable(awaitable): return (yield from awaitable.__await__()) _wrap_awaitable._is_coroutine = _is_coroutine ","Helper for asyncio.ensure_future(). Wraps awaitable (an object with __await__) into a coroutine that will later be wrapped in a Task by ensure_future(). ",22,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _wrap_awaitable(awaitable): return (yield from awaitable.__await__()) _wrap_awaitable._is_coroutine = _is_coroutine ``` ###Assistant : Helper for asyncio.ensure_future(). Wraps awaitable (an object with __await__) into a coroutine that will later be wrapped in a Task by ensure_future(). " 1871,"def normalize_file(file, separators=None): # Normalize path separators. if separators is None: separators = NORMALIZE_PATH_SEPS # Convert path object to string. norm_file = str(file) for sep in separators: norm_file = norm_file.replace(sep, posixpath.sep) # Remove current directory prefix. if norm_file.startswith('./'): norm_file = norm_file[2:] return norm_file "," Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). *file* (:class:`str` or :class:`pathlib.PurePath`) is the file path. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. This does not need to include the POSIX path separator (``'/'``), but including it will not affect the results. Default is :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty container (e.g., an empty tuple ``()``). Returns the normalized file path (:class:`str`). ",75,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def normalize_file(file, separators=None): # Normalize path separators. if separators is None: separators = NORMALIZE_PATH_SEPS # Convert path object to string. norm_file = str(file) for sep in separators: norm_file = norm_file.replace(sep, posixpath.sep) # Remove current directory prefix. if norm_file.startswith('./'): norm_file = norm_file[2:] return norm_file ``` ###Assistant : Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). *file* (:class:`str` or :class:`pathlib.PurePath`) is the file path. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. This does not need to include the POSIX path separator (``'/'``), but including it will not affect the results. Default is :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty container (e.g., an empty tuple ``()``). Returns the normalized file path (:class:`str`). " 1872,"def wildcard_types(self) -> List[str]: return [t for t, state_keys in self.types.items() if state_keys is None] ","Returns a list of event types which require us to fetch all state keys. This will be empty unless `has_wildcards` returns True. Returns: A list of event types. ",28,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wildcard_types(self) -> List[str]: return [t for t, state_keys in self.types.items() if state_keys is None] ``` ###Assistant : Returns a list of event types which require us to fetch all state keys. This will be empty unless `has_wildcards` returns True. Returns: A list of event types. " 1873,"def get_columns(self, table_name) -> Response: q = f""SHOW COLUMNS IN TABLE {table_name};"" result = self.native_query(q) return result "," List the columns in the tables for which the user has access ",12,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_columns(self, table_name) -> Response: q = f""SHOW COLUMNS IN TABLE {table_name};"" result = self.native_query(q) return result ``` ###Assistant : List the columns in the tables for which the user has access " 1874,"def get_on_time_shipments(scorecard): supplier = frappe.get_doc(""Supplier"", scorecard.supplier) # Look up all PO Items with delivery dates between our dates total_items_delivered_on_time = frappe.db.sql( , {""supplier"": supplier.name, ""start_date"": scorecard.start_date, ""end_date"": scorecard.end_date}, as_dict=0, )[0][0] if not total_items_delivered_on_time: total_items_delivered_on_time = 0 return total_items_delivered_on_time ","Gets the number of late shipments (counting each item) in the period (based on Purchase Receipts vs POs) SELECT COUNT(pr_item.qty) FROM `tabPurchase Order Item` po_item, `tabPurchase Receipt Item` pr_item, `tabPurchase Order` po, `tabPurchase Receipt` pr WHERE po.supplier = %(supplier)s AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s AND po_item.schedule_date <= pr.posting_date AND po_item.qty = pr_item.qty AND pr_item.docstatus = 1 AND pr_item.purchase_order_item = po_item.name AND po_item.parent = po.name AND pr_item.parent = pr.name",69,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_on_time_shipments(scorecard): supplier = frappe.get_doc(""Supplier"", scorecard.supplier) # Look up all PO Items with delivery dates between our dates total_items_delivered_on_time = frappe.db.sql( , {""supplier"": supplier.name, ""start_date"": scorecard.start_date, ""end_date"": scorecard.end_date}, as_dict=0, )[0][0] if not total_items_delivered_on_time: total_items_delivered_on_time = 0 return total_items_delivered_on_time ``` ###Assistant : Gets the number of late shipments (counting each item) in the period (based on Purchase Receipts vs POs) SELECT COUNT(pr_item.qty) FROM `tabPurchase Order Item` po_item, `tabPurchase Receipt Item` pr_item, `tabPurchase Order` po, `tabPurchase Receipt` pr WHERE po.supplier = %(supplier)s AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s AND po_item.schedule_date <= pr.posting_date AND po_item.qty = pr_item.qty AND pr_item.docstatus = 1 AND pr_item.purchase_order_item = po_item.name AND po_item.parent = po.name AND pr_item.parent = pr.name " 1875,"def test_commit_comment_deleted(self) -> None: expected_message = self.check_webhook(""commit_comment_deleted"", TOPIC, expected_message) ",[hypro999](http://139.59.64.214:7990/users/hypro999) deleted their comment on [508d1b67f1f](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\n~~~ quote\n~~Just an arbitrary comment on a commit. Nothing to see here...~~\n~~~,17,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_commit_comment_deleted(self) -> None: expected_message = self.check_webhook(""commit_comment_deleted"", TOPIC, expected_message) ``` ###Assistant : [hypro999](http://139.59.64.214:7990/users/hypro999) deleted their comment on [508d1b67f1f](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\n~~~ quote\n~~Just an arbitrary comment on a commit. 
Nothing to see here...~~\n~~~ " 1876,"async def get_work_queues(self) -> Optional[UUID]: work_queues = [] for name in self.work_queues: try: # support IDs and names if isinstance(name, UUID): work_queue = await self.client.read_work_queue(id=name) else: work_queue = await self.client.read_work_queue_by_name(name) except ObjectNotFound: work_queue = await self.client.create_work_queue( name=name, return_id=False ) work_queues.append(work_queue) return work_queues "," Loads the work queue objects corresponding to the agent's target work queues. If any of them don't exist, they are created. ",21,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def get_work_queues(self) -> Optional[UUID]: work_queues = [] for name in self.work_queues: try: # support IDs and names if isinstance(name, UUID): work_queue = await self.client.read_work_queue(id=name) else: work_queue = await self.client.read_work_queue_by_name(name) except ObjectNotFound: work_queue = await self.client.create_work_queue( name=name, return_id=False ) work_queues.append(work_queue) return work_queues ``` ###Assistant : Loads the work queue objects corresponding to the agent's target work queues. If any of them don't exist, they are created. " 1877,"def test_read_nonexistent_stream_raises_exception(mocker): s1 = MockStream(name=""s1"") s2 = MockStream(name=""this_stream_doesnt_exist_in_the_source"") mocker.patch.object(MockStream, ""get_json_schema"", return_value={}) src = MockSource(streams=[s1]) catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s2, SyncMode.full_refresh)]) with pytest.raises(KeyError): list(src.read(logger, {}, catalog)) GLOBAL_EMITTED_AT = 1 ",Tests that attempting to sync a stream which the source does not return from the `streams` method raises an exception,20,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_read_nonexistent_stream_raises_exception(mocker): s1 = MockStream(name=""s1"") s2 = MockStream(name=""this_stream_doesnt_exist_in_the_source"") mocker.patch.object(MockStream, ""get_json_schema"", return_value={}) src = MockSource(streams=[s1]) catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s2, SyncMode.full_refresh)]) with pytest.raises(KeyError): list(src.read(logger, {}, catalog)) GLOBAL_EMITTED_AT = 1 ``` ###Assistant : Tests that attempting to sync a stream which the source does not return from the `streams` method raises an exception " 1878,"def _should_queue(self, link, referrer, rel): scheme, netloc, path, _, _, _ = urlparse(link) if path.endswith(self.source_extensions + self.binary_extensions + self.excluded_extensions): result = False elif self.skip_externals and not link.startswith(self.base_url): result = False elif not referrer.startswith(self.base_url): result = False elif rel not in ('homepage', 'download'): result = False elif scheme not in ('http', 'https', 'ftp'): result = False elif self._is_platform_dependent(link): result = False else: host = netloc.split(':', 1)[0] if host.lower() == 'localhost': result = False else: result = True logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result) return result "," Determine whether a link URL from a referring page and with a particular ""rel"" attribute should be queued for scraping. ",20,89,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _should_queue(self, link, referrer, rel): scheme, netloc, path, _, _, _ = urlparse(link) if path.endswith(self.source_extensions + self.binary_extensions + self.excluded_extensions): result = False elif self.skip_externals and not link.startswith(self.base_url): result = False elif not referrer.startswith(self.base_url): result = False elif rel not in ('homepage', 'download'): result = False elif scheme not in ('http', 'https', 'ftp'): result = False elif self._is_platform_dependent(link): result = False else: host = netloc.split(':', 1)[0] if host.lower() == 'localhost': result = False else: result = True logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result) return result ``` ###Assistant : Determine whether a link URL from a referring page and with a particular ""rel"" attribute should be queued for scraping. " 1879,"def receive(): header = _in_file.read(16) _logger.debug('Received command, header: [%s]', header) if header is None or len(header) < 16: # Pipe EOF encountered _logger.debug('Pipe EOF encountered') return None, None length = int(header[2:]) data = _in_file.read(length) command = CommandType(header[:2]) data = data.decode('utf8') _logger.debug('Received command, data: [%s]', data) return command, data ","Receive a command from Training Service. Returns a tuple of command (CommandType) and payload (str) ",15,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def receive(): header = _in_file.read(16) _logger.debug('Received command, header: [%s]', header) if header is None or len(header) < 16: # Pipe EOF encountered _logger.debug('Pipe EOF encountered') return None, None length = int(header[2:]) data = _in_file.read(length) command = CommandType(header[:2]) data = data.decode('utf8') _logger.debug('Received command, data: [%s]', data) return command, data ``` ###Assistant : Receive a command from Training Service. Returns a tuple of command (CommandType) and payload (str) " 1880,"def bettertitle(value): return ' '.join([w[0].upper() + w[1:] for w in value.split()]) @register.filter()"," Alternative to the builtin title(). Ensures that the first letter of each word is uppercase but retains the original case of all others. ",23,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bettertitle(value): return ' '.join([w[0].upper() + w[1:] for w in value.split()]) @register.filter() ``` ###Assistant : Alternative to the builtin title(). Ensures that the first letter of each word is uppercase but retains the original case of all others. " 1881,"def load_pascal_annotation(index, pascal_root): classes = ('__background__', # always index 0 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') class_to_ind = dict(zip(classes, xrange(21))) filename = osp.join(pascal_root, 'Annotations', index + '.xml') # print 'Loading: {}'.format(filename) "," This code is borrowed from Ross Girshick's FAST-RCNN code (https://github.com/rbgirshick/fast-rcnn). It parses the PASCAL .xml metadata files. See publication for further details: (http://arxiv.org/abs/1504.08083). Thanks Ross! ",25,45,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def load_pascal_annotation(index, pascal_root): classes = ('__background__', # always index 0 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') class_to_ind = dict(zip(classes, xrange(21))) filename = osp.join(pascal_root, 'Annotations', index + '.xml') # print 'Loading: {}'.format(filename) ``` ###Assistant : This code is borrowed from Ross Girshick's FAST-RCNN code (https://github.com/rbgirshick/fast-rcnn). It parses the PASCAL .xml metadata files. See publication for further details: (http://arxiv.org/abs/1504.08083). Thanks Ross! " 1882,"def get(self): logger = get_logger() logger.debug(f""ENTER::Partition.get::{self._identity}"") if len(self.call_queue): self.drain_call_queue() result = UnidistWrapper.materialize(self._data) logger.debug(f""EXIT::Partition.get::{self._identity}"") return result "," Get the object wrapped by this partition out of the object store. Returns ------- pandas.DataFrame The object from the object store. ",21,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get(self): logger = get_logger() logger.debug(f""ENTER::Partition.get::{self._identity}"") if len(self.call_queue): self.drain_call_queue() result = UnidistWrapper.materialize(self._data) logger.debug(f""EXIT::Partition.get::{self._identity}"") return result ``` ###Assistant : Get the object wrapped by this partition out of the object store. Returns ------- pandas.DataFrame The object from the object store. " 1883,"def test_custom_function_action_no_perm_response(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], ""action"": ""no_perm"", ""index"": 0, } response = self.client.post( reverse(""admin:admin_views_externalsubscriber_changelist""), action_data ) self.assertEqual(response.status_code, 403) self.assertEqual(response.content, b""No permission to perform this action"") ",A custom action may return an HttpResponse with a 403 code.,11,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_custom_function_action_no_perm_response(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], ""action"": ""no_perm"", ""index"": 0, } response = self.client.post( reverse(""admin:admin_views_externalsubscriber_changelist""), action_data ) self.assertEqual(response.status_code, 403) self.assertEqual(response.content, b""No permission to perform this action"") ``` ###Assistant : A custom action may return an HttpResponse with a 403 code. " 1884,"def limit(self, *args): return self.applyfunc(lambda x: x.limit(*args)) # https://github.com/sympy/sympy/pull/12854","Calculate the limit of each element in the matrix. ``args`` will be passed to the ``limit`` function. Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x, y >>> M = Matrix([[x, y], [1, 0]]) >>> M.limit(x, 2) Matrix([ [2, y], [1, 0]]) See Also ======== integrate diff ",50,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def limit(self, *args): return self.applyfunc(lambda x: x.limit(*args)) # https://github.com/sympy/sympy/pull/12854 ``` ###Assistant : Calculate the limit of each element in the matrix. ``args`` will be passed to the ``limit`` function. 
Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x, y >>> M = Matrix([[x, y], [1, 0]]) >>> M.limit(x, 2) Matrix([ [2, y], [1, 0]]) See Also ======== integrate diff " 1885,"def test_device_classes_aligned(): non_numeric_device_classes = { SensorDeviceClass.DATE, SensorDeviceClass.DURATION, SensorDeviceClass.TIMESTAMP, } for device_class in SensorDeviceClass: if device_class in non_numeric_device_classes: continue assert hasattr(NumberDeviceClass, device_class.name) assert getattr(NumberDeviceClass, device_class.name).value == device_class.value ",Make sure all sensor device classes are also available in NumberDeviceClass.,11,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_device_classes_aligned(): non_numeric_device_classes = { SensorDeviceClass.DATE, SensorDeviceClass.DURATION, SensorDeviceClass.TIMESTAMP, } for device_class in SensorDeviceClass: if device_class in non_numeric_device_classes: continue assert hasattr(NumberDeviceClass, device_class.name) assert getattr(NumberDeviceClass, device_class.name).value == device_class.value ``` ###Assistant : Make sure all sensor device classes are also available in NumberDeviceClass. " 1886,"def valid_tess_config(outdir): cfg_file = outdir / 'test.cfg' with cfg_file.open('w') as f: f.write( ) yield cfg_file ","\ load_system_dawg 0 language_model_penalty_non_dict_word 0 language_model_penalty_non_freq_dict_word 0 ",7,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def valid_tess_config(outdir): cfg_file = outdir / 'test.cfg' with cfg_file.open('w') as f: f.write( ) yield cfg_file ``` ###Assistant : \ load_system_dawg 0 language_model_penalty_non_dict_word 0 language_model_penalty_non_freq_dict_word 0 " 1887,"def year_lookup_bounds_for_datetime_field(self, value, iso_year=False): if iso_year: first = datetime.datetime.fromisocalendar(value, 1, 1) second = datetime.datetime.fromisocalendar( value + 1, 1, 1 ) - datetime.timedelta(microseconds=1) else: first = datetime.datetime(value, 1, 1) second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) if settings.USE_TZ: tz = timezone.get_current_timezone() first = timezone.make_aware(first, tz) second = timezone.make_aware(second, tz) first = self.adapt_datetimefield_value(first) second = self.adapt_datetimefield_value(second) return [first, second] "," Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateTimeField value using a year lookup. `value` is an int, containing the looked-up year. If `iso_year` is True, return bounds for ISO-8601 week-numbering years. ",44,59,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def year_lookup_bounds_for_datetime_field(self, value, iso_year=False): if iso_year: first = datetime.datetime.fromisocalendar(value, 1, 1) second = datetime.datetime.fromisocalendar( value + 1, 1, 1 ) - datetime.timedelta(microseconds=1) else: first = datetime.datetime(value, 1, 1) second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) if settings.USE_TZ: tz = timezone.get_current_timezone() first = timezone.make_aware(first, tz) second = timezone.make_aware(second, tz) first = self.adapt_datetimefield_value(first) second = self.adapt_datetimefield_value(second) return [first, second] ``` ###Assistant : Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateTimeField value using a year lookup. `value` is an int, containing the looked-up year. If `iso_year` is True, return bounds for ISO-8601 week-numbering years. " 1888,"def gegenbauer_poly(n, a, x=None, polys=False): r return named_poly(n, dup_gegenbauer, None, ""Gegenbauer polynomial"", (x, a), polys) ","Generates the Gegenbauer polynomial `C_n^{(a)}(x)`. Parameters ========== n : int Degree of the polynomial. x : optional a Decides minimal domain for the list of coefficients. polys : bool, optional If True, return a Poly, otherwise (default) return an expression. ",40,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def gegenbauer_poly(n, a, x=None, polys=False): r return named_poly(n, dup_gegenbauer, None, ""Gegenbauer polynomial"", (x, a), polys) ``` ###Assistant : Generates the Gegenbauer polynomial `C_n^{(a)}(x)`. Parameters ========== n : int Degree of the polynomial. x : optional a Decides minimal domain for the list of coefficients. polys : bool, optional If True, return a Poly, otherwise (default) return an expression. " 1889,"async def async_update(self, now=None): if not self.pollable_characteristics: self.async_update_available_state() _LOGGER.debug( ""HomeKit connection not polling any characteristics: %s"", self.unique_id ) return if self._polling_lock.locked(): if not self._polling_lock_warned: _LOGGER.warning( ( ""HomeKit controller update skipped as previous poll still in"" "" flight: %s"" ), self.unique_id, ) self._polling_lock_warned = True return if self._polling_lock_warned: _LOGGER.info( ( ""HomeKit controller no longer detecting back pressure - not"" "" skipping poll: %s"" ), self.unique_id, ) self._polling_lock_warned = False ",Poll state of all entities attached to this bridge/accessory.,9,68,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_update(self, now=None): if not self.pollable_characteristics: self.async_update_available_state() _LOGGER.debug( ""HomeKit connection not polling any characteristics: %s"", self.unique_id ) return if self._polling_lock.locked(): if not self._polling_lock_warned: _LOGGER.warning( ( ""HomeKit controller update skipped as previous poll still in"" "" flight: %s"" ), self.unique_id, ) self._polling_lock_warned = True return if self._polling_lock_warned: _LOGGER.info( ( ""HomeKit controller no longer detecting back pressure - not"" "" skipping poll: %s"" ), self.unique_id, ) self._polling_lock_warned = False ``` ###Assistant : Poll state of all entities attached to this bridge/accessory. 
" 1890,"def _merge(self, start, end, left, right): # type: (int, int, int, int) -> Iterator[Tuple[int, int]] lslice, rslice = self._left[left:right], self._right[left:right] i = start = min([start]+lslice[:1]) end = max([end]+rslice[-1:]) for j, k in zip(lslice, rslice): if j > i: yield i, j-1 i = k + 1 if i <= end: yield i, end self._left[left:right], self._right[left:right] = [start], [end] ","Return an iterator of intervals to be fetched. Args: start (int): Start of needed interval end (int): End of needed interval left (int): Index of first overlapping downloaded data right (int): Index after last overlapping downloaded data ",37,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _merge(self, start, end, left, right): # type: (int, int, int, int) -> Iterator[Tuple[int, int]] lslice, rslice = self._left[left:right], self._right[left:right] i = start = min([start]+lslice[:1]) end = max([end]+rslice[-1:]) for j, k in zip(lslice, rslice): if j > i: yield i, j-1 i = k + 1 if i <= end: yield i, end self._left[left:right], self._right[left:right] = [start], [end] ``` ###Assistant : Return an iterator of intervals to be fetched. Args: start (int): Start of needed interval end (int): End of needed interval left (int): Index of first overlapping downloaded data right (int): Index after last overlapping downloaded data " 1891,"def is_python_identifier(self): # type: (str) -> bool # Ref: https://stackoverflow.com/a/55802320/595220 return bool(re.match(_VALID_IDENTIFIER_STRING_REGEX, self)) PB_EXTENSIONS = ('.yml', '.yaml') ",Determine whether the given string is a Python identifier.,9,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_python_identifier(self): # type: (str) -> bool # Ref: https://stackoverflow.com/a/55802320/595220 return bool(re.match(_VALID_IDENTIFIER_STRING_REGEX, self)) PB_EXTENSIONS = ('.yml', '.yaml') ``` ###Assistant : Determine whether the given string is a Python identifier. " 1892,"def _stream_response(self, start, end, base_headers=HEADERS): # type: (int, int, Dict[str, str]) -> Response headers = base_headers.copy() headers['Range'] = f'bytes={start}-{end}' # TODO: Get range requests to be correctly cached headers['Cache-Control'] = 'no-cache' return self._session.get(self._url, headers=headers, stream=True) ",Return HTTP response to a range request from start to end.,11,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _stream_response(self, start, end, base_headers=HEADERS): # type: (int, int, Dict[str, str]) -> Response headers = base_headers.copy() headers['Range'] = f'bytes={start}-{end}' # TODO: Get range requests to be correctly cached headers['Cache-Control'] = 'no-cache' return self._session.get(self._url, headers=headers, stream=True) ``` ###Assistant : Return HTTP response to a range request from start to end. " 1893,"def _inject_greasemonkey_scripts(self, scripts): if sip.isdeleted(self._widget): return # Since we are inserting scripts into a per-tab collection, # rather than just injecting scripts on page load, we need to # make sure we replace existing scripts, not just add new ones. # While, taking care not to remove any other scripts that might # have been added elsewhere, like the one for stylesheets. 
page_scripts = self._widget.page().scripts() self._remove_all_greasemonkey_scripts() seen_names = set() for script in scripts: while script.full_name() in seen_names: script.dedup_suffix += 1 seen_names.add(script.full_name()) new_script = QWebEngineScript() try: world = int(script.jsworld) if not 0 <= world <= qtutils.MAX_WORLD_ID: log.greasemonkey.error( f""script {script.name} has invalid value for '@qute-js-world'"" f"": {script.jsworld}, should be between 0 and "" f""{qtutils.MAX_WORLD_ID}"") continue except ValueError: try: world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]] except KeyError: log.greasemonkey.error( f""script {script.name} has invalid value for '@qute-js-world'"" f"": {script.jsworld}"") continue new_script.setWorldId(world) # Corresponds to ""@run-at document-end"" which is the default according to # https://wiki.greasespot.net/Metadata_Block#.40run-at - however, # QtWebEngine uses QWebEngineScript.InjectionPoint.Deferred (@run-at document-idle) as # default. # # NOTE that this needs to be done before setSourceCode, so that # QtWebEngine's parsing of GreaseMonkey tags will override it if there is a # @run-at comment. new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady) new_script.setSourceCode(script.code()) new_script.setName(script.full_name()) new_script.setRunsOnSubFrames(script.runs_on_sub_frames) if script.needs_document_end_workaround(): log.greasemonkey.debug( f""Forcing @run-at document-end for {script.name}"") new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady) log.greasemonkey.debug(f'adding script: {new_script.name()}') page_scripts.insert(new_script) ","Register user JavaScript files with the current tab. Args: scripts: A list of GreasemonkeyScripts. ",14,203,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _inject_greasemonkey_scripts(self, scripts): if sip.isdeleted(self._widget): return # Since we are inserting scripts into a per-tab collection, # rather than just injecting scripts on page load, we need to # make sure we replace existing scripts, not just add new ones. # While, taking care not to remove any other scripts that might # have been added elsewhere, like the one for stylesheets. page_scripts = self._widget.page().scripts() self._remove_all_greasemonkey_scripts() seen_names = set() for script in scripts: while script.full_name() in seen_names: script.dedup_suffix += 1 seen_names.add(script.full_name()) new_script = QWebEngineScript() try: world = int(script.jsworld) if not 0 <= world <= qtutils.MAX_WORLD_ID: log.greasemonkey.error( f""script {script.name} has invalid value for '@qute-js-world'"" f"": {script.jsworld}, should be between 0 and "" f""{qtutils.MAX_WORLD_ID}"") continue except ValueError: try: world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]] except KeyError: log.greasemonkey.error( f""script {script.name} has invalid value for '@qute-js-world'"" f"": {script.jsworld}"") continue new_script.setWorldId(world) # Corresponds to ""@run-at document-end"" which is the default according to # https://wiki.greasespot.net/Metadata_Block#.40run-at - however, # QtWebEngine uses QWebEngineScript.InjectionPoint.Deferred (@run-at document-idle) as # default. # # NOTE that this needs to be done before setSourceCode, so that # QtWebEngine's parsing of GreaseMonkey tags will override it if there is a # @run-at comment. 
new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady) new_script.setSourceCode(script.code()) new_script.setName(script.full_name()) new_script.setRunsOnSubFrames(script.runs_on_sub_frames) if script.needs_document_end_workaround(): log.greasemonkey.debug( f""Forcing @run-at document-end for {script.name}"") new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady) log.greasemonkey.debug(f'adding script: {new_script.name()}') page_scripts.insert(new_script) ``` ###Assistant : Register user JavaScript files with the current tab. Args: scripts: A list of GreasemonkeyScripts. " 1894,"def config(self) -> dict: global _CONFIG # pylint: disable=global-statement if not _CONFIG: model_name = self._config_section logger.debug(""Loading config for: %s"", model_name) _CONFIG = Config(model_name, configfile=self._configfile).config_dict return _CONFIG "," dict: The configuration dictionary for current plugin, as set by the user's configuration settings. ",14,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def config(self) -> dict: global _CONFIG # pylint: disable=global-statement if not _CONFIG: model_name = self._config_section logger.debug(""Loading config for: %s"", model_name) _CONFIG = Config(model_name, configfile=self._configfile).config_dict return _CONFIG ``` ###Assistant : dict: The configuration dictionary for current plugin, as set by the user's configuration settings. " 1895,"def execute(): frappe.reload_doc(""stock"", ""doctype"", ""stock_entry"") if frappe.db.has_column(""Stock Entry"", ""add_to_transit""): frappe.db.sql( ) frappe.db.sql( ) frappe.reload_doc(""stock"", ""doctype"", ""warehouse_type"") if not frappe.db.exists(""Warehouse Type"", ""Transit""): doc = frappe.new_doc(""Warehouse Type"") doc.name = ""Transit"" doc.insert() frappe.reload_doc(""stock"", ""doctype"", ""stock_entry_type"") frappe.delete_doc_if_exists(""Stock Entry Type"", ""Send to Warehouse"") frappe.delete_doc_if_exists(""Stock Entry Type"", ""Receive at Warehouse"") "," UPDATE `tabStock Entry` SET stock_entry_type = 'Material Transfer', purpose = 'Material Transfer', add_to_transit = 1 WHERE stock_entry_type = 'Send to Warehouse' UPDATE `tabStock Entry` SET stock_entry_type = 'Material Transfer', purpose = 'Material Transfer' WHERE stock_entry_type = 'Receive at Warehouse' ",39,44,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def execute(): frappe.reload_doc(""stock"", ""doctype"", ""stock_entry"") if frappe.db.has_column(""Stock Entry"", ""add_to_transit""): frappe.db.sql( ) frappe.db.sql( ) frappe.reload_doc(""stock"", ""doctype"", ""warehouse_type"") if not frappe.db.exists(""Warehouse Type"", ""Transit""): doc = frappe.new_doc(""Warehouse Type"") doc.name = ""Transit"" doc.insert() frappe.reload_doc(""stock"", ""doctype"", ""stock_entry_type"") frappe.delete_doc_if_exists(""Stock Entry Type"", ""Send to Warehouse"") frappe.delete_doc_if_exists(""Stock Entry Type"", ""Receive at Warehouse"") ``` ###Assistant : UPDATE `tabStock Entry` SET stock_entry_type = 'Material Transfer', purpose = 'Material Transfer', add_to_transit = 1 WHERE stock_entry_type = 'Send to Warehouse' UPDATE `tabStock Entry` SET stock_entry_type = 'Material Transfer', purpose = 'Material Transfer' WHERE stock_entry_type = 'Receive at Warehouse' " 1896,"def query_task(doctype, txt, searchfield, start, page_len, filters): from frappe.desk.reportview import build_match_conditions search_string = ""%%%s%%"" % txt order_by_string = ""%s%%"" % txt match_conditions = build_match_conditions(""Task"") match_conditions = (""and"" + match_conditions) if match_conditions else """" return frappe.db.sql( % (searchfield, ""%s"", ""%s"", match_conditions, ""%s"", searchfield, ""%s"", searchfield, ""%s"", ""%s""), (search_string, search_string, order_by_string, order_by_string, start, page_len), ) ","select name, subject from `tabTask` where (`%s` like %s or `subject` like %s) %s order by case when `subject` like %s then 0 else 1 end, case when `%s` like %s then 0 else 1 end, `%s`, subject limit %s, %s",41,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def query_task(doctype, txt, searchfield, start, page_len, filters): from frappe.desk.reportview import build_match_conditions search_string = ""%%%s%%"" % txt order_by_string = ""%s%%"" % txt match_conditions = build_match_conditions(""Task"") match_conditions = (""and"" + match_conditions) if match_conditions else """" return frappe.db.sql( % (searchfield, ""%s"", ""%s"", match_conditions, ""%s"", searchfield, ""%s"", searchfield, ""%s"", ""%s""), (search_string, search_string, order_by_string, order_by_string, start, page_len), ) ``` ###Assistant : select name, subject from `tabTask` where (`%s` like %s or `subject` like %s) %s order by case when `subject` like %s then 0 else 1 end, case when `%s` like %s then 0 else 1 end, `%s`, subject limit %s, %s " 1897,"def _parse_name(self, name): if name.endswith(""_float32_vars""): error_msg = ( ""Policies ending in '_float32_vars' have been removed "" ""from TensorFlow."" ) if name in (""infer_float32_vars"", ""infer_with_float32_vars""): error_msg += ( "" Please use the 'mixed_float16' or 'mixed_bfloat16' "" ""policy instead."" ) elif name == ""float16_with_float32_vars"": error_msg += "" Please use the 'mixed_float16' policy instead."" elif name == ""bfloat16_with_float32_vars"": error_msg += "" Please use the 'mixed_bfloat16' policy instead."" error_msg += "" Got policy name: '%s'"" % name raise ValueError(error_msg) if name == ""mixed_float16"": return ""float16"", ""float32"" elif name == ""mixed_bfloat16"": return ""bfloat16"", ""float32"" elif name == ""_infer"": # The ""_infer"" policy exists only for compatibility with TF 1, where # ""_infer"" is the default. 
The behavior matches the behavior of TF 1's # behavior before policies were introduced. With ""_infer"", the computation # and variable dtype are inferred from the first input the first time the # layer is called. Once the layer is called for the first time, the # layer's policy will change to the dtype of the first input, and it will # no longer have the ""_infer"" policy. # # The infer policy should be considered an implementation detail and may # be removed in the future. return None, None try: dtype = tf.as_dtype(name).name except TypeError: error = ( ""Cannot convert value %s to a mixed precision Policy. "" ""Valid policies include 'mixed_float16', 'mixed_bfloat16', "" ""and the name of any dtype such as 'float32'."" % (name,) ) raise ValueError(error) return dtype, dtype ","Parses a Policy name into a compute and variable dtype. Args: name: The name of the policy: Returns: The (compute_dtype, variable_dtype) pair. ",22,242,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _parse_name(self, name): if name.endswith(""_float32_vars""): error_msg = ( ""Policies ending in '_float32_vars' have been removed "" ""from TensorFlow."" ) if name in (""infer_float32_vars"", ""infer_with_float32_vars""): error_msg += ( "" Please use the 'mixed_float16' or 'mixed_bfloat16' "" ""policy instead."" ) elif name == ""float16_with_float32_vars"": error_msg += "" Please use the 'mixed_float16' policy instead."" elif name == ""bfloat16_with_float32_vars"": error_msg += "" Please use the 'mixed_bfloat16' policy instead."" error_msg += "" Got policy name: '%s'"" % name raise ValueError(error_msg) if name == ""mixed_float16"": return ""float16"", ""float32"" elif name == ""mixed_bfloat16"": return ""bfloat16"", ""float32"" elif name == ""_infer"": # The ""_infer"" policy exists only for compatibility with TF 1, where # ""_infer"" is the default. The behavior matches the behavior of TF 1's # behavior before policies were introduced. With ""_infer"", the computation # and variable dtype are inferred from the first input the first time the # layer is called. Once the layer is called for the first time, the # layer's policy will change to the dtype of the first input, and it will # no longer have the ""_infer"" policy. # # The infer policy should be considered an implementation detail and may # be removed in the future. return None, None try: dtype = tf.as_dtype(name).name except TypeError: error = ( ""Cannot convert value %s to a mixed precision Policy. "" ""Valid policies include 'mixed_float16', 'mixed_bfloat16', "" ""and the name of any dtype such as 'float32'."" % (name,) ) raise ValueError(error) return dtype, dtype ``` ###Assistant : Parses a Policy name into a compute and variable dtype. Args: name: The name of the policy: Returns: The (compute_dtype, variable_dtype) pair. " 1898,"def reraise_exceptions_as_crashes(): try: yield except BaseException as exc: state = exception_to_crashed_state(exc) raise Crash(message=state.message, cause=exc, state=state) from exc "," Detect crashes during this context, wrapping unexpected exceptions into `Crash` signals. ",11,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def reraise_exceptions_as_crashes(): try: yield except BaseException as exc: state = exception_to_crashed_state(exc) raise Crash(message=state.message, cause=exc, state=state) from exc ``` ###Assistant : Detect crashes during this context, wrapping unexpected exceptions into `Crash` signals. " 1899,"def resoure_check(): MINIMUM_ALLOWED_MEMORY = 4 MINIMUM_ALLOWED_CPUS = 2 MINIMUM_ALLOWED_DISK = 20 print(""\nChecking resources.\n"") # Memory current available svmem = psutil.virtual_memory() mem_available = get_size(svmem.available) # Cpus current available cpus_available = psutil.cpu_count(logical=True) # Disk current available partitions = psutil.disk_partitions() partition_usage = psutil.disk_usage(partitions[0].mountpoint) disk_available = get_size(partition_usage.free) resources: Dict[str, Resource] = { 'Memory': Resource(current=mem_available, minimumAllowed=MINIMUM_ALLOWED_MEMORY), 'Cpus': Resource(current=cpus_available, minimumAllowed=MINIMUM_ALLOWED_CPUS), 'Disk': Resource(current=disk_available, minimumAllowed=MINIMUM_ALLOWED_DISK), } return resources "," Use gsutil to get resources in bytes for memory and disk ",11,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resoure_check(): MINIMUM_ALLOWED_MEMORY = 4 MINIMUM_ALLOWED_CPUS = 2 MINIMUM_ALLOWED_DISK = 20 print(""\nChecking resources.\n"") # Memory current available svmem = psutil.virtual_memory() mem_available = get_size(svmem.available) # Cpus current available cpus_available = psutil.cpu_count(logical=True) # Disk current available partitions = psutil.disk_partitions() partition_usage = psutil.disk_usage(partitions[0].mountpoint) disk_available = get_size(partition_usage.free) resources: Dict[str, Resource] = { 'Memory': Resource(current=mem_available, minimumAllowed=MINIMUM_ALLOWED_MEMORY), 'Cpus': Resource(current=cpus_available, minimumAllowed=MINIMUM_ALLOWED_CPUS), 'Disk': Resource(current=disk_available, minimumAllowed=MINIMUM_ALLOWED_DISK), } return resources ``` ###Assistant : Use gsutil to get resources in bytes for memory and disk " 1900,"def _pi_coeff(arg, cycles=1): r arg = sympify(arg) if arg is pi: return S.One elif not arg: return S.Zero elif arg.is_Mul: cx = arg.coeff(pi) if cx: c, x = cx.as_coeff_Mul() # pi is not included as coeff if c.is_Float: # recast exact binary fractions to Rationals f = abs(c) % 1 if f != 0: p = -int(round(log(f, 2).evalf())) m = 2**p cm = c*m i = int(cm) if i == cm: c = Rational(i, m) cx = c*x else: c = Rational(int(c)) cx = c*x if x.is_integer: c2 = c % 2 if c2 == 1: return x elif not c2: if x.is_even is not None: # known parity return S.Zero return Integer(2) else: return c2*x return cx elif arg.is_zero: return S.Zero "," When arg is a Number times $\pi$ (e.g. $3\pi/2$) then return the Number normalized to be in the range $[0, 2]$, else `None`. When an even multiple of $\pi$ is encountered, if it is multiplying something with known parity then the multiple is returned as 0 otherwise as 2. 
Examples ======== >>> from sympy.functions.elementary.trigonometric import _pi_coeff >>> from sympy import pi, Dummy >>> from sympy.abc import x >>> _pi_coeff(3*x*pi) 3*x >>> _pi_coeff(11*pi/7) 11/7 >>> _pi_coeff(-11*pi/7) 3/7 >>> _pi_coeff(4*pi) 0 >>> _pi_coeff(5*pi) 1 >>> _pi_coeff(5.0*pi) 1 >>> _pi_coeff(5.5*pi) 3/2 >>> _pi_coeff(2 + pi) >>> _pi_coeff(2*Dummy(integer=True)*pi) 2 >>> _pi_coeff(2*Dummy(even=True)*pi) 0 ",98,122,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _pi_coeff(arg, cycles=1): r arg = sympify(arg) if arg is pi: return S.One elif not arg: return S.Zero elif arg.is_Mul: cx = arg.coeff(pi) if cx: c, x = cx.as_coeff_Mul() # pi is not included as coeff if c.is_Float: # recast exact binary fractions to Rationals f = abs(c) % 1 if f != 0: p = -int(round(log(f, 2).evalf())) m = 2**p cm = c*m i = int(cm) if i == cm: c = Rational(i, m) cx = c*x else: c = Rational(int(c)) cx = c*x if x.is_integer: c2 = c % 2 if c2 == 1: return x elif not c2: if x.is_even is not None: # known parity return S.Zero return Integer(2) else: return c2*x return cx elif arg.is_zero: return S.Zero ``` ###Assistant : When arg is a Number times $\pi$ (e.g. $3\pi/2$) then return the Number normalized to be in the range $[0, 2]$, else `None`. When an even multiple of $\pi$ is encountered, if it is multiplying something with known parity then the multiple is returned as 0 otherwise as 2. Examples ======== >>> from sympy.functions.elementary.trigonometric import _pi_coeff >>> from sympy import pi, Dummy >>> from sympy.abc import x >>> _pi_coeff(3*x*pi) 3*x >>> _pi_coeff(11*pi/7) 11/7 >>> _pi_coeff(-11*pi/7) 3/7 >>> _pi_coeff(4*pi) 0 >>> _pi_coeff(5*pi) 1 >>> _pi_coeff(5.0*pi) 1 >>> _pi_coeff(5.5*pi) 3/2 >>> _pi_coeff(2 + pi) >>> _pi_coeff(2*Dummy(integer=True)*pi) 2 >>> _pi_coeff(2*Dummy(even=True)*pi) 0 " 1901,"async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]: fmt = None if not (url := info.get(CONF_STILL_IMAGE_URL)): return {}, None if not isinstance(url, template_helper.Template) and url: url = cv.template(url) url.hass = hass try: url = url.async_render(parse_result=False) except TemplateError as err: _LOGGER.error(""Error parsing template %s: %s"", url, err) return {CONF_STILL_IMAGE_URL: ""template_error""}, None verify_ssl = info.get(CONF_VERIFY_SSL) auth = generate_auth(info) try: async_client = get_async_client(hass, verify_ssl=verify_ssl)",Verify that the still image is valid before we create an entity.,12,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_test_still(hass, info) -> tuple[dict[str, str], str | None]: fmt = None if not (url := info.get(CONF_STILL_IMAGE_URL)): return {}, None if not isinstance(url, template_helper.Template) and url: url = cv.template(url) url.hass = hass try: url = url.async_render(parse_result=False) except TemplateError as err: _LOGGER.error(""Error parsing template %s: %s"", url, err) return {CONF_STILL_IMAGE_URL: ""template_error""}, None verify_ssl = info.get(CONF_VERIFY_SSL) auth = generate_auth(info) try: async_client = get_async_client(hass, verify_ssl=verify_ssl) ``` ###Assistant : Verify that the still image is valid before we create an entity. " 1902,"def normalize(self, a): a = _convert_other(a, raiseit=True) return a.normalize(context=self) ","normalize reduces an operand to its simplest form. Essentially a plus operation with all trailing zeros removed from the result. 
>>> ExtendedContext.normalize(Decimal('2.1')) Decimal('2.1') >>> ExtendedContext.normalize(Decimal('-2.0')) Decimal('-2') >>> ExtendedContext.normalize(Decimal('1.200')) Decimal('1.2') >>> ExtendedContext.normalize(Decimal('-120')) Decimal('-1.2E+2') >>> ExtendedContext.normalize(Decimal('120.00')) Decimal('1.2E+2') >>> ExtendedContext.normalize(Decimal('0.00')) Decimal('0') >>> ExtendedContext.normalize(6) Decimal('6') ",41,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def normalize(self, a): a = _convert_other(a, raiseit=True) return a.normalize(context=self) ``` ###Assistant : normalize reduces an operand to its simplest form. Essentially a plus operation with all trailing zeros removed from the result. >>> ExtendedContext.normalize(Decimal('2.1')) Decimal('2.1') >>> ExtendedContext.normalize(Decimal('-2.0')) Decimal('-2') >>> ExtendedContext.normalize(Decimal('1.200')) Decimal('1.2') >>> ExtendedContext.normalize(Decimal('-120')) Decimal('-1.2E+2') >>> ExtendedContext.normalize(Decimal('120.00')) Decimal('1.2E+2') >>> ExtendedContext.normalize(Decimal('0.00')) Decimal('0') >>> ExtendedContext.normalize(6) Decimal('6') " 1903,"def get_tables(self) -> StatusResponse: query = result = self.native_query(query) df = result.data_frame df = df[['TABLE_NAME' 'TABLE_TYPE']] result.data_frame = df.rename(columns={'TABLE_NAME': 'table_name', 'TABLE_TYPE': 'table_type'}) return result "," Return list of entities that will be accessible as tables. Returns: HandlerResponse SELECT * FROM INFORMATION_SCHEMA.TABLES ",16,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_tables(self) -> StatusResponse: query = result = self.native_query(query) df = result.data_frame df = df[['TABLE_NAME' 'TABLE_TYPE']] result.data_frame = df.rename(columns={'TABLE_NAME': 'table_name', 'TABLE_TYPE': 'table_type'}) return result ``` ###Assistant : Return list of entities that will be accessible as tables. Returns: HandlerResponse SELECT * FROM INFORMATION_SCHEMA.TABLES " 1904,"def get_expected_values(self, image_inputs, batched=False): if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] scale = self.size / min(w, h) if h < w: newh, neww = self.size, scale * w else: newh, neww = scale * h, self.size max_size = int((1333 / 800) * self.size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width @require_torch @require_vision"," This function computes the expected height and width when providing images to ViltFeatureExtractor, assuming do_resize is set to True with a scalar size and size_divisor. ",25,131,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_expected_values(self, image_inputs, batched=False): if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size else: h, w = image.shape[1], image.shape[2] scale = self.size / min(w, h) if h < w: newh, neww = self.size, scale * w else: newh, neww = scale * h, self.size max_size = int((1333 / 800) * self.size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width @require_torch @require_vision ``` ###Assistant : This function computes the expected height and width when providing images to ViltFeatureExtractor, assuming do_resize is set to True with a scalar size and size_divisor. " 1905,"def preprocss_testing_data(self, data): num_augs = len(data[0]['img']) batch_size = len(data) aug_batch_imgs = [] aug_batch_data_samples = [] # adjust `images` and `data_samples` to a list of list # outer list is test-time augmentation and inter list # is batch dimension for aug_index in range(num_augs): batch_imgs = [] batch_data_samples = [] for batch_index in range(batch_size): single_img = data[batch_index]['img'][aug_index] # to gpu and normalize single_img = single_img.to(self.device) if self.to_rgb and single_img[0].size(0) == 3: single_img = single_img[[2, 1, 0], ...] single_img = (single_img - self.pixel_mean) / self.pixel_std batch_imgs.append(single_img) batch_data_samples.append( data[batch_index]['data_sample'][aug_index]) aug_batch_imgs.append(stack_batch(batch_imgs)) aug_batch_data_samples.append(batch_data_samples) return aug_batch_imgs, aug_batch_data_samples "," Process input data during training and testing phases. Args: data (list[dict]): The data to be processed, which comes from dataloader. The list indicate the batch dimension. Each dict contains these keys: - `img` (list[Tensor]): Image tensor with different test-time augmentation. - `data_sample` (list[:obj:`GeneralData`]): Meta information and annotations under different test-time augmentation. Returns: tuple: It should contain 2 items. - aug_batch_imgs (list[Tensor]): List of batch image tensor. The list indicate the test-time augmentations. Note that the batch size always is 1 when do the augtest. - aug_batch_data_samples (list[list[:obj:`GeneralData`]], Optional): The Data Samples. It usually includes information such as `gt_instance`. Return None If the input datas does not contain `data_sample`. The outer list indicate the number of augmentations and inter list indicate the batch dimension. ",123,90,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def preprocss_testing_data(self, data): num_augs = len(data[0]['img']) batch_size = len(data) aug_batch_imgs = [] aug_batch_data_samples = [] # adjust `images` and `data_samples` to a list of list # outer list is test-time augmentation and inter list # is batch dimension for aug_index in range(num_augs): batch_imgs = [] batch_data_samples = [] for batch_index in range(batch_size): single_img = data[batch_index]['img'][aug_index] # to gpu and normalize single_img = single_img.to(self.device) if self.to_rgb and single_img[0].size(0) == 3: single_img = single_img[[2, 1, 0], ...] single_img = (single_img - self.pixel_mean) / self.pixel_std batch_imgs.append(single_img) batch_data_samples.append( data[batch_index]['data_sample'][aug_index]) aug_batch_imgs.append(stack_batch(batch_imgs)) aug_batch_data_samples.append(batch_data_samples) return aug_batch_imgs, aug_batch_data_samples ``` ###Assistant : Process input data during training and testing phases. Args: data (list[dict]): The data to be processed, which comes from dataloader. The list indicate the batch dimension. Each dict contains these keys: - `img` (list[Tensor]): Image tensor with different test-time augmentation. - `data_sample` (list[:obj:`GeneralData`]): Meta information and annotations under different test-time augmentation. Returns: tuple: It should contain 2 items. - aug_batch_imgs (list[Tensor]): List of batch image tensor. The list indicate the test-time augmentations. Note that the batch size always is 1 when do the augtest. - aug_batch_data_samples (list[list[:obj:`GeneralData`]], Optional): The Data Samples. It usually includes information such as `gt_instance`. Return None If the input datas does not contain `data_sample`. The outer list indicate the number of augmentations and inter list indicate the batch dimension. " 1906,"def list_templates() -> List[pathlib.Path]: return (pathlib.Path(__file__).parent / ""templates"").glob(""*.html.j2"") ","List the available HTML templates. Returns: List[pathlib.Path]: A list of files with .html.j2 extensions inside ./templates/ ",16,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list_templates() -> List[pathlib.Path]: return (pathlib.Path(__file__).parent / ""templates"").glob(""*.html.j2"") ``` ###Assistant : List the available HTML templates. Returns: List[pathlib.Path]: A list of files with .html.j2 extensions inside ./templates/ " 1907,"def usable_pip_file(path): # type: (t.Optional[str]) -> bool return bool(path) and os.path.exists(path) and bool(os.path.getsize(path)) # Cryptography ","Return True if the specified pip file is usable, otherwise False.",11,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def usable_pip_file(path): # type: (t.Optional[str]) -> bool return bool(path) and os.path.exists(path) and bool(os.path.getsize(path)) # Cryptography ``` ###Assistant : Return True if the specified pip file is usable, otherwise False. " 1908,"def binary_accuracy(y_true, y_pred, threshold=0.5): y_pred = tf.convert_to_tensor(y_pred) threshold = tf.cast(threshold, y_pred.dtype) y_pred = tf.cast(y_pred > threshold, y_pred.dtype) return backend.mean(tf.equal(y_true, y_pred), axis=-1) @keras_export('keras.metrics.categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support","Calculates how often predictions match binary labels. 
Standalone usage: >>> y_true = [[1], [1], [0], [0]] >>> y_pred = [[1], [1], [0], [0]] >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred) >>> assert m.shape == (4,) >>> m.numpy() array([1., 1., 1., 1.], dtype=float32) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. threshold: (Optional) Float representing the threshold for deciding whether prediction values are 1 or 0. Returns: Binary accuracy values. shape = `[batch_size, d0, .. dN-1]` ",86,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def binary_accuracy(y_true, y_pred, threshold=0.5): y_pred = tf.convert_to_tensor(y_pred) threshold = tf.cast(threshold, y_pred.dtype) y_pred = tf.cast(y_pred > threshold, y_pred.dtype) return backend.mean(tf.equal(y_true, y_pred), axis=-1) @keras_export('keras.metrics.categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support ``` ###Assistant : Calculates how often predictions match binary labels. Standalone usage: >>> y_true = [[1], [1], [0], [0]] >>> y_pred = [[1], [1], [0], [0]] >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred) >>> assert m.shape == (4,) >>> m.numpy() array([1., 1., 1., 1.], dtype=float32) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. threshold: (Optional) Float representing the threshold for deciding whether prediction values are 1 or 0. Returns: Binary accuracy values. shape = `[batch_size, d0, .. dN-1]` " 1909,"def _object2proto(self) -> SyftOblvClient_PB: return SyftOblvClient_PB( token=self.token, oblivious_user_id=self.oblivious_user_id, cookies=self.cookies, headers=self.headers, timeout=self.timeout, verify_ssl=self.verify_ssl, ) ","Returns a protobuf serialization of self. As a requirement of all objects which inherit from Serializable, this method transforms the current object into the corresponding Protobuf object so that it can be further serialized. :return: returns a protobuf object :rtype: SyftOblvClient_PB .. note:: This method is purely an internal method. Please use serialize(object) or one of the other public serialization methods if you wish to serialize an object. ",68,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _object2proto(self) -> SyftOblvClient_PB: return SyftOblvClient_PB( token=self.token, oblivious_user_id=self.oblivious_user_id, cookies=self.cookies, headers=self.headers, timeout=self.timeout, verify_ssl=self.verify_ssl, ) ``` ###Assistant : Returns a protobuf serialization of self. As a requirement of all objects which inherit from Serializable, this method transforms the current object into the corresponding Protobuf object so that it can be further serialized. :return: returns a protobuf object :rtype: SyftOblvClient_PB .. note:: This method is purely an internal method. Please use serialize(object) or one of the other public serialization methods if you wish to serialize an object. 
" 1910,"def test_bulk_handle_digest_email_skips_deactivated_users(self) -> None: realm = get_realm(""zulip"") hamlet = self.example_user(""hamlet"") user_ids = list( UserProfile.objects.filter(is_bot=False, realm=realm).values_list(""id"", flat=True) ) do_deactivate_user(hamlet, acting_user=None) with mock.patch(""zerver.lib.digest.enough_traffic"", return_value=True), mock.patch( ""zerver.lib.digest.send_future_email"" ) as mock_send_email: bulk_handle_digest_email(user_ids, 1) emailed_user_ids = [ call_args[1][""to_user_ids""][0] for call_args in mock_send_email.call_args_list ] self.assertEqual( set(emailed_user_ids), set(user_id for user_id in user_ids if user_id != hamlet.id) ) "," A user id may be added to the queue before the user is deactivated. In such a case, the function responsible for sending the email should correctly skip them. ",29,50,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_bulk_handle_digest_email_skips_deactivated_users(self) -> None: realm = get_realm(""zulip"") hamlet = self.example_user(""hamlet"") user_ids = list( UserProfile.objects.filter(is_bot=False, realm=realm).values_list(""id"", flat=True) ) do_deactivate_user(hamlet, acting_user=None) with mock.patch(""zerver.lib.digest.enough_traffic"", return_value=True), mock.patch( ""zerver.lib.digest.send_future_email"" ) as mock_send_email: bulk_handle_digest_email(user_ids, 1) emailed_user_ids = [ call_args[1][""to_user_ids""][0] for call_args in mock_send_email.call_args_list ] self.assertEqual( set(emailed_user_ids), set(user_id for user_id in user_ids if user_id != hamlet.id) ) ``` ###Assistant : A user id may be added to the queue before the user is deactivated. In such a case, the function responsible for sending the email should correctly skip them. " 1911,"def test_driver_4(): args_list = [ 'tests/tests.csv', '-is', ',', '-target', 'class', '-g', '1', '-p', '2', '-cv', '3', '-s', '42', '-config', 'TPOT light', '-v', '3' ] args = _get_arg_parser().parse_args(args_list) with captured_output() as (out, err): tpot_driver(args) ret_stdout = out.getvalue() assert ""TPOT settings"" in ret_stdout assert ""Final Pareto front testing scores"" in ret_stdout try: ret_val = float(ret_stdout.split('\n')[-2].split('\t')[1]) except Exception: ret_val = -float('inf') assert ret_val > 0.0 ",Assert that the tpot_driver() in TPOT driver outputs normal result with verbosity = 3.,14,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_driver_4(): args_list = [ 'tests/tests.csv', '-is', ',', '-target', 'class', '-g', '1', '-p', '2', '-cv', '3', '-s', '42', '-config', 'TPOT light', '-v', '3' ] args = _get_arg_parser().parse_args(args_list) with captured_output() as (out, err): tpot_driver(args) ret_stdout = out.getvalue() assert ""TPOT settings"" in ret_stdout assert ""Final Pareto front testing scores"" in ret_stdout try: ret_val = float(ret_stdout.split('\n')[-2].split('\t')[1]) except Exception: ret_val = -float('inf') assert ret_val > 0.0 ``` ###Assistant : Assert that the tpot_driver() in TPOT driver outputs normal result with verbosity = 3. 
" 1912,"async def test_set_avatar(self) -> None: handler = self.hs.get_sso_handler() # Create a new user to set avatar for reg_handler = self.hs.get_registration_handler() user_id = self.get_success(reg_handler.register_user(approved=True)) self.assertTrue( self.get_success(handler.set_avatar(user_id, ""http://my.server/me.png"")) ) # Ensure avatar is set on this newly created user, # so no need to compare for the exact image profile_handler = self.hs.get_profile_handler() profile = self.get_success(profile_handler.get_profile(user_id)) self.assertIsNot(profile[""avatar_url""], None) ",Tests successfully setting the avatar of a newly created user,10,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_set_avatar(self) -> None: handler = self.hs.get_sso_handler() # Create a new user to set avatar for reg_handler = self.hs.get_registration_handler() user_id = self.get_success(reg_handler.register_user(approved=True)) self.assertTrue( self.get_success(handler.set_avatar(user_id, ""http://my.server/me.png"")) ) # Ensure avatar is set on this newly created user, # so no need to compare for the exact image profile_handler = self.hs.get_profile_handler() profile = self.get_success(profile_handler.get_profile(user_id)) self.assertIsNot(profile[""avatar_url""], None) ``` ###Assistant : Tests successfully setting the avatar of a newly created user " 1913,"def tune_decorated(api_key_file): tuner = tune.Tuner( decorated_train_function, tune_config=tune.TuneConfig( metric=""loss"", mode=""min"", ), param_space={ ""mean"": tune.grid_search([1, 2, 3, 4, 5]), ""sd"": tune.uniform(0.2, 0.8), ""wandb"": {""api_key_file"": api_key_file, ""project"": ""Wandb_example""}, }, ) tuner.fit() ",Example for using the @wandb_mixin decorator with the function API,10,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tune_decorated(api_key_file): tuner = tune.Tuner( decorated_train_function, tune_config=tune.TuneConfig( metric=""loss"", mode=""min"", ), param_space={ ""mean"": tune.grid_search([1, 2, 3, 4, 5]), ""sd"": tune.uniform(0.2, 0.8), ""wandb"": {""api_key_file"": api_key_file, ""project"": ""Wandb_example""}, }, ) tuner.fit() ``` ###Assistant : Example for using the @wandb_mixin decorator with the function API " 1914,"def format_usage(self, usage): # type: (str) -> str msg = ""\nUsage: {}\n"".format(self.indent_lines(textwrap.dedent(usage), "" "")) return msg "," Ensure there is only one newline between usage and the first heading if there is no description. ",17,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def format_usage(self, usage): # type: (str) -> str msg = ""\nUsage: {}\n"".format(self.indent_lines(textwrap.dedent(usage), "" "")) return msg ``` ###Assistant : Ensure there is only one newline between usage and the first heading if there is no description. 
" 1915,"def from_index_summation(expr, first_index=None, last_index=None, dimensions=None): r from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix first_indices = [] if first_index is not None: first_indices.append(first_index) if last_index is not None: first_indices.append(last_index) arr = convert_indexed_to_array(expr, first_indices=first_indices) return convert_array_to_matrix(arr) "," Parse expression of matrices with explicitly summed indices into a matrix expression without indices, if possible. This transformation expressed in mathematical notation: `\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \Longrightarrow \mathbf{A}\cdot \mathbf{B}` Optional parameter ``first_index``: specify which free index to use as the index starting the expression. Examples ======== >>> from sympy import MatrixSymbol, MatrixExpr, Sum >>> from sympy.abc import i, j, k, l, N >>> A = MatrixSymbol(""A"", N, N) >>> B = MatrixSymbol(""B"", N, N) >>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1)) >>> MatrixExpr.from_index_summation(expr) A*B Transposition is detected: >>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1)) >>> MatrixExpr.from_index_summation(expr) A.T*B Detect the trace: >>> expr = Sum(A[i, i], (i, 0, N-1)) >>> MatrixExpr.from_index_summation(expr) Trace(A) More complicated expressions: >>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1)) >>> MatrixExpr.from_index_summation(expr) A*B.T*A.T ",133,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def from_index_summation(expr, first_index=None, last_index=None, dimensions=None): r from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix first_indices = [] if first_index is not None: first_indices.append(first_index) if last_index is not None: first_indices.append(last_index) arr = convert_indexed_to_array(expr, first_indices=first_indices) return convert_array_to_matrix(arr) ``` ###Assistant : Parse expression of matrices with explicitly summed indices into a matrix expression without indices, if possible. This transformation expressed in mathematical notation: `\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \Longrightarrow \mathbf{A}\cdot \mathbf{B}` Optional parameter ``first_index``: specify which free index to use as the index starting the expression. Examples ======== >>> from sympy import MatrixSymbol, MatrixExpr, Sum >>> from sympy.abc import i, j, k, l, N >>> A = MatrixSymbol(""A"", N, N) >>> B = MatrixSymbol(""B"", N, N) >>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1)) >>> MatrixExpr.from_index_summation(expr) A*B Transposition is detected: >>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1)) >>> MatrixExpr.from_index_summation(expr) A.T*B Detect the trace: >>> expr = Sum(A[i, i], (i, 0, N-1)) >>> MatrixExpr.from_index_summation(expr) Trace(A) More complicated expressions: >>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1)) >>> MatrixExpr.from_index_summation(expr) A*B.T*A.T " 1916,"def get_views(self): q = f""SHOW FULL TABLES IN {self.database} WHERE TABLE_TYPE LIKE 'VIEW';"" result = self.native_query(q) return result "," Get more information about specific database views ",7,18,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_views(self): q = f""SHOW FULL TABLES IN {self.database} WHERE TABLE_TYPE LIKE 'VIEW';"" result = self.native_query(q) return result ``` ###Assistant : Get more information about specific database views " 1917,"def _mac(model, obs, h): B, n_agents = obs.size(0), obs.size(1) if not isinstance(obs, dict): obs = {""obs"": obs} obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()} h_flat = [s.reshape([B * n_agents, -1]) for s in h] q_flat, h_flat = model(obs_agents_as_batches, h_flat, None) return q_flat.reshape([B, n_agents, -1]), [ s.reshape([B, n_agents, -1]) for s in h_flat ] ","Forward pass of the multi-agent controller. Args: model: TorchModelV2 class obs: Tensor of shape [B, n_agents, obs_size] h: List of tensors of shape [B, n_agents, h_size] Returns: q_vals: Tensor of shape [B, n_agents, n_actions] h: Tensor of shape [B, n_agents, h_size] ",41,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _mac(model, obs, h): B, n_agents = obs.size(0), obs.size(1) if not isinstance(obs, dict): obs = {""obs"": obs} obs_agents_as_batches = {k: _drop_agent_dim(v) for k, v in obs.items()} h_flat = [s.reshape([B * n_agents, -1]) for s in h] q_flat, h_flat = model(obs_agents_as_batches, h_flat, None) return q_flat.reshape([B, n_agents, -1]), [ s.reshape([B, n_agents, -1]) for s in h_flat ] ``` ###Assistant : Forward pass of the multi-agent controller. Args: model: TorchModelV2 class obs: Tensor of shape [B, n_agents, obs_size] h: List of tensors of shape [B, n_agents, h_size] Returns: q_vals: Tensor of shape [B, n_agents, n_actions] h: Tensor of shape [B, n_agents, h_size] " 1918,"def _get_count(self): has_meta = all(val is not None for val in self._alignments.video_meta_data.values()) retval = len(self._alignments.video_meta_data[""pts_time""]) if has_meta else None logger.debug(""Frame count from alignments file: (has_meta: %s, %s"", has_meta, retval) return retval "," If the alignments file has been run through the manual tool, then it will hold video meta information, meaning that the count of frames in the alignment file can be relied on to be accurate. Returns ------- int or ``None`` For video input which contain video meta-data in the alignments file then the count of frames is returned. In all other cases ``None`` is returned ",65,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_count(self): has_meta = all(val is not None for val in self._alignments.video_meta_data.values()) retval = len(self._alignments.video_meta_data[""pts_time""]) if has_meta else None logger.debug(""Frame count from alignments file: (has_meta: %s, %s"", has_meta, retval) return retval ``` ###Assistant : If the alignments file has been run through the manual tool, then it will hold video meta information, meaning that the count of frames in the alignment file can be relied on to be accurate. Returns ------- int or ``None`` For video input which contain video meta-data in the alignments file then the count of frames is returned. In all other cases ``None`` is returned " 1919,"def test_sparse1_with_non_sparse_components(): fit_then_transform( sparse1_paratial_1h.todense(), sparse1, categorical_features=[True, False] ) ",Test fit_transform a sparse matrix with specifying categorical_features.,8,8,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_sparse1_with_non_sparse_components(): fit_then_transform( sparse1_paratial_1h.todense(), sparse1, categorical_features=[True, False] ) ``` ###Assistant : Test fit_transform a sparse matrix with specifying categorical_features. " 1920,"def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch): MNAME = ""hi"" foutl = get_io_paths(hello_world_f90, mname=MNAME) ipath = foutl.f90inp monkeypatch.setattr(sys, ""argv"", f'f2py {ipath} -m {MNAME}'.split()) with util.switchdir(ipath.parent): f2pycli() # Always generate C module assert Path.exists(foutl.cmodf) # File contains a function, check for F77 wrappers assert Path.exists(foutl.wrap77) ","Checks the generation of files based on a module name CLI :: -m ",13,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch): MNAME = ""hi"" foutl = get_io_paths(hello_world_f90, mname=MNAME) ipath = foutl.f90inp monkeypatch.setattr(sys, ""argv"", f'f2py {ipath} -m {MNAME}'.split()) with util.switchdir(ipath.parent): f2pycli() # Always generate C module assert Path.exists(foutl.cmodf) # File contains a function, check for F77 wrappers assert Path.exists(foutl.wrap77) ``` ###Assistant : Checks the generation of files based on a module name CLI :: -m " 1921,"def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None assert isinstance(args, EnvironmentConfig) with delegation_context(args, host_state): if isinstance(args, TestConfig): args.metadata.ci_provider = get_ci_provider().code make_dirs(ResultType.TMP.path) with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd: args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name)) args.metadata.to_file(args.metadata_path) try: delegate_command(args, host_state, exclude, require) finally: args.metadata_path = None else: delegate_command(args, host_state, exclude, require) ",Delegate execution of ansible-test to another environment.,7,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None assert isinstance(args, EnvironmentConfig) with delegation_context(args, host_state): if isinstance(args, TestConfig): args.metadata.ci_provider = get_ci_provider().code make_dirs(ResultType.TMP.path) with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd: args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name)) args.metadata.to_file(args.metadata_path) try: delegate_command(args, host_state, exclude, require) finally: args.metadata_path = None else: delegate_command(args, host_state, exclude, require) ``` ###Assistant : Delegate execution of ansible-test to another environment. " 1922,"def require_torch_non_multi_gpu(test_case): if not is_torch_available(): return unittest.skip(""test requires PyTorch"")(test_case) import torch return unittest.skipUnless(torch.cuda.device_count() < 2, ""test requires 0 or 1 GPU"")(test_case) "," Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch). ",13,21,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def require_torch_non_multi_gpu(test_case): if not is_torch_available(): return unittest.skip(""test requires PyTorch"")(test_case) import torch return unittest.skipUnless(torch.cuda.device_count() < 2, ""test requires 0 or 1 GPU"")(test_case) ``` ###Assistant : Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch). " 1923,"def _ReturnKeyHandler(self, event): # if the element is disabled, ignore the event if self.Disabled: return MyForm = self.ParentForm button_element = self._FindReturnKeyBoundButton(MyForm) if button_element is not None: button_element.ButtonCallBack() "," Internal callback for the ENTER / RETURN key. Results in calling the ButtonCallBack for element that has the return key bound to it, just as if button was clicked. :param event: :type event: ",33,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _ReturnKeyHandler(self, event): # if the element is disabled, ignore the event if self.Disabled: return MyForm = self.ParentForm button_element = self._FindReturnKeyBoundButton(MyForm) if button_element is not None: button_element.ButtonCallBack() ``` ###Assistant : Internal callback for the ENTER / RETURN key. Results in calling the ButtonCallBack for element that has the return key bound to it, just as if button was clicked. :param event: :type event: " 1924,"def uint64_frame(): return DataFrame( {""A"": np.arange(3), ""B"": [2**63, 2**63 + 5, 2**63 + 10]}, dtype=np.uint64 ) @pytest.fixture"," Fixture for DataFrame with uint64 values Columns are ['A', 'B'] ",10,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def uint64_frame(): return DataFrame( {""A"": np.arange(3), ""B"": [2**63, 2**63 + 5, 2**63 + 10]}, dtype=np.uint64 ) @pytest.fixture ``` ###Assistant : Fixture for DataFrame with uint64 values Columns are ['A', 'B'] " 1925,"def test_null_annotation(self): book = Book.objects.annotate( no_value=Value(None, output_field=IntegerField()) ).first() self.assertIsNone(book.no_value) "," Annotating None onto a model round-trips ",6,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_null_annotation(self): book = Book.objects.annotate( no_value=Value(None, output_field=IntegerField()) ).first() self.assertIsNone(book.no_value) ``` ###Assistant : Annotating None onto a model round-trips " 1926,"def icosahedral_graph(create_using=None): description = [ ""adjacencylist"", ""Platonic Icosahedral Graph"", 12, [ [2, 6, 8, 9, 12], [3, 6, 7, 9], [4, 7, 9, 10], [5, 7, 10, 11], [6, 7, 11, 12], [7, 12], [], [9, 10, 11, 12], [10], [11], [12], [], ], ] G = make_small_undirected_graph(description, create_using) return G "," Returns the Platonic Icosahedral graph. The icosahedral graph has 12 nodes and 30 edges. It is a Platonic graph whose nodes have the connectivity of the icosahedron. It is undirected, regular and Hamiltonian [1]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph Icosahedral graph with 12 nodes and 30 edges. References ---------- .. [1] https://mathworld.wolfram.com/IcosahedralGraph.html ",73,51,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def icosahedral_graph(create_using=None): description = [ ""adjacencylist"", ""Platonic Icosahedral Graph"", 12, [ [2, 6, 8, 9, 12], [3, 6, 7, 9], [4, 7, 9, 10], [5, 7, 10, 11], [6, 7, 11, 12], [7, 12], [], [9, 10, 11, 12], [10], [11], [12], [], ], ] G = make_small_undirected_graph(description, create_using) return G ``` ###Assistant : Returns the Platonic Icosahedral graph. The icosahedral graph has 12 nodes and 30 edges. It is a Platonic graph whose nodes have the connectivity of the icosahedron. It is undirected, regular and Hamiltonian [1]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph Icosahedral graph with 12 nodes and 30 edges. References ---------- .. [1] https://mathworld.wolfram.com/IcosahedralGraph.html " 1927,"def evaluate(model, criterion, metric, data_loader): model.eval() metric.reset() losses = [] for batch in tqdm(data_loader): input_ids, token_type_ids, position_ids, masks, ent_label, spo_label = batch max_batch_len = input_ids.shape[-1] ent_mask = paddle.unsqueeze(masks, axis=2) spo_mask = paddle.matmul(ent_mask, ent_mask, transpose_y=True) spo_mask = paddle.unsqueeze(spo_mask, axis=1) logits = model(input_ids, token_type_ids, position_ids) ent_loss = criterion( logits[0], ent_label[0], weight=ent_mask, reduction='sum') spo_loss = criterion( logits[1], spo_label[0], weight=spo_mask, reduction='sum') loss = ent_loss + spo_loss losses.append(loss.numpy()) lengths = paddle.sum(masks, axis=-1) correct = metric.compute(lengths, logits[0], logits[1], ent_label[1], spo_label[1]) metric.update(correct) results = metric.accumulate() print('eval loss: %.5f, entity f1: %.5f, spo f1: %.5f' % (np.mean(losses), results['entity'][2], results['spo'][2])) model.train() metric.reset() "," Given a dataset, it evals model and compute the metric. Args: model(obj:`paddle.nn.Layer`): A model to classify texts. dataloader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches. criterion(`paddle.nn.functional`): It can compute the loss. metric(obj:`paddle.metric.Metric`): The evaluation metric. ",34,93,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def evaluate(model, criterion, metric, data_loader): model.eval() metric.reset() losses = [] for batch in tqdm(data_loader): input_ids, token_type_ids, position_ids, masks, ent_label, spo_label = batch max_batch_len = input_ids.shape[-1] ent_mask = paddle.unsqueeze(masks, axis=2) spo_mask = paddle.matmul(ent_mask, ent_mask, transpose_y=True) spo_mask = paddle.unsqueeze(spo_mask, axis=1) logits = model(input_ids, token_type_ids, position_ids) ent_loss = criterion( logits[0], ent_label[0], weight=ent_mask, reduction='sum') spo_loss = criterion( logits[1], spo_label[0], weight=spo_mask, reduction='sum') loss = ent_loss + spo_loss losses.append(loss.numpy()) lengths = paddle.sum(masks, axis=-1) correct = metric.compute(lengths, logits[0], logits[1], ent_label[1], spo_label[1]) metric.update(correct) results = metric.accumulate() print('eval loss: %.5f, entity f1: %.5f, spo f1: %.5f' % (np.mean(losses), results['entity'][2], results['spo'][2])) model.train() metric.reset() ``` ###Assistant : Given a dataset, it evals model and compute the metric. Args: model(obj:`paddle.nn.Layer`): A model to classify texts. 
dataloader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches. criterion(`paddle.nn.functional`): It can compute the loss. metric(obj:`paddle.metric.Metric`): The evaluation metric. " 1928,"def get_fields(self, include_parents=True, include_hidden=False): if include_parents is False: include_parents = PROXY_PARENTS return self._get_fields( include_parents=include_parents, include_hidden=include_hidden ) "," Return a list of fields associated to the model. By default, include forward and reverse fields, fields derived from inheritance, but not hidden fields. The returned fields can be changed using the parameters: - include_parents: include fields derived from inheritance - include_hidden: include fields that have a related_name that starts with a ""+"" ",53,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_fields(self, include_parents=True, include_hidden=False): if include_parents is False: include_parents = PROXY_PARENTS return self._get_fields( include_parents=include_parents, include_hidden=include_hidden ) ``` ###Assistant : Return a list of fields associated to the model. By default, include forward and reverse fields, fields derived from inheritance, but not hidden fields. The returned fields can be changed using the parameters: - include_parents: include fields derived from inheritance - include_hidden: include fields that have a related_name that starts with a ""+"" " 1929,"def _key_to_file(self, session_key=None): if session_key is None: session_key = self._get_or_create_session_key() # Make sure we're not vulnerable to directory traversal. Session keys # should always be md5s, so they should never contain directory # components. if not set(session_key).issubset(VALID_KEY_CHARS): raise InvalidSessionKey(""Invalid characters in session key"") return os.path.join(self.storage_path, self.file_prefix + session_key) "," Get the file associated with this session key. ",8,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _key_to_file(self, session_key=None): if session_key is None: session_key = self._get_or_create_session_key() # Make sure we're not vulnerable to directory traversal. Session keys # should always be md5s, so they should never contain directory # components. if not set(session_key).issubset(VALID_KEY_CHARS): raise InvalidSessionKey(""Invalid characters in session key"") return os.path.join(self.storage_path, self.file_prefix + session_key) ``` ###Assistant : Get the file associated with this session key. " 1930,"def check_against_chunks(self, chunks): # type: (Iterator[bytes]) -> None gots = {} for hash_name in self._allowed.keys(): try: gots[hash_name] = hashlib.new(hash_name) except (ValueError, TypeError): raise InstallationError(f""Unknown hash name: {hash_name}"") for chunk in chunks: for hash in gots.values(): hash.update(chunk) for hash_name, got in gots.items(): if got.hexdigest() in self._allowed[hash_name]: return self._raise(gots) ","Check good hashes against ones built from iterable of chunks of data. Raise HashMismatch if none match. ",17,47,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def check_against_chunks(self, chunks): # type: (Iterator[bytes]) -> None gots = {} for hash_name in self._allowed.keys(): try: gots[hash_name] = hashlib.new(hash_name) except (ValueError, TypeError): raise InstallationError(f""Unknown hash name: {hash_name}"") for chunk in chunks: for hash in gots.values(): hash.update(chunk) for hash_name, got in gots.items(): if got.hexdigest() in self._allowed[hash_name]: return self._raise(gots) ``` ###Assistant : Check good hashes against ones built from iterable of chunks of data. Raise HashMismatch if none match. " 1931,"def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index): bin_labels = labels.new_full((labels.size(0), label_channels), 0) valid_mask = (labels >= 0) & (labels != ignore_index) inds = torch.nonzero( valid_mask & (labels < label_channels), as_tuple=False) if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 valid_mask = valid_mask.view(-1, 1).expand(labels.size(0), label_channels).float() if label_weights is None: bin_label_weights = valid_mask else: bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels) bin_label_weights *= valid_mask return bin_labels, bin_label_weights, valid_mask ",Expand onehot labels to match the size of prediction.,9,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index): bin_labels = labels.new_full((labels.size(0), label_channels), 0) valid_mask = (labels >= 0) & (labels != ignore_index) inds = torch.nonzero( valid_mask & (labels < label_channels), as_tuple=False) if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 valid_mask = valid_mask.view(-1, 1).expand(labels.size(0), label_channels).float() if label_weights is None: bin_label_weights = valid_mask else: bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels) bin_label_weights *= valid_mask return bin_labels, bin_label_weights, valid_mask ``` ###Assistant : Expand onehot labels to match the size of prediction. " 1932,"def test_sequence_input_types(self, input_type): if not tf.executing_eagerly(): self.skipTest(""Improved checking is only present in data_adapter."") xy_function, x_function = self._make_sequence_input_functions( input_type ) fit_kwargs, evaluate_kwargs, predict_kwargs = {}, {}, {} if input_type == ""generator"": fit_kwargs[""steps_per_epoch""] = 4 evaluate_kwargs[""steps""] = 4 predict_kwargs[""steps""] = 4 model = test_utils.get_small_mlp(1, 1, 1) model.compile( loss=""mse"", optimizer=""sgd"", run_eagerly=test_utils.should_run_eagerly(), ) model.fit(xy_function(use_namedtuple=False), **fit_kwargs) model.evaluate(xy_function(use_namedtuple=False), **evaluate_kwargs) model.predict(x_function(use_namedtuple=False), **predict_kwargs) ",Ensure that namedtuples and tuples are plumbed identically.,8,55,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_sequence_input_types(self, input_type): if not tf.executing_eagerly(): self.skipTest(""Improved checking is only present in data_adapter."") xy_function, x_function = self._make_sequence_input_functions( input_type ) fit_kwargs, evaluate_kwargs, predict_kwargs = {}, {}, {} if input_type == ""generator"": fit_kwargs[""steps_per_epoch""] = 4 evaluate_kwargs[""steps""] = 4 predict_kwargs[""steps""] = 4 model = test_utils.get_small_mlp(1, 1, 1) model.compile( loss=""mse"", optimizer=""sgd"", run_eagerly=test_utils.should_run_eagerly(), ) model.fit(xy_function(use_namedtuple=False), **fit_kwargs) model.evaluate(xy_function(use_namedtuple=False), **evaluate_kwargs) model.predict(x_function(use_namedtuple=False), **predict_kwargs) ``` ###Assistant : Ensure that namedtuples and tuples are plumbed identically. " 1933,"def get_stock_value_on(warehouse=None, posting_date=None, item_code=None): if not posting_date: posting_date = nowdate() values, condition = [posting_date], """" if warehouse: lft, rgt, is_group = frappe.db.get_value(""Warehouse"", warehouse, [""lft"", ""rgt"", ""is_group""]) if is_group: values.extend([lft, rgt]) condition += ""and exists (\ select name from `tabWarehouse` wh where wh.name = sle.warehouse\ and wh.lft >= %s and wh.rgt <= %s)"" else: values.append(warehouse) condition += "" AND warehouse = %s"" if item_code: values.append(item_code) condition += "" AND item_code = %s"" stock_ledger_entries = frappe.db.sql( .format( condition ), values, as_dict=1, ) sle_map = {} for sle in stock_ledger_entries: if not (sle.item_code, sle.warehouse) in sle_map: sle_map[(sle.item_code, sle.warehouse)] = flt(sle.stock_value) return sum(sle_map.values()) @frappe.whitelist()"," SELECT item_code, stock_value, name, warehouse FROM `tabStock Ledger Entry` sle WHERE posting_date <= %s {0} and is_cancelled = 0 ORDER BY timestamp(posting_date, posting_time) DESC, creation DESC ",26,100,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_stock_value_on(warehouse=None, posting_date=None, item_code=None): if not posting_date: posting_date = nowdate() values, condition = [posting_date], """" if warehouse: lft, rgt, is_group = frappe.db.get_value(""Warehouse"", warehouse, [""lft"", ""rgt"", ""is_group""]) if is_group: values.extend([lft, rgt]) condition += ""and exists (\ select name from `tabWarehouse` wh where wh.name = sle.warehouse\ and wh.lft >= %s and wh.rgt <= %s)"" else: values.append(warehouse) condition += "" AND warehouse = %s"" if item_code: values.append(item_code) condition += "" AND item_code = %s"" stock_ledger_entries = frappe.db.sql( .format( condition ), values, as_dict=1, ) sle_map = {} for sle in stock_ledger_entries: if not (sle.item_code, sle.warehouse) in sle_map: sle_map[(sle.item_code, sle.warehouse)] = flt(sle.stock_value) return sum(sle_map.values()) @frappe.whitelist() ``` ###Assistant : SELECT item_code, stock_value, name, warehouse FROM `tabStock Ledger Entry` sle WHERE posting_date <= %s {0} and is_cancelled = 0 ORDER BY timestamp(posting_date, posting_time) DESC, creation DESC " 1934,"def _is_installed_rpm(name): log.debug(f""_is_installed_rpm '{name}'"") cmd = [""/usr/bin/rpm"", ""-q"", name] return __salt__[""cmd.retcode""](cmd) == 0 "," Returns True if the rpm package is installed. Otherwise returns False. ",11,13,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _is_installed_rpm(name): log.debug(f""_is_installed_rpm '{name}'"") cmd = [""/usr/bin/rpm"", ""-q"", name] return __salt__[""cmd.retcode""](cmd) == 0 ``` ###Assistant : Returns True if the rpm package is installed. Otherwise returns False. " 1935,"def iter_tree_files(root, on_error=None, follow_links=None): if on_error is not None and not callable(on_error): raise TypeError(""on_error:{!r} is not callable."".format(on_error)) if follow_links is None: follow_links = True for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links): if not entry.is_dir(follow_links): yield entry.path # Alias `iter_tree_files()` as `iter_tree()`. iter_tree = iter_tree_files "," Walks the specified directory for all files. *root* (:class:`str`) is the root directory to search for files. *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. It will be called with the exception (:exc:`OSError`). Reraise the exception to abort the walk. Default is :data:`None` to ignore file-system exceptions. *follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk symbolic links that resolve to directories. Default is :data:`None` for :data:`True`. Raises :exc:`RecursionError` if recursion is detected. Returns an :class:`~collections.abc.Iterable` yielding the path to each file (:class:`str`) relative to *root*. ",90,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def iter_tree_files(root, on_error=None, follow_links=None): if on_error is not None and not callable(on_error): raise TypeError(""on_error:{!r} is not callable."".format(on_error)) if follow_links is None: follow_links = True for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links): if not entry.is_dir(follow_links): yield entry.path # Alias `iter_tree_files()` as `iter_tree()`. iter_tree = iter_tree_files ``` ###Assistant : Walks the specified directory for all files. *root* (:class:`str`) is the root directory to search for files. *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. It will be called with the exception (:exc:`OSError`). Reraise the exception to abort the walk. Default is :data:`None` to ignore file-system exceptions. *follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk symbolic links that resolve to directories. Default is :data:`None` for :data:`True`. Raises :exc:`RecursionError` if recursion is detected. Returns an :class:`~collections.abc.Iterable` yielding the path to each file (:class:`str`) relative to *root*. " 1936,"def _get_url_from_path(path, name): # type: (str, str) -> Optional[str] if _looks_like_path(name) and os.path.isdir(path): if is_installable_dir(path): return path_to_url(path) raise InstallationError( f""Directory {name!r} is not installable. Neither 'setup.py' "" ""nor 'pyproject.toml' found."" ) if not is_archive_file(path): return None if os.path.isfile(path): return path_to_url(path) urlreq_parts = name.split('@', 1) if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]): # If the path contains '@' and the part before it does not look # like a path, try to treat it as a PEP 440 URL req instead. 
return None logger.warning( 'Requirement %r looks like a filename, but the ' 'file does not exist', name ) return path_to_url(path) "," First, it checks whether a provided path is an installable directory (e.g. it has a setup.py). If it is, returns the path. If false, check if the path is an archive file (such as a .whl). The function checks if the path is a file. If false, if the path has an @, it will treat it as a PEP 440 URL requirement and return the path. ",67,100,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_url_from_path(path, name): # type: (str, str) -> Optional[str] if _looks_like_path(name) and os.path.isdir(path): if is_installable_dir(path): return path_to_url(path) raise InstallationError( f""Directory {name!r} is not installable. Neither 'setup.py' "" ""nor 'pyproject.toml' found."" ) if not is_archive_file(path): return None if os.path.isfile(path): return path_to_url(path) urlreq_parts = name.split('@', 1) if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]): # If the path contains '@' and the part before it does not look # like a path, try to treat it as a PEP 440 URL req instead. return None logger.warning( 'Requirement %r looks like a filename, but the ' 'file does not exist', name ) return path_to_url(path) ``` ###Assistant : First, it checks whether a provided path is an installable directory (e.g. it has a setup.py). If it is, returns the path. If false, check if the path is an archive file (such as a .whl). The function checks if the path is a file. If false, if the path has an @, it will treat it as a PEP 440 URL requirement and return the path. " 1937,"def test_install_fileset_with_bff_extension(): installp_call = MagicMock(return_value={""retcode"": 0, ""stdout"": """"}) fileset_pkg_name = ( ""/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff"" ) list_pkgs_mock = MagicMock( side_effect=[{""bos.rte.printers"": ""7.1.6.0""}, {""bos.rte.printers"": ""7.2.4.0""}] ) with patch(""pathlib.Path.is_file"", return_value=True): with patch.dict( aixpkg.__salt__, {""cmd.run_all"": installp_call, ""config.get"": MagicMock(return_value=False)}, ), patch.object(aixpkg, ""list_pkgs"", list_pkgs_mock): result = aixpkg.install(fileset_pkg_name) assert installp_call.call_count == 1 installp_call.assert_any_call( ""/usr/sbin/installp -acYXg -d /cecc/repos/aix72/TL3/BASE/installp/ppc bos.rte.printers_7.2.2.0.bff"", python_shell=False, ) expected = {""bos.rte.printers"": {""old"": ""7.1.6.0"", ""new"": ""7.2.4.0""}} assert result == expected "," Test install of fileset with bff extension ",7,61,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_install_fileset_with_bff_extension(): installp_call = MagicMock(return_value={""retcode"": 0, ""stdout"": """"}) fileset_pkg_name = ( ""/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff"" ) list_pkgs_mock = MagicMock( side_effect=[{""bos.rte.printers"": ""7.1.6.0""}, {""bos.rte.printers"": ""7.2.4.0""}] ) with patch(""pathlib.Path.is_file"", return_value=True): with patch.dict( aixpkg.__salt__, {""cmd.run_all"": installp_call, ""config.get"": MagicMock(return_value=False)}, ), patch.object(aixpkg, ""list_pkgs"", list_pkgs_mock): result = aixpkg.install(fileset_pkg_name) assert installp_call.call_count == 1 installp_call.assert_any_call( ""/usr/sbin/installp -acYXg -d /cecc/repos/aix72/TL3/BASE/installp/ppc bos.rte.printers_7.2.2.0.bff"", python_shell=False, ) expected = {""bos.rte.printers"": {""old"": ""7.1.6.0"", ""new"": ""7.2.4.0""}} assert result == expected ``` ###Assistant : Test install of fileset with bff extension " 1938,"def is_user_in_allow_list(block_list): return frappe.session.user in frappe.db.sql_list( , block_list, ) ","select allow_user from `tabLeave Block List Allow` where parent=%s",9,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_user_in_allow_list(block_list): return frappe.session.user in frappe.db.sql_list( , block_list, ) ``` ###Assistant : select allow_user from `tabLeave Block List Allow` where parent=%s " 1939,"def test_normalization(push_channel): types = { ""list"": list, } msgs = [ {""list"": tuple([1, 2, 3])}, ] for msg in msgs: ret = push_channel.send(msg, timeout=5, tries=1) for key, value in ret[""load""].items(): assert types[key] == type(value) "," Since we use msgpack, we need to test that list types are converted to lists ",15,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_normalization(push_channel): types = { ""list"": list, } msgs = [ {""list"": tuple([1, 2, 3])}, ] for msg in msgs: ret = push_channel.send(msg, timeout=5, tries=1) for key, value in ret[""load""].items(): assert types[key] == type(value) ``` ###Assistant : Since we use msgpack, we need to test that list types are converted to lists " 1940,"def to_perioddelta(self, freq) -> TimedeltaArray: # Deprecaation GH#34853 warnings.warn( ""to_perioddelta is deprecated and will be removed in a "" ""future version. "" ""Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead."", FutureWarning, # stacklevel chosen to be correct for when called from DatetimeIndex stacklevel=find_stack_level(inspect.currentframe()), ) from pandas.core.arrays.timedeltas import TimedeltaArray if self._ndarray.dtype != ""M8[ns]"": raise NotImplementedError(""Only supported for nanosecond resolution."") i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8 m8delta = i8delta.view(""m8[ns]"") return TimedeltaArray(m8delta) # ----------------------------------------------------------------- # Properties - Vectorized Timestamp Properties/Methods "," Calculate deltas between self values and self converted to Periods at a freq. Used for vectorized offsets. Parameters ---------- freq : Period frequency Returns ------- TimedeltaArray/Index ",26,73,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def to_perioddelta(self, freq) -> TimedeltaArray: # Deprecaation GH#34853 warnings.warn( ""to_perioddelta is deprecated and will be removed in a "" ""future version. "" ""Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead."", FutureWarning, # stacklevel chosen to be correct for when called from DatetimeIndex stacklevel=find_stack_level(inspect.currentframe()), ) from pandas.core.arrays.timedeltas import TimedeltaArray if self._ndarray.dtype != ""M8[ns]"": raise NotImplementedError(""Only supported for nanosecond resolution."") i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8 m8delta = i8delta.view(""m8[ns]"") return TimedeltaArray(m8delta) # ----------------------------------------------------------------- # Properties - Vectorized Timestamp Properties/Methods ``` ###Assistant : Calculate deltas between self values and self converted to Periods at a freq. Used for vectorized offsets. Parameters ---------- freq : Period frequency Returns ------- TimedeltaArray/Index " 1941,"def set(self, components): if len(components) > 0: self.__components = components else: raise Exception(""please give any vector"") "," input: new components changes the components of the vector. replace the components with newer one. ",15,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set(self, components): if len(components) > 0: self.__components = components else: raise Exception(""please give any vector"") ``` ###Assistant : input: new components changes the components of the vector. replace the components with newer one. " 1942,"def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]: if self._best_task_id is not None: compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth')) compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth')) with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f: config_list = json_tricks.load(f) return self._best_task_id, compact_model, compact_model_masks, self._best_score, config_list return None "," Returns ------- Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]] If self._best_task_id is not None, return best task id, best compact model, masks on the compact model, score, config list used in this task. ",33,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_best_result(self) -> Optional[Tuple[Union[int, str], Module, Dict[str, Dict[str, Tensor]], Optional[float], List[Dict]]]: if self._best_task_id is not None: compact_model = torch.load(Path(self._log_dir_root, 'best_result', 'model.pth')) compact_model_masks = torch.load(Path(self._log_dir_root, 'best_result', 'masks.pth')) with Path(self._log_dir_root, 'best_result', 'config_list.json').open('r') as f: config_list = json_tricks.load(f) return self._best_task_id, compact_model, compact_model_masks, self._best_score, config_list return None ``` ###Assistant : Returns ------- Optional[Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]] If self._best_task_id is not None, return best task id, best compact model, masks on the compact model, score, config list used in this task. 
" 1943,"def set_variation_by_axes(self, axes): try: self.font.setvaraxes(axes) except AttributeError as e: msg = ""FreeType 2.9.1 or greater is required"" raise NotImplementedError(msg) from e "," :param axes: A list of values for each axis. :exception OSError: If the font is not a variation font. ",19,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_variation_by_axes(self, axes): try: self.font.setvaraxes(axes) except AttributeError as e: msg = ""FreeType 2.9.1 or greater is required"" raise NotImplementedError(msg) from e ``` ###Assistant : :param axes: A list of values for each axis. :exception OSError: If the font is not a variation font. " 1944,"def dispatch_line(self, frame): if self.stop_here(frame) or self.break_here(frame): self.user_line(frame) if self.quitting: raise BdbQuit return self.trace_dispatch ","Invoke user function and return trace function for line event. If the debugger stops on the current line, invoke self.user_line(). Raise BdbQuit if self.quitting is set. Return self.trace_dispatch to continue tracing in this scope. ",34,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dispatch_line(self, frame): if self.stop_here(frame) or self.break_here(frame): self.user_line(frame) if self.quitting: raise BdbQuit return self.trace_dispatch ``` ###Assistant : Invoke user function and return trace function for line event. If the debugger stops on the current line, invoke self.user_line(). Raise BdbQuit if self.quitting is set. Return self.trace_dispatch to continue tracing in this scope. " 1945,"def clear(self) -> None: self._in_blocks.clear() self._snapshot_blocks = None self._snapshot_stats = None # We're erasing the snapshot, so put all stages into the ""after snapshot"" # bucket. self._stages_after_snapshot = ( self._stages_before_snapshot + self._stages_after_snapshot ) self._stages_before_snapshot = [] ","Clear all cached block references of this plan, including input blocks. This will render the plan un-executable unless the root is a LazyBlockList.",23,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clear(self) -> None: self._in_blocks.clear() self._snapshot_blocks = None self._snapshot_stats = None # We're erasing the snapshot, so put all stages into the ""after snapshot"" # bucket. self._stages_after_snapshot = ( self._stages_before_snapshot + self._stages_after_snapshot ) self._stages_before_snapshot = [] ``` ###Assistant : Clear all cached block references of this plan, including input blocks. This will render the plan un-executable unless the root is a LazyBlockList. " 1946,"def _global_clustering(self, X=None): clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = (X is not None) and self.compute_labels # Preprocessing for the global clustering. not_enough_centroids = False if isinstance(clusterer, Integral): clusterer = AgglomerativeClustering(n_clusters=self.n_clusters) # There is no need to perform the global clustering step. if len(centroids) < self.n_clusters: not_enough_centroids = True # To use in predict to avoid recalculation. self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True) if clusterer is None or not_enough_centroids: self.subcluster_labels_ = np.arange(len(centroids)) if not_enough_centroids: warnings.warn( ""Number of subclusters found (%d) by BIRCH is less "" ""than (%d). 
Decrease the threshold."" % (len(centroids), self.n_clusters), ConvergenceWarning, ) else: # The global clustering step that clusters the subclusters of # the leaves. It assumes the centroids of the subclusters as # samples and finds the final centroids. self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_) if compute_labels: self.labels_ = self._predict(X) "," Global clustering for the subclusters obtained after fitting ",8,131,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _global_clustering(self, X=None): clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = (X is not None) and self.compute_labels # Preprocessing for the global clustering. not_enough_centroids = False if isinstance(clusterer, Integral): clusterer = AgglomerativeClustering(n_clusters=self.n_clusters) # There is no need to perform the global clustering step. if len(centroids) < self.n_clusters: not_enough_centroids = True # To use in predict to avoid recalculation. self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True) if clusterer is None or not_enough_centroids: self.subcluster_labels_ = np.arange(len(centroids)) if not_enough_centroids: warnings.warn( ""Number of subclusters found (%d) by BIRCH is less "" ""than (%d). Decrease the threshold."" % (len(centroids), self.n_clusters), ConvergenceWarning, ) else: # The global clustering step that clusters the subclusters of # the leaves. It assumes the centroids of the subclusters as # samples and finds the final centroids. self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_) if compute_labels: self.labels_ = self._predict(X) ``` ###Assistant : Global clustering for the subclusters obtained after fitting " 1947,"def test_prefill_form_backcompat(extras, expected): mock_form = mock.Mock() mock_form.data = {""conn_id"": ""test"", ""extra"": json.dumps(extras), ""conn_type"": ""test""} cmv = ConnectionModelView() cmv.extra_fields = ['extra__test__my_param'] # this is set by `lazy_add_provider_discovered_options_to_connection_form` cmv.extra_field_name_mapping['extra__test__my_param'] = 'my_param' cmv.prefill_form(form=mock_form, pk=1) assert mock_form.extra__test__my_param.data == expected @pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field']) @mock.patch('airflow.utils.module_loading.import_string') @mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock)"," When populating custom fields in the connection form we should first check for the non-prefixed value (since prefixes in extra are deprecated) and then fallback to the prefixed value. Either way, the field is known internally to the model view as the prefixed value. ",44,41,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_prefill_form_backcompat(extras, expected): mock_form = mock.Mock() mock_form.data = {""conn_id"": ""test"", ""extra"": json.dumps(extras), ""conn_type"": ""test""} cmv = ConnectionModelView() cmv.extra_fields = ['extra__test__my_param'] # this is set by `lazy_add_provider_discovered_options_to_connection_form` cmv.extra_field_name_mapping['extra__test__my_param'] = 'my_param' cmv.prefill_form(form=mock_form, pk=1) assert mock_form.extra__test__my_param.data == expected @pytest.mark.parametrize('field_name', ['extra__test__custom_field', 'custom_field']) @mock.patch('airflow.utils.module_loading.import_string') @mock.patch('airflow.providers_manager.ProvidersManager.hooks', new_callable=PropertyMock) ``` ###Assistant : When populating custom fields in the connection form we should first check for the non-prefixed value (since prefixes in extra are deprecated) and then fallback to the prefixed value. Either way, the field is known internally to the model view as the prefixed value. " 1948,"def tokenize_query(query): result = defaultdict(list) query_params = defaultdict(list) tokens = split_query_into_tokens(query) for token in tokens: if token.upper() in [""OR"", ""AND""] or token.strip(""()"") == """": continue state = ""query"" for idx, char in enumerate(token): next_char = token[idx + 1] if idx < len(token) - 1 else None if idx == 0 and char in ('""', ""'"", "":""): break if char == "":"": if next_char in ("":"", "" ""): state = ""query"" else: state = ""tags"" break query_params[state].append(token) if ""query"" in query_params: result[""query""] = [format_query(query) for query in query_params[""query""]] for tag in query_params[""tags""]: key, value = format_tag(tag) result[key].append(value) return dict(result) "," Tokenizes a standard Sentry search query. Example: >>> query = 'is:resolved foo bar tag:value' >>> tokenize_query(query) { 'is': ['resolved'], 'query': ['foo', 'bar'], 'tag': ['value'], } Has a companion implementation in static/app/utils/tokenizeSearch.tsx ",31,98,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tokenize_query(query): result = defaultdict(list) query_params = defaultdict(list) tokens = split_query_into_tokens(query) for token in tokens: if token.upper() in [""OR"", ""AND""] or token.strip(""()"") == """": continue state = ""query"" for idx, char in enumerate(token): next_char = token[idx + 1] if idx < len(token) - 1 else None if idx == 0 and char in ('""', ""'"", "":""): break if char == "":"": if next_char in ("":"", "" ""): state = ""query"" else: state = ""tags"" break query_params[state].append(token) if ""query"" in query_params: result[""query""] = [format_query(query) for query in query_params[""query""]] for tag in query_params[""tags""]: key, value = format_tag(tag) result[key].append(value) return dict(result) ``` ###Assistant : Tokenizes a standard Sentry search query. Example: >>> query = 'is:resolved foo bar tag:value' >>> tokenize_query(query) { 'is': ['resolved'], 'query': ['foo', 'bar'], 'tag': ['value'], } Has a companion implementation in static/app/utils/tokenizeSearch.tsx " 1949,"def save_model(model, filepath, overwrite=True, save_format=None, **kwargs): save_format = get_save_format(filepath, save_format) if save_format not in (""keras"", ""tf"", ""h5"", ""keras_v3""): raise ValueError( ""Unknown `save_format` argument. Expected one of "" ""'keras', 'tf', or 'h5'. 
"" f""Received: save_format{save_format}"" ) if save_format == ""keras_v3"" or ( saving_lib.saving_v3_enabled() and save_format == ""keras"" ): # If file exists and should not be overwritten. try: exists = os.path.exists(filepath) except TypeError: exists = False if exists and not overwrite: proceed = io_utils.ask_to_proceed_with_overwrite(filepath) if not proceed: return if kwargs: raise ValueError( ""The following argument(s) are not supported "" f""with the native Keras format: {list(kwargs.keys())}"" ) saving_lib.save_model(model, filepath) else: # Legacy case return legacy_sm_saving_lib.save_model( model, filepath, overwrite=overwrite, save_format=save_format, **kwargs, ) @keras_export(""keras.models.load_model"")","Saves a model as a TensorFlow SavedModel or HDF5 file. See the [Serialization and Saving guide]( https://keras.io/guides/serialization_and_saving/) for details. Args: model: Keras model instance to be saved. filepath: `str` or `pathlib.Path` object. Path where to save the model. overwrite: Whether we should overwrite any existing model at the target location, or instead ask the user via an interactive prompt. save_format: Either `""keras""`, `""tf""`, `""h5""`, indicating whether to save the model in the native Keras format (`.keras`), in the TensorFlow SavedModel format (referred to as ""SavedModel"" below), or in the legacy HDF5 format (`.h5`). Defaults to `""tf""` in TF 2.X, and `""h5""` in TF 1.X. SavedModel format arguments: include_optimizer: Only applied to SavedModel and legacy HDF5 formats. If False, do not save the optimizer state. Defaults to True. signatures: Only applies to SavedModel format. Signatures to save with the SavedModel. See the `signatures` argument in `tf.saved_model.save` for details. options: Only applies to SavedModel format. `tf.saved_model.SaveOptions` object that specifies SavedModel saving options. save_traces: Only applies to SavedModel format. When enabled, the SavedModel will store the function traces for each layer. This can be disabled, so that only the configs of each layer are stored. Defaults to `True`. Disabling this will decrease serialization time and reduce file size, but it requires that all custom layers/models implement a `get_config()` method. Example: ```python model = tf.keras.Sequential([ tf.keras.layers.Dense(5, input_shape=(3,)), tf.keras.layers.Softmax()]) model.save(""model.keras"") loaded_model = tf.keras.models.load_model(""model.keras"") x = tf.random.uniform((10, 3)) assert np.allclose(model.predict(x), loaded_model.predict(x)) ``` Note that `model.save()` is an alias for `tf.keras.models.save_model()`. The SavedModel or HDF5 file contains: - The model's configuration (architecture) - The model's weights - The model's optimizer's state (if any) Thus models can be reinstantiated in the exact same state, without any of the code used for model definition or training. Note that the model weights may have different scoped names after being loaded. Scoped names include the model/layer names, such as `""dense_1/kernel:0""`. It is recommended that you use the layer properties to access specific variables, e.g. `model.get_layer(""dense_1"").kernel`. __SavedModel serialization format__ With `save_format=""tf""`, the model and all trackable objects attached to the it (e.g. layers and variables) are saved as a TensorFlow SavedModel. The model config, weights, and optimizer are included in the SavedModel. Additionally, for every Keras layer attached to the model, the SavedModel stores: * The config and metadata -- e.g. 
name, dtype, trainable status * Traced call and loss functions, which are stored as TensorFlow subgraphs. The traced functions allow the SavedModel format to save and load custom layers without the original class definition. You can choose to not save the traced functions by disabling the `save_traces` option. This will decrease the time it takes to save the model and the amount of disk space occupied by the output SavedModel. If you enable this option, then you _must_ provide all custom class definitions when loading the model. See the `custom_objects` argument in `tf.keras.models.load_model`. ",472,110,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_model(model, filepath, overwrite=True, save_format=None, **kwargs): save_format = get_save_format(filepath, save_format) if save_format not in (""keras"", ""tf"", ""h5"", ""keras_v3""): raise ValueError( ""Unknown `save_format` argument. Expected one of "" ""'keras', 'tf', or 'h5'. "" f""Received: save_format{save_format}"" ) if save_format == ""keras_v3"" or ( saving_lib.saving_v3_enabled() and save_format == ""keras"" ): # If file exists and should not be overwritten. try: exists = os.path.exists(filepath) except TypeError: exists = False if exists and not overwrite: proceed = io_utils.ask_to_proceed_with_overwrite(filepath) if not proceed: return if kwargs: raise ValueError( ""The following argument(s) are not supported "" f""with the native Keras format: {list(kwargs.keys())}"" ) saving_lib.save_model(model, filepath) else: # Legacy case return legacy_sm_saving_lib.save_model( model, filepath, overwrite=overwrite, save_format=save_format, **kwargs, ) @keras_export(""keras.models.load_model"") ``` ###Assistant : Saves a model as a TensorFlow SavedModel or HDF5 file. See the [Serialization and Saving guide]( https://keras.io/guides/serialization_and_saving/) for details. Args: model: Keras model instance to be saved. filepath: `str` or `pathlib.Path` object. Path where to save the model. overwrite: Whether we should overwrite any existing model at the target location, or instead ask the user via an interactive prompt. save_format: Either `""keras""`, `""tf""`, `""h5""`, indicating whether to save the model in the native Keras format (`.keras`), in the TensorFlow SavedModel format (referred to as ""SavedModel"" below), or in the legacy HDF5 format (`.h5`). Defaults to `""tf""` in TF 2.X, and `""h5""` in TF 1.X. SavedModel format arguments: include_optimizer: Only applied to SavedModel and legacy HDF5 formats. If False, do not save the optimizer state. Defaults to True. signatures: Only applies to SavedModel format. Signatures to save with the SavedModel. See the `signatures` argument in `tf.saved_model.save` for details. options: Only applies to SavedModel format. `tf.saved_model.SaveOptions` object that specifies SavedModel saving options. save_traces: Only applies to SavedModel format. When enabled, the SavedModel will store the function traces for each layer. This can be disabled, so that only the configs of each layer are stored. Defaults to `True`. Disabling this will decrease serialization time and reduce file size, but it requires that all custom layers/models implement a `get_config()` method. 
Example: ```python model = tf.keras.Sequential([ tf.keras.layers.Dense(5, input_shape=(3,)), tf.keras.layers.Softmax()]) model.save(""model.keras"") loaded_model = tf.keras.models.load_model(""model.keras"") x = tf.random.uniform((10, 3)) assert np.allclose(model.predict(x), loaded_model.predict(x)) ``` Note that `model.save()` is an alias for `tf.keras.models.save_model()`. The SavedModel or HDF5 file contains: - The model's configuration (architecture) - The model's weights - The model's optimizer's state (if any) Thus models can be reinstantiated in the exact same state, without any of the code used for model definition or training. Note that the model weights may have different scoped names after being loaded. Scoped names include the model/layer names, such as `""dense_1/kernel:0""`. It is recommended that you use the layer properties to access specific variables, e.g. `model.get_layer(""dense_1"").kernel`. __SavedModel serialization format__ With `save_format=""tf""`, the model and all trackable objects attached to the it (e.g. layers and variables) are saved as a TensorFlow SavedModel. The model config, weights, and optimizer are included in the SavedModel. Additionally, for every Keras layer attached to the model, the SavedModel stores: * The config and metadata -- e.g. name, dtype, trainable status * Traced call and loss functions, which are stored as TensorFlow subgraphs. The traced functions allow the SavedModel format to save and load custom layers without the original class definition. You can choose to not save the traced functions by disabling the `save_traces` option. This will decrease the time it takes to save the model and the amount of disk space occupied by the output SavedModel. If you enable this option, then you _must_ provide all custom class definitions when loading the model. See the `custom_objects` argument in `tf.keras.models.load_model`. " 1950,"def test_random_spanning_tree_additive_small(): pytest.importorskip(""numpy"") edges = { (0, 1): 1, (0, 2): 1, (0, 5): 3, (1, 2): 2, (1, 4): 3, (2, 3): 3, (5, 3): 4, (5, 4): 5, (4, 3): 4, } # Build the graph G = nx.Graph() for u, v in edges: G.add_edge(u, v, weight=edges[(u, v)]) solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)] solution = nx.Graph() solution.add_edges_from(solution_edges) sampled_tree = nx.random_spanning_tree( G, weight=""weight"", multiplicative=False, seed=37 ) assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) @pytest.mark.slow"," Sample a single spanning tree from the additive method. ",9,78,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_random_spanning_tree_additive_small(): pytest.importorskip(""numpy"") edges = { (0, 1): 1, (0, 2): 1, (0, 5): 3, (1, 2): 2, (1, 4): 3, (2, 3): 3, (5, 3): 4, (5, 4): 5, (4, 3): 4, } # Build the graph G = nx.Graph() for u, v in edges: G.add_edge(u, v, weight=edges[(u, v)]) solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)] solution = nx.Graph() solution.add_edges_from(solution_edges) sampled_tree = nx.random_spanning_tree( G, weight=""weight"", multiplicative=False, seed=37 ) assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) @pytest.mark.slow ``` ###Assistant : Sample a single spanning tree from the additive method. 
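For illustration, a minimal sketch of the sampling call exercised by this test (assuming `networkx` >= 2.6 is installed and imported as `nx`; the small graph below is a simplified stand-in for the test fixture, not the fixture itself):
```Python
import networkx as nx

# Build a small connected, weighted graph.
G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 1), (1, 2, 2), (0, 2, 1)], weight='weight')

# multiplicative=False selects the additive sampling method; a fixed seed
# makes the sampled spanning tree reproducible.
tree = nx.random_spanning_tree(G, weight='weight', multiplicative=False, seed=37)
print(sorted(tree.edges))
```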
" 1951,"def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400): if gamma > 1: raise ValueError(f""Gamma should be < 1, got {gamma}."") # exlude invalid pixels and extremely large diplacements flow_norm = torch.sum(flow_gt**2, dim=1).sqrt() valid_flow_mask = valid_flow_mask & (flow_norm < max_flow) valid_flow_mask = valid_flow_mask[:, None, :, :] flow_preds = torch.stack(flow_preds) # shape = (num_flow_updates, batch_size, 2, H, W) abs_diff = (flow_preds - flow_gt).abs() abs_diff = (abs_diff * valid_flow_mask).mean(axis=(1, 2, 3, 4)) num_predictions = flow_preds.shape[0] weights = gamma ** torch.arange(num_predictions - 1, -1, -1).to(flow_gt.device) flow_loss = (abs_diff * weights).sum() return flow_loss ",Loss function defined over sequence of flow predictions,8,86,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400): if gamma > 1: raise ValueError(f""Gamma should be < 1, got {gamma}."") # exlude invalid pixels and extremely large diplacements flow_norm = torch.sum(flow_gt**2, dim=1).sqrt() valid_flow_mask = valid_flow_mask & (flow_norm < max_flow) valid_flow_mask = valid_flow_mask[:, None, :, :] flow_preds = torch.stack(flow_preds) # shape = (num_flow_updates, batch_size, 2, H, W) abs_diff = (flow_preds - flow_gt).abs() abs_diff = (abs_diff * valid_flow_mask).mean(axis=(1, 2, 3, 4)) num_predictions = flow_preds.shape[0] weights = gamma ** torch.arange(num_predictions - 1, -1, -1).to(flow_gt.device) flow_loss = (abs_diff * weights).sum() return flow_loss ``` ###Assistant : Loss function defined over sequence of flow predictions " 1952,"def booleans_processing(config, **kwargs): final_booleans = {} if tf.executing_eagerly(): final_booleans[""output_attentions""] = ( kwargs[""output_attentions""] if kwargs[""output_attentions""] is not None else config.output_attentions ) final_booleans[""output_hidden_states""] = ( kwargs[""output_hidden_states""] if kwargs[""output_hidden_states""] is not None else config.output_hidden_states ) final_booleans[""return_dict""] = ( kwargs[""return_dict""] if kwargs[""return_dict""] is not None else config.return_dict ) if ""use_cache"" in kwargs: final_booleans[""use_cache""] = ( kwargs[""use_cache""] if kwargs[""use_cache""] is not None else getattr(config, ""use_cache"", None) ) else: final_booleans[""output_attentions""] = config.output_attentions final_booleans[""output_hidden_states""] = config.output_hidden_states if kwargs.get(""return_dict"", None) not in (None, True): tf_logger.warning( ""The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."" ) final_booleans[""return_dict""] = True if ""use_cache"" in kwargs: final_booleans[""use_cache""] = getattr(config, ""use_cache"", None) return final_booleans "," Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or graph) Args: config ([`PretrainedConfig`]): The config of the running model. **kwargs: The boolean parameters Returns: A dictionary with the proper values for each boolean ",45,108,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def booleans_processing(config, **kwargs): final_booleans = {} if tf.executing_eagerly(): final_booleans[""output_attentions""] = ( kwargs[""output_attentions""] if kwargs[""output_attentions""] is not None else config.output_attentions ) final_booleans[""output_hidden_states""] = ( kwargs[""output_hidden_states""] if kwargs[""output_hidden_states""] is not None else config.output_hidden_states ) final_booleans[""return_dict""] = ( kwargs[""return_dict""] if kwargs[""return_dict""] is not None else config.return_dict ) if ""use_cache"" in kwargs: final_booleans[""use_cache""] = ( kwargs[""use_cache""] if kwargs[""use_cache""] is not None else getattr(config, ""use_cache"", None) ) else: final_booleans[""output_attentions""] = config.output_attentions final_booleans[""output_hidden_states""] = config.output_hidden_states if kwargs.get(""return_dict"", None) not in (None, True): tf_logger.warning( ""The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."" ) final_booleans[""return_dict""] = True if ""use_cache"" in kwargs: final_booleans[""use_cache""] = getattr(config, ""use_cache"", None) return final_booleans ``` ###Assistant : Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or graph) Args: config ([`PretrainedConfig`]): The config of the running model. **kwargs: The boolean parameters Returns: A dictionary with the proper values for each boolean " 1953,"def test_dagrun_root_fail_unfinished(self): # TODO: this should live in test_dagrun.py # Run both the failed and successful tasks dag_id = 'test_dagrun_states_root_fail_unfinished' dag = self.dagbag.get_dag(dag_id) dr = dag.create_dagrun( run_type=DagRunType.SCHEDULED, execution_date=DEFAULT_DATE, state=None, ) self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', dr.run_id) with pytest.raises(AirflowException): dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec) # Mark the successful task as never having run since we want to see if the # dagrun will be in a running state despite having an unfinished task. with create_session() as session: ti = dr.get_task_instance('test_dagrun_unfinished', session=session) ti.state = State.NONE session.commit() dr.update_state() assert dr.state == State.RUNNING "," DagRuns with one unfinished and one failed root task -> RUNNING ",11,84,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dagrun_root_fail_unfinished(self): # TODO: this should live in test_dagrun.py # Run both the failed and successful tasks dag_id = 'test_dagrun_states_root_fail_unfinished' dag = self.dagbag.get_dag(dag_id) dr = dag.create_dagrun( run_type=DagRunType.SCHEDULED, execution_date=DEFAULT_DATE, state=None, ) self.null_exec.mock_task_fail(dag_id, 'test_dagrun_fail', dr.run_id) with pytest.raises(AirflowException): dag.run(start_date=dr.execution_date, end_date=dr.execution_date, executor=self.null_exec) # Mark the successful task as never having run since we want to see if the # dagrun will be in a running state despite having an unfinished task. 
with create_session() as session: ti = dr.get_task_instance('test_dagrun_unfinished', session=session) ti.state = State.NONE session.commit() dr.update_state() assert dr.state == State.RUNNING ``` ###Assistant : DagRuns with one unfinished and one failed root task -> RUNNING " 1954,"def _get_call_args(backend_name, data, args, kwargs): if isinstance(data, ABCSeries): arg_def = [ (""kind"", ""line""), (""ax"", None), (""figsize"", None), (""use_index"", True), (""title"", None), (""grid"", None), (""legend"", False), (""style"", None), (""logx"", False), (""logy"", False), (""loglog"", False), (""xticks"", None), (""yticks"", None), (""xlim"", None), (""ylim"", None), (""rot"", None), (""fontsize"", None), (""colormap"", None), (""table"", False), (""yerr"", None), (""xerr"", None), (""label"", None), (""secondary_y"", False), (""xlabel"", None), (""ylabel"", None), ] elif isinstance(data, ABCDataFrame): arg_def = [ (""x"", None), (""y"", None), (""kind"", ""line""), (""ax"", None), (""subplots"", False), (""sharex"", None), (""sharey"", False), (""layout"", None), (""figsize"", None), (""use_index"", True), (""title"", None), (""grid"", None), (""legend"", True), (""style"", None), (""logx"", False), (""logy"", False), (""loglog"", False), (""xticks"", None), (""yticks"", None), (""xlim"", None), (""ylim"", None), (""rot"", None), (""fontsize"", None), (""colormap"", None), (""table"", False), (""yerr"", None), (""xerr"", None), (""secondary_y"", False), (""sort_columns"", False), (""xlabel"", None), (""ylabel"", None), ] else: raise TypeError( f""Called plot accessor for type {type(data).__name__}, "" ""expected Series or DataFrame"" ) if ""sort_columns"" in itertools.chain(args, kwargs.keys()): warnings.warn( ""`sort_columns` is deprecated and will be removed in a future "" ""version."", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) if args and isinstance(data, ABCSeries): positional_args = str(args)[1:-1] keyword_args = "", "".join( [f""{name}={repr(value)}"" for (name, _), value in zip(arg_def, args)] ) msg = ( ""`Series.plot()` should not be called with positional "" ""arguments, only keyword arguments. The order of "" ""positional arguments will change in the future. "" f""Use `Series.plot({keyword_args})` instead of "" f""`Series.plot({positional_args})`."" ) raise TypeError(msg) pos_args = {name: value for (name, _), value in zip(arg_def, args)} if backend_name == ""pandas.plotting._matplotlib"": kwargs = dict(arg_def, **pos_args, **kwargs) else: kwargs = dict(pos_args, **kwargs) x = kwargs.pop(""x"", None) y = kwargs.pop(""y"", None) kind = kwargs.pop(""kind"", ""line"") return x, y, kind, kwargs "," This function makes calls to this accessor `__call__` method compatible with the previous `SeriesPlotMethods.__call__` and `DataFramePlotMethods.__call__`. Those had slightly different signatures, since `DataFramePlotMethods` accepted `x` and `y` parameters. ",28,266,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _get_call_args(backend_name, data, args, kwargs): if isinstance(data, ABCSeries): arg_def = [ (""kind"", ""line""), (""ax"", None), (""figsize"", None), (""use_index"", True), (""title"", None), (""grid"", None), (""legend"", False), (""style"", None), (""logx"", False), (""logy"", False), (""loglog"", False), (""xticks"", None), (""yticks"", None), (""xlim"", None), (""ylim"", None), (""rot"", None), (""fontsize"", None), (""colormap"", None), (""table"", False), (""yerr"", None), (""xerr"", None), (""label"", None), (""secondary_y"", False), (""xlabel"", None), (""ylabel"", None), ] elif isinstance(data, ABCDataFrame): arg_def = [ (""x"", None), (""y"", None), (""kind"", ""line""), (""ax"", None), (""subplots"", False), (""sharex"", None), (""sharey"", False), (""layout"", None), (""figsize"", None), (""use_index"", True), (""title"", None), (""grid"", None), (""legend"", True), (""style"", None), (""logx"", False), (""logy"", False), (""loglog"", False), (""xticks"", None), (""yticks"", None), (""xlim"", None), (""ylim"", None), (""rot"", None), (""fontsize"", None), (""colormap"", None), (""table"", False), (""yerr"", None), (""xerr"", None), (""secondary_y"", False), (""sort_columns"", False), (""xlabel"", None), (""ylabel"", None), ] else: raise TypeError( f""Called plot accessor for type {type(data).__name__}, "" ""expected Series or DataFrame"" ) if ""sort_columns"" in itertools.chain(args, kwargs.keys()): warnings.warn( ""`sort_columns` is deprecated and will be removed in a future "" ""version."", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) if args and isinstance(data, ABCSeries): positional_args = str(args)[1:-1] keyword_args = "", "".join( [f""{name}={repr(value)}"" for (name, _), value in zip(arg_def, args)] ) msg = ( ""`Series.plot()` should not be called with positional "" ""arguments, only keyword arguments. The order of "" ""positional arguments will change in the future. "" f""Use `Series.plot({keyword_args})` instead of "" f""`Series.plot({positional_args})`."" ) raise TypeError(msg) pos_args = {name: value for (name, _), value in zip(arg_def, args)} if backend_name == ""pandas.plotting._matplotlib"": kwargs = dict(arg_def, **pos_args, **kwargs) else: kwargs = dict(pos_args, **kwargs) x = kwargs.pop(""x"", None) y = kwargs.pop(""y"", None) kind = kwargs.pop(""kind"", ""line"") return x, y, kind, kwargs ``` ###Assistant : This function makes calls to this accessor `__call__` method compatible with the previous `SeriesPlotMethods.__call__` and `DataFramePlotMethods.__call__`. Those had slightly different signatures, since `DataFramePlotMethods` accepted `x` and `y` parameters. " 1955,"async def test_ahas_key(self): await cache.aset(""hello1"", ""goodbye1"") self.assertIs(await cache.ahas_key(""hello1""), False) self.assertIs(await cache.ahas_key(""goodbye1""), False) ",ahas_key() doesn't ever return True for the dummy cache backend.,10,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_ahas_key(self): await cache.aset(""hello1"", ""goodbye1"") self.assertIs(await cache.ahas_key(""hello1""), False) self.assertIs(await cache.ahas_key(""goodbye1""), False) ``` ###Assistant : ahas_key() doesn't ever return True for the dummy cache backend. 
" 1956,"def min_temp(self) -> float: if self.temperature_unit == UnitOfTemperature.CELSIUS: return TEMP_MIN return TEMP_MIN_F ",Return the minimum temperature supported by the device.,8,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def min_temp(self) -> float: if self.temperature_unit == UnitOfTemperature.CELSIUS: return TEMP_MIN return TEMP_MIN_F ``` ###Assistant : Return the minimum temperature supported by the device. " 1957,"def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0): if x and x not in self.free_symbols: return self if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None): return self.series(x, x0, n, dir, cdir=cdir) else: return self._eval_nseries(x, n=n, logx=logx, cdir=cdir) "," Wrapper to _eval_nseries if assumptions allow, else to series. If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is called. This calculates ""n"" terms in the innermost expressions and then builds up the final series just by ""cross-multiplying"" everything out. The optional ``logx`` parameter can be used to replace any log(x) in the returned series with a symbolic value to avoid evaluating log(x) at 0. A symbol to use in place of log(x) should be provided. Advantage -- it's fast, because we do not have to determine how many terms we need to calculate in advance. Disadvantage -- you may end up with less terms than you may have expected, but the O(x**n) term appended will always be correct and so the result, though perhaps shorter, will also be correct. If any of those assumptions is not met, this is treated like a wrapper to series which will try harder to return the correct number of terms. See also lseries(). Examples ======== >>> from sympy import sin, log, Symbol >>> from sympy.abc import x, y >>> sin(x).nseries(x, 0, 6) x - x**3/6 + x**5/120 + O(x**6) >>> log(x+1).nseries(x, 0, 5) x - x**2/2 + x**3/3 - x**4/4 + O(x**5) Handling of the ``logx`` parameter --- in the following example the expansion fails since ``sin`` does not have an asymptotic expansion at -oo (the limit of log(x) as x approaches 0): >>> e = sin(log(x)) >>> e.nseries(x, 0, 6) Traceback (most recent call last): ... PoleError: ... ... >>> logx = Symbol('logx') >>> e.nseries(x, 0, 6, logx=logx) sin(logx) In the following example, the expansion works but only returns self unless the ``logx`` parameter is used: >>> e = x**y >>> e.nseries(x, 0, 2) x**y >>> e.nseries(x, 0, 2, logx=logx) exp(logx*y) ",294,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0): if x and x not in self.free_symbols: return self if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None): return self.series(x, x0, n, dir, cdir=cdir) else: return self._eval_nseries(x, n=n, logx=logx, cdir=cdir) ``` ###Assistant : Wrapper to _eval_nseries if assumptions allow, else to series. If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is called. This calculates ""n"" terms in the innermost expressions and then builds up the final series just by ""cross-multiplying"" everything out. The optional ``logx`` parameter can be used to replace any log(x) in the returned series with a symbolic value to avoid evaluating log(x) at 0. A symbol to use in place of log(x) should be provided. 
Advantage -- it's fast, because we do not have to determine how many terms we need to calculate in advance. Disadvantage -- you may end up with less terms than you may have expected, but the O(x**n) term appended will always be correct and so the result, though perhaps shorter, will also be correct. If any of those assumptions is not met, this is treated like a wrapper to series which will try harder to return the correct number of terms. See also lseries(). Examples ======== >>> from sympy import sin, log, Symbol >>> from sympy.abc import x, y >>> sin(x).nseries(x, 0, 6) x - x**3/6 + x**5/120 + O(x**6) >>> log(x+1).nseries(x, 0, 5) x - x**2/2 + x**3/3 - x**4/4 + O(x**5) Handling of the ``logx`` parameter --- in the following example the expansion fails since ``sin`` does not have an asymptotic expansion at -oo (the limit of log(x) as x approaches 0): >>> e = sin(log(x)) >>> e.nseries(x, 0, 6) Traceback (most recent call last): ... PoleError: ... ... >>> logx = Symbol('logx') >>> e.nseries(x, 0, 6, logx=logx) sin(logx) In the following example, the expansion works but only returns self unless the ``logx`` parameter is used: >>> e = x**y >>> e.nseries(x, 0, 2) x**y >>> e.nseries(x, 0, 2, logx=logx) exp(logx*y) " 1958,"def test_getitem_error(self, exception): container = self.Container(exception(""failure"")) with pytest.raises(validate.ValidationError) as cm: validate.validate(validate.get(""foo"", default=""default""), container) assert_validationerror(cm.value, ) "," ValidationError(GetItemSchema): Could not get key 'foo' from object Container Context: failure ",11,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_getitem_error(self, exception): container = self.Container(exception(""failure"")) with pytest.raises(validate.ValidationError) as cm: validate.validate(validate.get(""foo"", default=""default""), container) assert_validationerror(cm.value, ) ``` ###Assistant : ValidationError(GetItemSchema): Could not get key 'foo' from object Container Context: failure " 1959,"def num_base_priors(self) -> List[int]: return [1 for _ in range(len(self.strides))] ","list[int]: The number of priors (points) at a point on the feature grid",13,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def num_base_priors(self) -> List[int]: return [1 for _ in range(len(self.strides))] ``` ###Assistant : list[int]: The number of priors (points) at a point on the feature grid " 1960,"def test_logentry_change_message_localized_datetime_input(self): post_data = { ""site"": self.site.pk, ""title"": ""Changed"", ""hist"": ""Some content"", ""created_0"": ""12/03/2008"", ""created_1"": ""11:54"", } with translation.override(""fr""): change_url = reverse( ""admin:admin_utils_article_change"", args=[quote(self.a1.pk)] ) response = self.client.post(change_url, post_data) self.assertRedirects( response, reverse(""admin:admin_utils_article_changelist"") ) logentry = LogEntry.objects.filter( content_type__model__iexact=""article"" ).latest(""id"") self.assertEqual(logentry.get_change_message(), ""Changed Title and History."") "," Localized date/time inputs shouldn't affect changed form data detection. ",9,43,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_logentry_change_message_localized_datetime_input(self): post_data = { ""site"": self.site.pk, ""title"": ""Changed"", ""hist"": ""Some content"", ""created_0"": ""12/03/2008"", ""created_1"": ""11:54"", } with translation.override(""fr""): change_url = reverse( ""admin:admin_utils_article_change"", args=[quote(self.a1.pk)] ) response = self.client.post(change_url, post_data) self.assertRedirects( response, reverse(""admin:admin_utils_article_changelist"") ) logentry = LogEntry.objects.filter( content_type__model__iexact=""article"" ).latest(""id"") self.assertEqual(logentry.get_change_message(), ""Changed Title and History."") ``` ###Assistant : Localized date/time inputs shouldn't affect changed form data detection. " 1961,"def _cleanup_code(code): return code # Nothing to do here # language=PythonVerboseRegExp _call_function_bytecode = bytecode_regex( rb ) else: # Starting with python 3.11, the bytecode is peppered with CACHE instructions (which dis module conveniently hides # unless show_caches=True is used). Dealing with these CACHE instructions in regex rules is going to render them # unreadable, so instead we pre-process the bytecode and filter the offending opcodes out."," # Matches `global_function('some', 'constant', 'arguments')`. # Load the global function. In code with >256 of names, this may require extended name references. ((?:`EXTENDED_ARG`.)* (?:`LOAD_NAME`|`LOAD_GLOBAL`|`LOAD_FAST`).) # For foo.bar.whizz(), the above is the 'foo', below is the 'bar.whizz'. ((?:(?:`EXTENDED_ARG`.)* (?:`LOAD_METHOD`|`LOAD_ATTR`).)*) # Load however many arguments it takes. These (for now) must all be constants. # Again, code with >256 constants may need extended enumeration. ((?:(?:`EXTENDED_ARG`.)* `LOAD_CONST`.)*) # Call the function. The parameter is the argument count (which may also be >256) if CALL_FUNCTION or # CALL_METHOD are used. For CALL_FUNCTION_EX, the parameter are flags. ((?:`EXTENDED_ARG`.)* (?:`CALL_FUNCTION`|`CALL_METHOD`|`CALL_FUNCTION_EX`).) ",94,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cleanup_code(code): return code # Nothing to do here # language=PythonVerboseRegExp _call_function_bytecode = bytecode_regex( rb ) else: # Starting with python 3.11, the bytecode is peppered with CACHE instructions (which dis module conveniently hides # unless show_caches=True is used). Dealing with these CACHE instructions in regex rules is going to render them # unreadable, so instead we pre-process the bytecode and filter the offending opcodes out. ``` ###Assistant : # Matches `global_function('some', 'constant', 'arguments')`. # Load the global function. In code with >256 of names, this may require extended name references. ((?:`EXTENDED_ARG`.)* (?:`LOAD_NAME`|`LOAD_GLOBAL`|`LOAD_FAST`).) # For foo.bar.whizz(), the above is the 'foo', below is the 'bar.whizz'. ((?:(?:`EXTENDED_ARG`.)* (?:`LOAD_METHOD`|`LOAD_ATTR`).)*) # Load however many arguments it takes. These (for now) must all be constants. # Again, code with >256 constants may need extended enumeration. ((?:(?:`EXTENDED_ARG`.)* `LOAD_CONST`.)*) # Call the function. The parameter is the argument count (which may also be >256) if CALL_FUNCTION or # CALL_METHOD are used. For CALL_FUNCTION_EX, the parameter are flags. ((?:`EXTENDED_ARG`.)* (?:`CALL_FUNCTION`|`CALL_METHOD`|`CALL_FUNCTION_EX`).) 
" 1962,"def filemode(mode): perm = [] for table in filemode_table: for bit, char in table: if mode & bit == bit: perm.append(char) break else: perm.append(""-"") return """".join(perm) ","Convert a file's mode to a string of the form -rwxrwxrwx. Used by TarFile.list() ",14,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def filemode(mode): perm = [] for table in filemode_table: for bit, char in table: if mode & bit == bit: perm.append(char) break else: perm.append(""-"") return """".join(perm) ``` ###Assistant : Convert a file's mode to a string of the form -rwxrwxrwx. Used by TarFile.list() " 1963,"def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'): r k = _setup_kernel(k) * gain p = k.shape[0] - 1 return _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl) #---------------------------------------------------------------------------- ","Filter a batch of 2D images with the given FIR filter. Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and filters each image with the given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified `gain`. Pixels outside the image are assumed to be zero. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). gain: Scaling factor for signal magnitude (default: 1.0). data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). impl: Name of the implementation to use. Can be `""ref""` or `""cuda""` (default). Returns: Tensor of the same shape and datatype as `x`. ",130,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def filter_2d(x, k, gain=1, data_format='NCHW', impl='cuda'): r k = _setup_kernel(k) * gain p = k.shape[0] - 1 return _simple_upfirdn_2d(x, k, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl) #---------------------------------------------------------------------------- ``` ###Assistant : Filter a batch of 2D images with the given FIR filter. Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and filters each image with the given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified `gain`. Pixels outside the image are assumed to be zero. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). gain: Scaling factor for signal magnitude (default: 1.0). data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`). impl: Name of the implementation to use. Can be `""ref""` or `""cuda""` (default). Returns: Tensor of the same shape and datatype as `x`. " 1964,"def execute(): frappe.reload_doctype(""Pricing Rule"") currency = frappe.db.get_default(""currency"") for doc in frappe.get_all(""Pricing Rule"", fields=[""company"", ""name""]): if doc.company: currency = frappe.get_cached_value(""Company"", doc.company, ""default_currency"") frappe.db.sql( , (currency, doc.name) ) ",update `tabPricing Rule` set currency = %s where name = %s,11,26,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def execute(): frappe.reload_doctype(""Pricing Rule"") currency = frappe.db.get_default(""currency"") for doc in frappe.get_all(""Pricing Rule"", fields=[""company"", ""name""]): if doc.company: currency = frappe.get_cached_value(""Company"", doc.company, ""default_currency"") frappe.db.sql( , (currency, doc.name) ) ``` ###Assistant : update `tabPricing Rule` set currency = %s where name = %s " 1965,"def forward(self, x, mask=None): B_, N, C = x.shape qkv = self.qkv(x).reshape((B_, N, 3, self.num_heads, C // self.num_heads)).transpose((2, 0, 3, 1, 4)) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = (q @ k.transpose((0, 1, 3, 2))) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.reshape( (-1, ))].reshape((self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.transpose((2, 0, 1)) # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] attn = attn.reshape((B_ // nW, nW, self.num_heads, N, N)) + mask.unsqueeze(1).unsqueeze(0) attn = attn.reshape((-1, self.num_heads, N, N)) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose((0, 2, 1, 3)).reshape((B_, N, C)) x = self.proj(x) x = self.proj_drop(x) return x "," Args: x: input features with shape of (num_windows*B, N, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None ",21,131,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, x, mask=None): B_, N, C = x.shape qkv = self.qkv(x).reshape((B_, N, 3, self.num_heads, C // self.num_heads)).transpose((2, 0, 3, 1, 4)) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = (q @ k.transpose((0, 1, 3, 2))) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.reshape( (-1, ))].reshape((self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.transpose((2, 0, 1)) # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] attn = attn.reshape((B_ // nW, nW, self.num_heads, N, N)) + mask.unsqueeze(1).unsqueeze(0) attn = attn.reshape((-1, self.num_heads, N, N)) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose((0, 2, 1, 3)).reshape((B_, N, C)) x = self.proj(x) x = self.proj_drop(x) return x ``` ###Assistant : Args: x: input features with shape of (num_windows*B, N, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None " 1966,"def test_join_rules_invite(self): creator = ""@creator:example.com"" pleb = ""@joiner:example.com"" auth_events = { (""m.room.create"", """"): _create_event(RoomVersions.V6, creator), (""m.room.member"", creator): _join_event(RoomVersions.V6, creator), (""m.room.join_rules"", """"): _join_rules_event( RoomVersions.V6, creator, ""invite"" ), } # A join without an invite is rejected. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user cannot be force-joined to a room. 
with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _member_event(RoomVersions.V6, pleb, ""join"", sender=creator), auth_events.values(), ) # Banned should be rejected. auth_events[(""m.room.member"", pleb)] = _member_event( RoomVersions.V6, pleb, ""ban"" ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user who left cannot re-join. auth_events[(""m.room.member"", pleb)] = _member_event( RoomVersions.V6, pleb, ""leave"" ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can send a join if they're in the room. auth_events[(""m.room.member"", pleb)] = _member_event( RoomVersions.V6, pleb, ""join"" ) event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can accept an invite. auth_events[(""m.room.member"", pleb)] = _member_event( RoomVersions.V6, pleb, ""invite"", sender=creator ) event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) "," Test joining an invite only room. ",6,154,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_join_rules_invite(self): creator = ""@creator:example.com"" pleb = ""@joiner:example.com"" auth_events = { (""m.room.create"", """"): _create_event(RoomVersions.V6, creator), (""m.room.member"", creator): _join_event(RoomVersions.V6, creator), (""m.room.join_rules"", """"): _join_rules_event( RoomVersions.V6, creator, ""invite"" ), } # A join without an invite is rejected. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user cannot be force-joined to a room. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _member_event(RoomVersions.V6, pleb, ""join"", sender=creator), auth_events.values(), ) # Banned should be rejected. auth_events[(""m.room.member"", pleb)] = _member_event( RoomVersions.V6, pleb, ""ban"" ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user who left cannot re-join. auth_events[(""m.room.member"", pleb)] = _member_event( RoomVersions.V6, pleb, ""leave"" ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can send a join if they're in the room. auth_events[(""m.room.member"", pleb)] = _member_event( RoomVersions.V6, pleb, ""join"" ) event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can accept an invite. auth_events[(""m.room.member"", pleb)] = _member_event( RoomVersions.V6, pleb, ""invite"", sender=creator ) event_auth.check_auth_rules_for_event( RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) ``` ###Assistant : Test joining an invite only room. 
" 1967,"def validate_onboarding(data): logging.info(f""Validating onboarding data {data}"") messages = data['outputs']['messages'] if len(messages) == 0: return False status_message = messages[-2] if status_message is None: return False submitted_data = status_message.get('data') if submitted_data is None: return False final_status = submitted_data.get('final_status') return final_status == ONBOARD_SUCCESS # TODO: find a better way to avoid duplicating this from model_chat world.py"," Check the contents of the data to ensure they are valid. ",11,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_onboarding(data): logging.info(f""Validating onboarding data {data}"") messages = data['outputs']['messages'] if len(messages) == 0: return False status_message = messages[-2] if status_message is None: return False submitted_data = status_message.get('data') if submitted_data is None: return False final_status = submitted_data.get('final_status') return final_status == ONBOARD_SUCCESS # TODO: find a better way to avoid duplicating this from model_chat world.py ``` ###Assistant : Check the contents of the data to ensure they are valid. " 1968,"async def test_async_track_entity_registry_updated_event_with_empty_list(hass): unsub_single = async_track_entity_registry_updated_event( hass, [], ha.callback(lambda event: None) ) unsub_single2 = async_track_entity_registry_updated_event( hass, [], ha.callback(lambda event: None) ) unsub_single2() unsub_single() ",Test async_track_entity_registry_updated_event passing an empty list of entities.,8,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_async_track_entity_registry_updated_event_with_empty_list(hass): unsub_single = async_track_entity_registry_updated_event( hass, [], ha.callback(lambda event: None) ) unsub_single2 = async_track_entity_registry_updated_event( hass, [], ha.callback(lambda event: None) ) unsub_single2() unsub_single() ``` ###Assistant : Test async_track_entity_registry_updated_event passing an empty list of entities. " 1969,"def generate_self_signed_tls_certs(): try: from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.x509.oid import NameOID except ImportError: raise ImportError( ""Using `Security.temporary` requires `cryptography`, please "" ""install it using either pip or conda"" ) key = rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend() ) key_contents = key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), ).decode() ray_interal = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, ""ray-internal"")]) # This is the same logic used by the GCS server to acquire a # private/interal IP address to listen on. 
If we just use localhost + # 127.0.0.1 then we won't be able to connect to the GCS and will get # an error like ""No match found for server name: 192.168.X.Y"" s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect((""8.8.8.8"", 80)) private_ip_address = s.getsockname()[0] s.close() altnames = x509.SubjectAlternativeName( [ x509.DNSName( socket.gethostbyname(socket.gethostname()) ), # Probably 127.0.0.1 x509.DNSName(""127.0.0.1""), x509.DNSName(private_ip_address), # 192.168.*.* x509.DNSName(""localhost""), ] ) now = datetime.datetime.utcnow() cert = ( x509.CertificateBuilder() .subject_name(ray_interal) .issuer_name(ray_interal) .add_extension(altnames, critical=False) .public_key(key.public_key()) .serial_number(x509.random_serial_number()) .not_valid_before(now) .not_valid_after(now + datetime.timedelta(days=365)) .sign(key, hashes.SHA256(), default_backend()) ) cert_contents = cert.public_bytes(serialization.Encoding.PEM).decode() return cert_contents, key_contents ","Create self-signed key/cert pair for testing. This method requires the library ``cryptography`` be installed. ",14,167,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_self_signed_tls_certs(): try: from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.x509.oid import NameOID except ImportError: raise ImportError( ""Using `Security.temporary` requires `cryptography`, please "" ""install it using either pip or conda"" ) key = rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend() ) key_contents = key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), ).decode() ray_interal = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, ""ray-internal"")]) # This is the same logic used by the GCS server to acquire a # private/interal IP address to listen on. If we just use localhost + # 127.0.0.1 then we won't be able to connect to the GCS and will get # an error like ""No match found for server name: 192.168.X.Y"" s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect((""8.8.8.8"", 80)) private_ip_address = s.getsockname()[0] s.close() altnames = x509.SubjectAlternativeName( [ x509.DNSName( socket.gethostbyname(socket.gethostname()) ), # Probably 127.0.0.1 x509.DNSName(""127.0.0.1""), x509.DNSName(private_ip_address), # 192.168.*.* x509.DNSName(""localhost""), ] ) now = datetime.datetime.utcnow() cert = ( x509.CertificateBuilder() .subject_name(ray_interal) .issuer_name(ray_interal) .add_extension(altnames, critical=False) .public_key(key.public_key()) .serial_number(x509.random_serial_number()) .not_valid_before(now) .not_valid_after(now + datetime.timedelta(days=365)) .sign(key, hashes.SHA256(), default_backend()) ) cert_contents = cert.public_bytes(serialization.Encoding.PEM).decode() return cert_contents, key_contents ``` ###Assistant : Create self-signed key/cert pair for testing. This method requires the library ``cryptography`` be installed. 
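An illustrative usage sketch (an assumption, not part of the original record): the returned PEM strings can be written to files so a test server and client can load them; the file names below are hypothetical.
```Python
# Hypothetical usage: persist the generated PEM strings for a TLS test setup.
cert_pem, key_pem = generate_self_signed_tls_certs()
with open('test_tls.crt', 'w') as f:
    f.write(cert_pem)
with open('test_tls.key', 'w') as f:
    f.write(key_pem)
```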
" 1970,"def print_help(self): has_ticker_start = ""[unvl]"" if not self.ticker else """" has_ticker_end = ""[/unvl]"" if not self.ticker else """" help_text = f console.print(text=help_text, menu=""Stocks - Government"") ","Print help [src][QuiverQuant][/src] [info]Explore:[/info][cmds] lasttrades last trades topbuys show most purchased stocks topsells show most sold stocks lastcontracts show last government contracts given out qtrcontracts quarterly government contracts analysis toplobbying top corporate lobbying tickers load load a specific ticker for analysis[/cmds] [param]Ticker: [/param]{self.ticker or None}{has_ticker_start}[cmds] gtrades show government trades for ticker contracts show government contracts for ticker histcont show historical quarterly government contracts for ticker lobbying corporate lobbying details for ticker[/cmds]{has_ticker_end} ",71,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def print_help(self): has_ticker_start = ""[unvl]"" if not self.ticker else """" has_ticker_end = ""[/unvl]"" if not self.ticker else """" help_text = f console.print(text=help_text, menu=""Stocks - Government"") ``` ###Assistant : Print help [src][QuiverQuant][/src] [info]Explore:[/info][cmds] lasttrades last trades topbuys show most purchased stocks topsells show most sold stocks lastcontracts show last government contracts given out qtrcontracts quarterly government contracts analysis toplobbying top corporate lobbying tickers load load a specific ticker for analysis[/cmds] [param]Ticker: [/param]{self.ticker or None}{has_ticker_start}[cmds] gtrades show government trades for ticker contracts show government contracts for ticker histcont show historical quarterly government contracts for ticker lobbying corporate lobbying details for ticker[/cmds]{has_ticker_end} " 1971,"def get_item_warehouse_projected_qty(items_to_consider): item_warehouse_projected_qty = {} for item_code, warehouse, projected_qty in frappe.db.sql( .format( "", "".join([""%s""] * len(items_to_consider)) ), items_to_consider, ): if item_code not in item_warehouse_projected_qty: item_warehouse_projected_qty.setdefault(item_code, {}) if warehouse not in item_warehouse_projected_qty.get(item_code): item_warehouse_projected_qty[item_code][warehouse] = flt(projected_qty) warehouse_doc = frappe.get_doc(""Warehouse"", warehouse) while warehouse_doc.parent_warehouse: if not item_warehouse_projected_qty.get(item_code, {}).get(warehouse_doc.parent_warehouse): item_warehouse_projected_qty.setdefault(item_code, {})[warehouse_doc.parent_warehouse] = flt( projected_qty ) else: item_warehouse_projected_qty[item_code][warehouse_doc.parent_warehouse] += flt(projected_qty) warehouse_doc = frappe.get_doc(""Warehouse"", warehouse_doc.parent_warehouse) return item_warehouse_projected_qty ","select item_code, warehouse, projected_qty from tabBin where item_code in ({0}) and (warehouse != """" and warehouse is not null)",19,60,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_item_warehouse_projected_qty(items_to_consider): item_warehouse_projected_qty = {} for item_code, warehouse, projected_qty in frappe.db.sql( .format( "", "".join([""%s""] * len(items_to_consider)) ), items_to_consider, ): if item_code not in item_warehouse_projected_qty: item_warehouse_projected_qty.setdefault(item_code, {}) if warehouse not in item_warehouse_projected_qty.get(item_code): item_warehouse_projected_qty[item_code][warehouse] = flt(projected_qty) warehouse_doc = frappe.get_doc(""Warehouse"", warehouse) while warehouse_doc.parent_warehouse: if not item_warehouse_projected_qty.get(item_code, {}).get(warehouse_doc.parent_warehouse): item_warehouse_projected_qty.setdefault(item_code, {})[warehouse_doc.parent_warehouse] = flt( projected_qty ) else: item_warehouse_projected_qty[item_code][warehouse_doc.parent_warehouse] += flt(projected_qty) warehouse_doc = frappe.get_doc(""Warehouse"", warehouse_doc.parent_warehouse) return item_warehouse_projected_qty ``` ###Assistant : select item_code, warehouse, projected_qty from tabBin where item_code in ({0}) and (warehouse != """" and warehouse is not null) " 1972,"def _collect_type_vars(types, typevar_types=None): if typevar_types is None: typevar_types = typing.TypeVar tvars = [] for t in types: if ( isinstance(t, typevar_types) and t not in tvars and not _is_unpack(t) ): tvars.append(t) if _should_collect_from_parameters(t): tvars.extend([t for t in t.__parameters__ if t not in tvars]) return tuple(tvars) NoReturn = typing.NoReturn # Some unconstrained type variables. These are used by the container types. # (These are not for export.) T = typing.TypeVar('T') # Any type. KT = typing.TypeVar('KT') # Key type. VT = typing.TypeVar('VT') # Value type. T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. ClassVar = typing.ClassVar # On older versions of typing there is an internal class named ""Final"". # 3.8+ if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): Final = typing.Final # 3.7 else:","Collect all type variable contained in types in order of first appearance (lexicographic order). For example:: _collect_type_vars((T, List[S, T])) == (T, S) ",22,132,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _collect_type_vars(types, typevar_types=None): if typevar_types is None: typevar_types = typing.TypeVar tvars = [] for t in types: if ( isinstance(t, typevar_types) and t not in tvars and not _is_unpack(t) ): tvars.append(t) if _should_collect_from_parameters(t): tvars.extend([t for t in t.__parameters__ if t not in tvars]) return tuple(tvars) NoReturn = typing.NoReturn # Some unconstrained type variables. These are used by the container types. # (These are not for export.) T = typing.TypeVar('T') # Any type. KT = typing.TypeVar('KT') # Key type. VT = typing.TypeVar('VT') # Value type. T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. ClassVar = typing.ClassVar # On older versions of typing there is an internal class named ""Final"". # 3.8+ if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): Final = typing.Final # 3.7 else: ``` ###Assistant : Collect all type variable contained in types in order of first appearance (lexicographic order). 
For example:: _collect_type_vars((T, List[S, T])) == (T, S) " 1973,"def from_dataframe(df, allow_copy=True): if isinstance(df, pd.DataFrame): return df if not hasattr(df, ""__dataframe__""): raise ValueError(""`df` does not support __dataframe__"") return _from_dataframe(df.__dataframe__(allow_copy=allow_copy)) "," Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol. Parameters ---------- df : DataFrameXchg Object supporting the exchange protocol, i.e. `__dataframe__` method. allow_copy : bool, default: True Whether to allow copying the memory to perform the conversion (if false then zero-copy approach is requested). Returns ------- pd.DataFrame ",48,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def from_dataframe(df, allow_copy=True): if isinstance(df, pd.DataFrame): return df if not hasattr(df, ""__dataframe__""): raise ValueError(""`df` does not support __dataframe__"") return _from_dataframe(df.__dataframe__(allow_copy=allow_copy)) ``` ###Assistant : Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol. Parameters ---------- df : DataFrameXchg Object supporting the exchange protocol, i.e. `__dataframe__` method. allow_copy : bool, default: True Whether to allow copying the memory to perform the conversion (if false then zero-copy approach is requested). Returns ------- pd.DataFrame " 1974,"def parse_wheel(wheel_zip, name): # type: (ZipFile, str) -> Tuple[str, Message] try: info_dir = wheel_dist_info_dir(wheel_zip, name) metadata = wheel_metadata(wheel_zip, info_dir) version = wheel_version(metadata) except UnsupportedWheel as e: raise UnsupportedWheel(""{} has an invalid wheel, {}"".format(name, str(e))) check_compatibility(version, name) return info_dir, metadata ","Extract information from the provided wheel, ensuring it meets basic standards. Returns the name of the .dist-info directory and the parsed WHEEL metadata. ",23,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse_wheel(wheel_zip, name): # type: (ZipFile, str) -> Tuple[str, Message] try: info_dir = wheel_dist_info_dir(wheel_zip, name) metadata = wheel_metadata(wheel_zip, info_dir) version = wheel_version(metadata) except UnsupportedWheel as e: raise UnsupportedWheel(""{} has an invalid wheel, {}"".format(name, str(e))) check_compatibility(version, name) return info_dir, metadata ``` ###Assistant : Extract information from the provided wheel, ensuring it meets basic standards. Returns the name of the .dist-info directory and the parsed WHEEL metadata. " 1975,"def style_docstrings_in_code(code, max_len=119): # fmt: off splits = code.split('\""\""\""') splits = [ (s if i % 2 == 0 or _re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len)) for i, s in enumerate(splits) ] black_errors = ""\n\n"".join([s[1] for s in splits if isinstance(s, tuple) and len(s[1]) > 0]) splits = [s[0] if isinstance(s, tuple) else s for s in splits] clean_code = '\""\""\""'.join(splits) # fmt: on return clean_code, black_errors "," Style all docstrings in some code. Args: code (`str`): The code in which we want to style the docstrings. max_len (`int`): The maximum number of characters per line. Returns: `Tuple[str, str]`: A tuple with the clean code and the black errors (if any) ",43,70,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def style_docstrings_in_code(code, max_len=119): # fmt: off splits = code.split('\""\""\""') splits = [ (s if i % 2 == 0 or _re_doc_ignore.search(splits[i - 1]) is not None else style_docstring(s, max_len=max_len)) for i, s in enumerate(splits) ] black_errors = ""\n\n"".join([s[1] for s in splits if isinstance(s, tuple) and len(s[1]) > 0]) splits = [s[0] if isinstance(s, tuple) else s for s in splits] clean_code = '\""\""\""'.join(splits) # fmt: on return clean_code, black_errors ``` ###Assistant : Style all docstrings in some code. Args: code (`str`): The code in which we want to style the docstrings. max_len (`int`): The maximum number of characters per line. Returns: `Tuple[str, str]`: A tuple with the clean code and the black errors (if any) " 1976,"def check_version_info(cluster_metadata): cluster_version_info = ( cluster_metadata[""ray_version""], cluster_metadata[""python_version""], ) version_info = compute_version_info() if version_info != cluster_version_info: node_ip_address = ray._private.services.get_node_ip_address() error_message = ( ""Version mismatch: The cluster was started with:\n"" "" Ray: "" + cluster_version_info[0] + ""\n"" "" Python: "" + cluster_version_info[1] + ""\n"" ""This process on node "" + node_ip_address + "" was started with:"" + ""\n"" "" Ray: "" + version_info[0] + ""\n"" "" Python: "" + version_info[1] + ""\n"" ) raise RuntimeError(error_message) ","Check if the Python and Ray versions stored in GCS matches this process. Args: cluster_metadata: Ray cluster metadata from GCS. Raises: Exception: An exception is raised if there is a version mismatch. ",32,73,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_version_info(cluster_metadata): cluster_version_info = ( cluster_metadata[""ray_version""], cluster_metadata[""python_version""], ) version_info = compute_version_info() if version_info != cluster_version_info: node_ip_address = ray._private.services.get_node_ip_address() error_message = ( ""Version mismatch: The cluster was started with:\n"" "" Ray: "" + cluster_version_info[0] + ""\n"" "" Python: "" + cluster_version_info[1] + ""\n"" ""This process on node "" + node_ip_address + "" was started with:"" + ""\n"" "" Ray: "" + version_info[0] + ""\n"" "" Python: "" + version_info[1] + ""\n"" ) raise RuntimeError(error_message) ``` ###Assistant : Check if the Python and Ray versions stored in GCS matches this process. Args: cluster_metadata: Ray cluster metadata from GCS. Raises: Exception: An exception is raised if there is a version mismatch. " 1977,"def get_console() -> ""Console"": global _console if _console is None: from .console import Console _console = Console() return _console ","Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console, and hasn't been explicitly given one. Returns: Console: A console instance. ",25,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_console() -> ""Console"": global _console if _console is None: from .console import Console _console = Console() return _console ``` ###Assistant : Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console, and hasn't been explicitly given one. Returns: Console: A console instance. 
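A minimal usage sketch (assuming this is the module-level helper exposed by the `rich` package):
```Python
from rich import get_console          # assumed to be rich's global-console helper
from rich.console import Console

console = get_console()               # lazily creates and caches a shared Console
assert isinstance(console, Console)
console.print('hello from the shared console')
```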
" 1978,"def is_hash_allowed(self, hashes): # type: (Optional[Hashes]) -> bool if hashes is None or not self.has_hash: return False # Assert non-None so mypy knows self.hash_name and self.hash are str. assert self.hash_name is not None assert self.hash is not None return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash) # TODO: Relax this comparison logic to ignore, for example, fragments."," Return True if the link has a hash and it is allowed. ",12,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_hash_allowed(self, hashes): # type: (Optional[Hashes]) -> bool if hashes is None or not self.has_hash: return False # Assert non-None so mypy knows self.hash_name and self.hash are str. assert self.hash_name is not None assert self.hash is not None return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash) # TODO: Relax this comparison logic to ignore, for example, fragments. ``` ###Assistant : Return True if the link has a hash and it is allowed. " 1979,"def coverage_error(y_true, y_score, *, sample_weight=None): y_true = check_array(y_true, ensure_2d=True) y_score = check_array(y_score, ensure_2d=True) check_consistent_length(y_true, y_score, sample_weight) y_type = type_of_target(y_true, input_name=""y_true"") if y_type != ""multilabel-indicator"": raise ValueError(""{0} format is not supported"".format(y_type)) if y_true.shape != y_score.shape: raise ValueError(""y_true and y_score have different shape"") y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true)) y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1)) coverage = (y_score >= y_min_relevant).sum(axis=1) coverage = coverage.filled(0) return np.average(coverage, weights=sample_weight) ","Coverage error measure. Compute how far we need to go through the ranked scores to cover all true labels. The best value is equal to the average number of labels in ``y_true`` per sample. Ties in ``y_scores`` are broken by giving maximal rank that would have been assigned to all tied values. Note: Our implementation's score is 1 greater than the one given in Tsoumakas et al., 2010. This extends it to handle the degenerate case in which an instance has 0 true labels. Read more in the :ref:`User Guide `. Parameters ---------- y_true : ndarray of shape (n_samples, n_labels) True binary labels in binary indicator format. y_score : ndarray of shape (n_samples, n_labels) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by ""decision_function"" on some classifiers). sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- coverage_error : float References ---------- .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and knowledge discovery handbook (pp. 667-685). Springer US. ",179,60,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def coverage_error(y_true, y_score, *, sample_weight=None): y_true = check_array(y_true, ensure_2d=True) y_score = check_array(y_score, ensure_2d=True) check_consistent_length(y_true, y_score, sample_weight) y_type = type_of_target(y_true, input_name=""y_true"") if y_type != ""multilabel-indicator"": raise ValueError(""{0} format is not supported"".format(y_type)) if y_true.shape != y_score.shape: raise ValueError(""y_true and y_score have different shape"") y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true)) y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1)) coverage = (y_score >= y_min_relevant).sum(axis=1) coverage = coverage.filled(0) return np.average(coverage, weights=sample_weight) ``` ###Assistant : Coverage error measure. Compute how far we need to go through the ranked scores to cover all true labels. The best value is equal to the average number of labels in ``y_true`` per sample. Ties in ``y_scores`` are broken by giving maximal rank that would have been assigned to all tied values. Note: Our implementation's score is 1 greater than the one given in Tsoumakas et al., 2010. This extends it to handle the degenerate case in which an instance has 0 true labels. Read more in the :ref:`User Guide `. Parameters ---------- y_true : ndarray of shape (n_samples, n_labels) True binary labels in binary indicator format. y_score : ndarray of shape (n_samples, n_labels) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by ""decision_function"" on some classifiers). sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- coverage_error : float References ---------- .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and knowledge discovery handbook (pp. 667-685). Springer US. " 1980,"def total_predict_batches(self) -> int: return sum(self.trainer.num_predict_batches) ","The total number of prediction batches, which may change from epoch to epoch. Use this to set the total number of iterations in the progress bar. Can return ``inf`` if the predict dataloader is of infinite size. ",37,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def total_predict_batches(self) -> int: return sum(self.trainer.num_predict_batches) ``` ###Assistant : The total number of prediction batches, which may change from epoch to epoch. Use this to set the total number of iterations in the progress bar. Can return ``inf`` if the predict dataloader is of infinite size. " 1981,"def shape(self) -> Dict[str, Tuple[int]]: self._check_values_type() return {k: dataset.shape for k, dataset in self.items()} ","Shape of each split of the dataset (number of columns, number of rows). Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset(""rotten_tomatoes"") >>> ds.shape {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)} ``` ",36,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def shape(self) -> Dict[str, Tuple[int]]: self._check_values_type() return {k: dataset.shape for k, dataset in self.items()} ``` ###Assistant : Shape of each split of the dataset (number of columns, number of rows). 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset(""rotten_tomatoes"") >>> ds.shape {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)} ``` " 1982,"def is_torch_support_available(self) -> bool: if is_torch_available(): from transformers.file_utils import torch_version return torch_version >= self.torch_onnx_minimum_version else: return False "," The minimum PyTorch version required to export the model. Returns: `bool`: Whether the installed version of PyTorch is compatible with the model. ",22,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_torch_support_available(self) -> bool: if is_torch_available(): from transformers.file_utils import torch_version return torch_version >= self.torch_onnx_minimum_version else: return False ``` ###Assistant : The minimum PyTorch version required to export the model. Returns: `bool`: Whether the installed version of PyTorch is compatible with the model. " 1983,"def check_connection(self, logger, config) -> Tuple[bool, any]: auth_header = TokenAuthenticator(token=config[""api_key""]).get_auth_header() ping_url = ORB_API_BASE_URL + ""ping"" ping_response = requests.get(ping_url, headers=auth_header) try: ping_response.raise_for_status() return True, None except Exception as e: return False, e "," Makes a request to the /ping endpoint, which validates that the authentication credentials are appropriate. API Docs: https://docs.withorb.com/reference/ping ",18,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_connection(self, logger, config) -> Tuple[bool, any]: auth_header = TokenAuthenticator(token=config[""api_key""]).get_auth_header() ping_url = ORB_API_BASE_URL + ""ping"" ping_response = requests.get(ping_url, headers=auth_header) try: ping_response.raise_for_status() return True, None except Exception as e: return False, e ``` ###Assistant : Makes a request to the /ping endpoint, which validates that the authentication credentials are appropriate. 
API Docs: https://docs.withorb.com/reference/ping " 1984,"def add_items_upsert(self, content_type_pk, indexers): compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection) title_sql = [] autocomplete_sql = [] body_sql = [] data_params = [] for indexer in indexers: data_params.extend((content_type_pk, indexer.id)) # Compile title value value = compiler.prepare_value( IndexEntry._meta.get_field(""title""), indexer.title ) sql, params = value.as_sql(compiler, self.connection) title_sql.append(sql) data_params.extend(params) # Compile autocomplete value value = compiler.prepare_value( IndexEntry._meta.get_field(""autocomplete""), indexer.autocomplete ) sql, params = value.as_sql(compiler, self.connection) autocomplete_sql.append(sql) data_params.extend(params) # Compile body value value = compiler.prepare_value( IndexEntry._meta.get_field(""body""), indexer.body ) sql, params = value.as_sql(compiler, self.connection) body_sql.append(sql) data_params.extend(params) data_sql = "", "".join( [ ""(%%s, %%s, %s, %s, %s, 1.0)"" % (a, b, c) for a, b, c in zip(title_sql, autocomplete_sql, body_sql) ] ) with self.connection.cursor() as cursor: cursor.execute( % (IndexEntry._meta.db_table, data_sql), data_params, ) self._refresh_title_norms() "," INSERT INTO %s (content_type_id, object_id, title, autocomplete, body, title_norm) (VALUES %s) ON CONFLICT (content_type_id, object_id) DO UPDATE SET title = EXCLUDED.title, title_norm = 1.0, autocomplete = EXCLUDED.autocomplete, body = EXCLUDED.body ",30,112,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_items_upsert(self, content_type_pk, indexers): compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection) title_sql = [] autocomplete_sql = [] body_sql = [] data_params = [] for indexer in indexers: data_params.extend((content_type_pk, indexer.id)) # Compile title value value = compiler.prepare_value( IndexEntry._meta.get_field(""title""), indexer.title ) sql, params = value.as_sql(compiler, self.connection) title_sql.append(sql) data_params.extend(params) # Compile autocomplete value value = compiler.prepare_value( IndexEntry._meta.get_field(""autocomplete""), indexer.autocomplete ) sql, params = value.as_sql(compiler, self.connection) autocomplete_sql.append(sql) data_params.extend(params) # Compile body value value = compiler.prepare_value( IndexEntry._meta.get_field(""body""), indexer.body ) sql, params = value.as_sql(compiler, self.connection) body_sql.append(sql) data_params.extend(params) data_sql = "", "".join( [ ""(%%s, %%s, %s, %s, %s, 1.0)"" % (a, b, c) for a, b, c in zip(title_sql, autocomplete_sql, body_sql) ] ) with self.connection.cursor() as cursor: cursor.execute( % (IndexEntry._meta.db_table, data_sql), data_params, ) self._refresh_title_norms() ``` ###Assistant : INSERT INTO %s (content_type_id, object_id, title, autocomplete, body, title_norm) (VALUES %s) ON CONFLICT (content_type_id, object_id) DO UPDATE SET title = EXCLUDED.title, title_norm = 1.0, autocomplete = EXCLUDED.autocomplete, body = EXCLUDED.body " 1985,"def test_already_created_plus_written_results(indexer, indexer_cache) -> None: org_id = 1234 raw_indexer = indexer indexer = CachingIndexer(indexer_cache, indexer) v0 = raw_indexer.record(use_case_id, org_id, ""v1.2.0"") v1 = raw_indexer.record(use_case_id, org_id, ""v1.2.1"") v2 = raw_indexer.record(use_case_id, org_id, ""v1.2.2"") expected_mapping = {""v1.2.0"": v0, ""v1.2.1"": v1, ""v1.2.2"": v2} results = 
indexer.bulk_record( use_case_id=use_case_id, org_strings={org_id: {""v1.2.0"", ""v1.2.1"", ""v1.2.2""}} ) assert len(results[org_id]) == len(expected_mapping) == 3 for string, id in results[org_id].items(): assert expected_mapping[string] == id results = indexer.bulk_record( use_case_id=use_case_id, org_strings={org_id: {""v1.2.0"", ""v1.2.1"", ""v1.2.2"", ""v1.2.3""}}, ) v3 = raw_indexer.resolve(use_case_id, org_id, ""v1.2.3"") expected_mapping[""v1.2.3""] = v3 assert len(results[org_id]) == len(expected_mapping) == 4 for string, id in results[org_id].items(): assert expected_mapping[string] == id fetch_meta = results.get_fetch_metadata() assert_fetch_type_for_tag_string_set( fetch_meta[org_id], FetchType.CACHE_HIT, {""v1.2.0"", ""v1.2.1"", ""v1.2.2""} ) assert_fetch_type_for_tag_string_set(fetch_meta[org_id], FetchType.FIRST_SEEN, {""v1.2.3""}) "," Test that we correctly combine db read results with db write results for the same organization. ",16,108,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_already_created_plus_written_results(indexer, indexer_cache) -> None: org_id = 1234 raw_indexer = indexer indexer = CachingIndexer(indexer_cache, indexer) v0 = raw_indexer.record(use_case_id, org_id, ""v1.2.0"") v1 = raw_indexer.record(use_case_id, org_id, ""v1.2.1"") v2 = raw_indexer.record(use_case_id, org_id, ""v1.2.2"") expected_mapping = {""v1.2.0"": v0, ""v1.2.1"": v1, ""v1.2.2"": v2} results = indexer.bulk_record( use_case_id=use_case_id, org_strings={org_id: {""v1.2.0"", ""v1.2.1"", ""v1.2.2""}} ) assert len(results[org_id]) == len(expected_mapping) == 3 for string, id in results[org_id].items(): assert expected_mapping[string] == id results = indexer.bulk_record( use_case_id=use_case_id, org_strings={org_id: {""v1.2.0"", ""v1.2.1"", ""v1.2.2"", ""v1.2.3""}}, ) v3 = raw_indexer.resolve(use_case_id, org_id, ""v1.2.3"") expected_mapping[""v1.2.3""] = v3 assert len(results[org_id]) == len(expected_mapping) == 4 for string, id in results[org_id].items(): assert expected_mapping[string] == id fetch_meta = results.get_fetch_metadata() assert_fetch_type_for_tag_string_set( fetch_meta[org_id], FetchType.CACHE_HIT, {""v1.2.0"", ""v1.2.1"", ""v1.2.2""} ) assert_fetch_type_for_tag_string_set(fetch_meta[org_id], FetchType.FIRST_SEEN, {""v1.2.3""}) ``` ###Assistant : Test that we correctly combine db read results with db write results for the same organization. " 1986,"def compiler_fixup(compiler_so, cc_args): stripArch = stripSysroot = False compiler_so = list(compiler_so) if not _supports_universal_builds(): # OSX before 10.4.0, these don't support -arch and -isysroot at # all. 
stripArch = stripSysroot = True else: stripArch = '-arch' in cc_args stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot')) if stripArch or 'ARCHFLAGS' in os.environ: while True: try: index = compiler_so.index('-arch') # Strip this argument and the next one: del compiler_so[index:index+2] except ValueError: break elif not _supports_arm64_builds(): # Look for ""-arch arm64"" and drop that for idx in reversed(range(len(compiler_so))): if compiler_so[idx] == '-arch' and compiler_so[idx+1] == ""arm64"": del compiler_so[idx:idx+2] if 'ARCHFLAGS' in os.environ and not stripArch: # User specified different -arch flags in the environ, # see also distutils.sysconfig compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() if stripSysroot: while True: indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')] if not indices: break index = indices[0] if compiler_so[index] == '-isysroot': # Strip this argument and the next one: del compiler_so[index:index+2] else: # It's '-isysroot/some/path' in one arg del compiler_so[index:index+1] # Check if the SDK that is used during compilation actually exists, # the universal build requires the usage of a universal SDK and not all # users have that installed by default. sysroot = None argvar = cc_args indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')] if not indices: argvar = compiler_so indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')] for idx in indices: if argvar[idx] == '-isysroot': sysroot = argvar[idx+1] break else: sysroot = argvar[idx][len('-isysroot'):] break if sysroot and not os.path.isdir(sysroot): sys.stderr.write(f""Compiling with an SDK that doesn't seem to exist: {sysroot}\n"") sys.stderr.write(""Please check your Xcode installation\n"") sys.stderr.flush() return compiler_so "," This function will strip '-isysroot PATH' and '-arch ARCH' from the compile flags if the user has specified one them in extra_compile_flags. This is needed because '-arch ARCH' adds another architecture to the build, without a way to remove an architecture. Furthermore GCC will barf if multiple '-isysroot' arguments are present. ",51,268,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def compiler_fixup(compiler_so, cc_args): stripArch = stripSysroot = False compiler_so = list(compiler_so) if not _supports_universal_builds(): # OSX before 10.4.0, these don't support -arch and -isysroot at # all. 
stripArch = stripSysroot = True else: stripArch = '-arch' in cc_args stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot')) if stripArch or 'ARCHFLAGS' in os.environ: while True: try: index = compiler_so.index('-arch') # Strip this argument and the next one: del compiler_so[index:index+2] except ValueError: break elif not _supports_arm64_builds(): # Look for ""-arch arm64"" and drop that for idx in reversed(range(len(compiler_so))): if compiler_so[idx] == '-arch' and compiler_so[idx+1] == ""arm64"": del compiler_so[idx:idx+2] if 'ARCHFLAGS' in os.environ and not stripArch: # User specified different -arch flags in the environ, # see also distutils.sysconfig compiler_so = compiler_so + os.environ['ARCHFLAGS'].split() if stripSysroot: while True: indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')] if not indices: break index = indices[0] if compiler_so[index] == '-isysroot': # Strip this argument and the next one: del compiler_so[index:index+2] else: # It's '-isysroot/some/path' in one arg del compiler_so[index:index+1] # Check if the SDK that is used during compilation actually exists, # the universal build requires the usage of a universal SDK and not all # users have that installed by default. sysroot = None argvar = cc_args indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')] if not indices: argvar = compiler_so indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')] for idx in indices: if argvar[idx] == '-isysroot': sysroot = argvar[idx+1] break else: sysroot = argvar[idx][len('-isysroot'):] break if sysroot and not os.path.isdir(sysroot): sys.stderr.write(f""Compiling with an SDK that doesn't seem to exist: {sysroot}\n"") sys.stderr.write(""Please check your Xcode installation\n"") sys.stderr.flush() return compiler_so ``` ###Assistant : This function will strip '-isysroot PATH' and '-arch ARCH' from the compile flags if the user has specified one them in extra_compile_flags. This is needed because '-arch ARCH' adds another architecture to the build, without a way to remove an architecture. Furthermore GCC will barf if multiple '-isysroot' arguments are present. 
" 1987,"def test_copy_page_with_excluded_parental_and_child_relations(self): try: # modify excluded fields for this test EventPage.exclude_fields_in_copy = [ ""advert_placements"", ""categories"", ""signup_link"", ] # set up data christmas_event = EventPage.objects.get(url_path=""/home/events/christmas/"") summer_category = EventCategory.objects.create(name=""Summer"") holiday_category = EventCategory.objects.create(name=""Holidays"") # add URL (to test excluding a basic field) christmas_event.signup_link = ""https://christmas-is-awesome.com/rsvp"" # add parental many to many relations christmas_event.categories = (summer_category, holiday_category) christmas_event.save() # Copy it new_christmas_event = christmas_event.copy( update_attrs={ ""title"": ""New christmas event"", ""slug"": ""new-christmas-event"", } ) # check that the signup_link was NOT copied self.assertEqual( christmas_event.signup_link, ""https://christmas-is-awesome.com/rsvp"" ) self.assertEqual(new_christmas_event.signup_link, """") # check that original event is untouched self.assertEqual( christmas_event.categories.count(), 2, ""Child objects (parental many to many) defined on the superclass were removed from the original page"", ) # check that parental many to many are NOT copied self.assertEqual( new_christmas_event.categories.count(), 0, ""Child objects (parental many to many) were copied but should be excluded"", ) # check that child objects on original event were left untouched self.assertEqual( christmas_event.advert_placements.count(), 1, ""Child objects defined on the original superclass were edited when copied"", ) # check that child objects were NOT copied self.assertEqual( new_christmas_event.advert_placements.count(), 0, ""Child objects defined on the superclass were copied and should not be"", ) finally: # reset excluded fields for future tests EventPage.exclude_fields_in_copy = [] ",Test that a page will be copied with parental and child relations removed if excluded.,15,197,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_copy_page_with_excluded_parental_and_child_relations(self): try: # modify excluded fields for this test EventPage.exclude_fields_in_copy = [ ""advert_placements"", ""categories"", ""signup_link"", ] # set up data christmas_event = EventPage.objects.get(url_path=""/home/events/christmas/"") summer_category = EventCategory.objects.create(name=""Summer"") holiday_category = EventCategory.objects.create(name=""Holidays"") # add URL (to test excluding a basic field) christmas_event.signup_link = ""https://christmas-is-awesome.com/rsvp"" # add parental many to many relations christmas_event.categories = (summer_category, holiday_category) christmas_event.save() # Copy it new_christmas_event = christmas_event.copy( update_attrs={ ""title"": ""New christmas event"", ""slug"": ""new-christmas-event"", } ) # check that the signup_link was NOT copied self.assertEqual( christmas_event.signup_link, ""https://christmas-is-awesome.com/rsvp"" ) self.assertEqual(new_christmas_event.signup_link, """") # check that original event is untouched self.assertEqual( christmas_event.categories.count(), 2, ""Child objects (parental many to many) defined on the superclass were removed from the original page"", ) # check that parental many to many are NOT copied self.assertEqual( new_christmas_event.categories.count(), 0, ""Child objects (parental many to many) were copied but should be excluded"", ) # check that child objects on original event were left untouched self.assertEqual( christmas_event.advert_placements.count(), 1, ""Child objects defined on the original superclass were edited when copied"", ) # check that child objects were NOT copied self.assertEqual( new_christmas_event.advert_placements.count(), 0, ""Child objects defined on the superclass were copied and should not be"", ) finally: # reset excluded fields for future tests EventPage.exclude_fields_in_copy = [] ``` ###Assistant : Test that a page will be copied with parental and child relations removed if excluded. " 1988,"def test_app_model_in_list_body_class(self): response = self.client.get(reverse(""admin:admin_views_section_changelist"")) self.assertContains(response, ' Tuple[str, str, str] return _distro.version_parts(best) "," Return the version of the current OS distribution as a tuple ``(major, minor, build_number)`` with items as follows: * ``major``: The result of :func:`distro.major_version`. * ``minor``: The result of :func:`distro.minor_version`. * ``build_number``: The result of :func:`distro.build_number`. For a description of the *best* parameter, see the :func:`distro.version` method. ",47,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def version_parts(best=False): # type: (bool) -> Tuple[str, str, str] return _distro.version_parts(best) ``` ###Assistant : Return the version of the current OS distribution as a tuple ``(major, minor, build_number)`` with items as follows: * ``major``: The result of :func:`distro.major_version`. * ``minor``: The result of :func:`distro.minor_version`. * ``build_number``: The result of :func:`distro.build_number`. For a description of the *best* parameter, see the :func:`distro.version` method. " 1990,"def check_status(self): status = { 'success': False } try: con = self.__connect() with closing(con) as con: #TODO: best way to check con.connected ? 
status['success'] = True except Exception as e: log.error(f'Error connecting to SQL Server {self.database}, {e}!') status['error'] = e return status "," Check the connection of the SQL Server database :return: success status and error message if error occurs ",17,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_status(self): status = { 'success': False } try: con = self.__connect() with closing(con) as con: #TODO: best way to check con.connected ? status['success'] = True except Exception as e: log.error(f'Error connecting to SQL Server {self.database}, {e}!') status['error'] = e return status ``` ###Assistant : Check the connection of the SQL Server database :return: success status and error message if error occurs " 1991,"def model_from_config(config, custom_objects=None): if isinstance(config, list): raise TypeError( ""`model_from_config` expects a dictionary, not a list. "" f""Received: config={config}. Did you meant to use "" ""`Sequential.from_config(config)`?"" ) from keras.layers import deserialize # pylint: disable=g-import-not-at-top return deserialize(config, custom_objects=custom_objects) @keras_export(""keras.models.model_from_yaml"")","Instantiates a Keras model from its config. Usage: ``` # for a Functional API model tf.keras.Model().from_config(model.get_config()) # for a Sequential model tf.keras.Sequential().from_config(model.get_config()) ``` Args: config: Configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled). Raises: TypeError: if `config` is not a dictionary. ",57,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def model_from_config(config, custom_objects=None): if isinstance(config, list): raise TypeError( ""`model_from_config` expects a dictionary, not a list. "" f""Received: config={config}. Did you meant to use "" ""`Sequential.from_config(config)`?"" ) from keras.layers import deserialize # pylint: disable=g-import-not-at-top return deserialize(config, custom_objects=custom_objects) @keras_export(""keras.models.model_from_yaml"") ``` ###Assistant : Instantiates a Keras model from its config. Usage: ``` # for a Functional API model tf.keras.Model().from_config(model.get_config()) # for a Sequential model tf.keras.Sequential().from_config(model.get_config()) ``` Args: config: Configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled). Raises: TypeError: if `config` is not a dictionary. " 1992,"def test_golden_path(self): with self.assertNumQueries(0): result = self.page.cached_content_type self.assertEqual(result, ContentType.objects.get(id=self.page.content_type_id)) "," The return value should match the value you'd get if fetching the ContentType from the database, and shouldn't trigger any database queries when the ContentType is already in memory. ",29,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_golden_path(self): with self.assertNumQueries(0): result = self.page.cached_content_type self.assertEqual(result, ContentType.objects.get(id=self.page.content_type_id)) ``` ###Assistant : The return value should match the value you'd get if fetching the ContentType from the database, and shouldn't trigger any database queries when the ContentType is already in memory. " 1993,"def fold_function_name(function_name): parts = function_name.split(""."") if len(parts) == 1: return function_name tail = parts.pop() grouped = [list(g) for _, g in groupby(parts)] "," Fold multiple consecutive occurences of the same property name into a single group, excluding the last component. foo | foo foo.foo | foo.foo foo.foo.foo | {foo#2}.foo bar.foo.foo | bar.foo.foo bar.foo.foo.foo | bar.{foo#2}.foo bar.foo.foo.onError | bar.{foo#2}.onError bar.bar.bar.foo.foo.onError | {bar#3}.{foo#2}.onError bar.foo.foo.bar.bar.onError | bar.{foo#2}.{bar#2}.onError ",41,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fold_function_name(function_name): parts = function_name.split(""."") if len(parts) == 1: return function_name tail = parts.pop() grouped = [list(g) for _, g in groupby(parts)] ``` ###Assistant : Fold multiple consecutive occurences of the same property name into a single group, excluding the last component. foo | foo foo.foo | foo.foo foo.foo.foo | {foo#2}.foo bar.foo.foo | bar.foo.foo bar.foo.foo.foo | bar.{foo#2}.foo bar.foo.foo.onError | bar.{foo#2}.onError bar.bar.bar.foo.foo.onError | {bar#3}.{foo#2}.onError bar.foo.foo.bar.bar.onError | bar.{foo#2}.{bar#2}.onError " 1994,"def _laplace_rule_exp(f, t, s, doit=True, **hints): hints.pop('simplify', True) a = Wild('a', exclude=[t]) y = Wild('y') z = Wild('z') k, func = f.as_independent(t, as_Add=False) ma1 = func.match(exp(y)*z) if ma1: ma2 = ma1[y].collect(t).match(a*t) if ma2: debug('_laplace_apply_rules match:') debug(' f: %s ( %s, %s )'%(f, ma1, ma2)) debug(' rule: multiply with exp (1.5)') L = _laplace_apply_rules(ma1[z], t, s-ma2[a], doit=doit, **hints) try: r, p, c = L return (k*r, p+ma2[a], c) except TypeError: return k*L return None "," This internal helper function tries to transform a product containing the `exp` function and returns `None` if it cannot do it. ",21,73,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _laplace_rule_exp(f, t, s, doit=True, **hints): hints.pop('simplify', True) a = Wild('a', exclude=[t]) y = Wild('y') z = Wild('z') k, func = f.as_independent(t, as_Add=False) ma1 = func.match(exp(y)*z) if ma1: ma2 = ma1[y].collect(t).match(a*t) if ma2: debug('_laplace_apply_rules match:') debug(' f: %s ( %s, %s )'%(f, ma1, ma2)) debug(' rule: multiply with exp (1.5)') L = _laplace_apply_rules(ma1[z], t, s-ma2[a], doit=doit, **hints) try: r, p, c = L return (k*r, p+ma2[a], c) except TypeError: return k*L return None ``` ###Assistant : This internal helper function tries to transform a product containing the `exp` function and returns `None` if it cannot do it. " 1995,"def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar): for i in xrange(startpos, len(line)): if line[i] == startchar: depth += 1 elif line[i] == endchar: depth -= 1 if depth == 0: return (i + 1, 0) return (-1, depth) ","Find the position just after the matching endchar. Args: line: a CleansedLines line. startpos: start searching at this position. 
depth: nesting level at startpos. startchar: expression opening character. endchar: expression closing character. Returns: On finding matching endchar: (index just after matching endchar, 0) Otherwise: (-1, new depth at end of this line) ",52,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar): for i in xrange(startpos, len(line)): if line[i] == startchar: depth += 1 elif line[i] == endchar: depth -= 1 if depth == 0: return (i + 1, 0) return (-1, depth) ``` ###Assistant : Find the position just after the matching endchar. Args: line: a CleansedLines line. startpos: start searching at this position. depth: nesting level at startpos. startchar: expression opening character. endchar: expression closing character. Returns: On finding matching endchar: (index just after matching endchar, 0) Otherwise: (-1, new depth at end of this line) " 1996,"def test_toy_example_collapse_points(): rng = np.random.RandomState(42) input_dim = 5 two_points = rng.randn(2, input_dim) X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]]) y = [0, 0, 1] ","Test on a toy example of three points that should collapse We build a simple example: two points from the same class and a point from a different class in the middle of them. On this simple example, the new (transformed) points should all collapse into one single point. Indeed, the objective is 2/(1 + exp(d/2)), with d the euclidean distance between the two samples from the same class. This is maximized for d=0 (because d>=0), with an objective equal to 1 (loss=-1.). ",83,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_toy_example_collapse_points(): rng = np.random.RandomState(42) input_dim = 5 two_points = rng.randn(2, input_dim) X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]]) y = [0, 0, 1] ``` ###Assistant : Test on a toy example of three points that should collapse We build a simple example: two points from the same class and a point from a different class in the middle of them. On this simple example, the new (transformed) points should all collapse into one single point. Indeed, the objective is 2/(1 + exp(d/2)), with d the euclidean distance between the two samples from the same class. This is maximized for d=0 (because d>=0), with an objective equal to 1 (loss=-1.). " 1997,"def _get_string_indexer_log_records(caplog): return [ ( rec.message, { k: v for k, v in rec.__dict__.items() if k in ( ""string_type"", ""is_global_quota"", ""num_global_quotas"", ""num_global_quotas"", ""org_batch_size"", ) }, ) for rec in caplog.records ] "," Get all log records and relevant extra arguments for easy snapshotting. ",11,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_string_indexer_log_records(caplog): return [ ( rec.message, { k: v for k, v in rec.__dict__.items() if k in ( ""string_type"", ""is_global_quota"", ""num_global_quotas"", ""num_global_quotas"", ""org_batch_size"", ) }, ) for rec in caplog.records ] ``` ###Assistant : Get all log records and relevant extra arguments for easy snapshotting. 
" 1998,"async def test_registered_pin_required(hass, user_form): with patch(MOCK_API_CONNECT, return_value=True), patch( MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock ) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=True): mock_device_registered.return_value = True await hass.config_entries.flow.async_configure( user_form[""flow_id""], user_input=TEST_CREDS ) ",Test if the device is already registered and PIN required.,10,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_registered_pin_required(hass, user_form): with patch(MOCK_API_CONNECT, return_value=True), patch( MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock ) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=True): mock_device_registered.return_value = True await hass.config_entries.flow.async_configure( user_form[""flow_id""], user_input=TEST_CREDS ) ``` ###Assistant : Test if the device is already registered and PIN required. " 1999,"def test_send_push_multiple_workers(self): http_client_mock1 = Mock(spec_set=[""post_json_get_json""]) http_client_mock1.post_json_get_json.side_effect = ( lambda *_, **__: defer.succeed({}) ) self.make_worker_hs( ""synapse.app.generic_worker"", { ""worker_name"": ""pusher1"", ""pusher_instances"": [""pusher1"", ""pusher2""], }, proxied_blacklisted_http_client=http_client_mock1, ) http_client_mock2 = Mock(spec_set=[""post_json_get_json""]) http_client_mock2.post_json_get_json.side_effect = ( lambda *_, **__: defer.succeed({}) ) self.make_worker_hs( ""synapse.app.generic_worker"", { ""worker_name"": ""pusher2"", ""pusher_instances"": [""pusher1"", ""pusher2""], }, proxied_blacklisted_http_client=http_client_mock2, ) # We choose a user name that we know should go to pusher1. event_id = self._create_pusher_and_send_msg(""user2"") # Advance time a bit, so the pusher will register something has happened self.pump() http_client_mock1.post_json_get_json.assert_called_once() http_client_mock2.post_json_get_json.assert_not_called() self.assertEqual( http_client_mock1.post_json_get_json.call_args[0][0], ""https://push.example.com/_matrix/push/v1/notify"", ) self.assertEqual( event_id, http_client_mock1.post_json_get_json.call_args[0][1][""notification""][ ""event_id"" ], ) http_client_mock1.post_json_get_json.reset_mock() http_client_mock2.post_json_get_json.reset_mock() # Now we choose a user name that we know should go to pusher2. event_id = self._create_pusher_and_send_msg(""user4"") # Advance time a bit, so the pusher will register something has happened self.pump() http_client_mock1.post_json_get_json.assert_not_called() http_client_mock2.post_json_get_json.assert_called_once() self.assertEqual( http_client_mock2.post_json_get_json.call_args[0][0], ""https://push.example.com/_matrix/push/v1/notify"", ) self.assertEqual( event_id, http_client_mock2.post_json_get_json.call_args[0][1][""notification""][ ""event_id"" ], ) ",Test that registration works when using sharded pusher workers.,9,133,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_send_push_multiple_workers(self): http_client_mock1 = Mock(spec_set=[""post_json_get_json""]) http_client_mock1.post_json_get_json.side_effect = ( lambda *_, **__: defer.succeed({}) ) self.make_worker_hs( ""synapse.app.generic_worker"", { ""worker_name"": ""pusher1"", ""pusher_instances"": [""pusher1"", ""pusher2""], }, proxied_blacklisted_http_client=http_client_mock1, ) http_client_mock2 = Mock(spec_set=[""post_json_get_json""]) http_client_mock2.post_json_get_json.side_effect = ( lambda *_, **__: defer.succeed({}) ) self.make_worker_hs( ""synapse.app.generic_worker"", { ""worker_name"": ""pusher2"", ""pusher_instances"": [""pusher1"", ""pusher2""], }, proxied_blacklisted_http_client=http_client_mock2, ) # We choose a user name that we know should go to pusher1. event_id = self._create_pusher_and_send_msg(""user2"") # Advance time a bit, so the pusher will register something has happened self.pump() http_client_mock1.post_json_get_json.assert_called_once() http_client_mock2.post_json_get_json.assert_not_called() self.assertEqual( http_client_mock1.post_json_get_json.call_args[0][0], ""https://push.example.com/_matrix/push/v1/notify"", ) self.assertEqual( event_id, http_client_mock1.post_json_get_json.call_args[0][1][""notification""][ ""event_id"" ], ) http_client_mock1.post_json_get_json.reset_mock() http_client_mock2.post_json_get_json.reset_mock() # Now we choose a user name that we know should go to pusher2. event_id = self._create_pusher_and_send_msg(""user4"") # Advance time a bit, so the pusher will register something has happened self.pump() http_client_mock1.post_json_get_json.assert_not_called() http_client_mock2.post_json_get_json.assert_called_once() self.assertEqual( http_client_mock2.post_json_get_json.call_args[0][0], ""https://push.example.com/_matrix/push/v1/notify"", ) self.assertEqual( event_id, http_client_mock2.post_json_get_json.call_args[0][1][""notification""][ ""event_id"" ], ) ``` ###Assistant : Test that registration works when using sharded pusher workers. " 2000,"def _fix_unknown_dimension(self, input_shape, output_shape): output_shape = list(output_shape) msg = ( ""total size of new array must be unchanged, "" ""input_shape = {}, output_shape = {}"".format( input_shape, output_shape ) ) known, unknown = 1, None for index, dim in enumerate(output_shape): if dim < 0: if unknown is None: unknown = index else: raise ValueError( f""There must be at most one unknown dimension in "" f""output_shape. Received: output_shape={output_shape}."" ) else: known *= dim original = np.prod(input_shape, dtype=int) if unknown is not None: if known == 0 or original % known != 0: raise ValueError(msg) output_shape[unknown] = original // known elif original != known: raise ValueError(msg) return output_shape ","Find and replace a missing dimension in an output shape. This is a near direct port of the internal Numpy function `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c` Args: input_shape: Shape of array being reshaped output_shape: Desired shape of the array with at most a single -1 which indicates a dimension that should be derived from the input shape. Returns: The new output shape with a -1 replaced with its computed value. Raises: ValueError: If the total array size of the output_shape is different than the input_shape, or more than one unknown dimension is specified. ",91,105,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _fix_unknown_dimension(self, input_shape, output_shape): output_shape = list(output_shape) msg = ( ""total size of new array must be unchanged, "" ""input_shape = {}, output_shape = {}"".format( input_shape, output_shape ) ) known, unknown = 1, None for index, dim in enumerate(output_shape): if dim < 0: if unknown is None: unknown = index else: raise ValueError( f""There must be at most one unknown dimension in "" f""output_shape. Received: output_shape={output_shape}."" ) else: known *= dim original = np.prod(input_shape, dtype=int) if unknown is not None: if known == 0 or original % known != 0: raise ValueError(msg) output_shape[unknown] = original // known elif original != known: raise ValueError(msg) return output_shape ``` ###Assistant : Find and replace a missing dimension in an output shape. This is a near direct port of the internal Numpy function `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c` Args: input_shape: Shape of array being reshaped output_shape: Desired shape of the array with at most a single -1 which indicates a dimension that should be derived from the input shape. Returns: The new output shape with a -1 replaced with its computed value. Raises: ValueError: If the total array size of the output_shape is different than the input_shape, or more than one unknown dimension is specified. " 2001,"def test_converter_with_unicode_dtype(): txt = StringIO('abc,def\nrst,xyz') conv = bytes.upper res = np.loadtxt( txt, dtype=np.dtype(""U3""), converters=conv, delimiter="","") expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']]) assert_equal(res, expected) "," With the default 'bytes' encoding, tokens are encoded prior to being passed to the converter. This means that the output of the converter may be bytes instead of unicode as expected by `read_rows`. This test checks that outputs from the above scenario are properly decoded prior to parsing by `read_rows`. ",50,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_converter_with_unicode_dtype(): txt = StringIO('abc,def\nrst,xyz') conv = bytes.upper res = np.loadtxt( txt, dtype=np.dtype(""U3""), converters=conv, delimiter="","") expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']]) assert_equal(res, expected) ``` ###Assistant : With the default 'bytes' encoding, tokens are encoded prior to being passed to the converter. This means that the output of the converter may be bytes instead of unicode as expected by `read_rows`. This test checks that outputs from the above scenario are properly decoded prior to parsing by `read_rows`. " 2002,"def alembic_stamp(revision): # lazy import for performance import alembic.command alembic.command.stamp(alembic_config(), revision=revision) "," Stamp the revision table with the given revision; don’t run any migrations Args: revision: The revision passed to `alembic stamp`. ",20,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def alembic_stamp(revision): # lazy import for performance import alembic.command alembic.command.stamp(alembic_config(), revision=revision) ``` ###Assistant : Stamp the revision table with the given revision; don’t run any migrations Args: revision: The revision passed to `alembic stamp`. " 2003,"def unflatten_superdims(assignment): def check(cond): if cond: return raise NotImplementedError(""Failed to convert OpSharding into a ShardingSpec. 
"" ""Please open a bug report!"") flat_assignment = np.asarray(assignment, dtype=np.int64) check(flat_assignment[0] == 0) dims = [] while flat_assignment.size > 1: stride = flat_assignment[1] for i in range(len(flat_assignment)): if flat_assignment[i] != i * stride: break else: # After this loop i should point to an ""element after the sequence"", so # we have to increment it if the whole array is a strided sequence. i += 1 size = i dims.append((size, stride)) assert size > 1 # Ensure progress flat_assignment = flat_assignment[::size] return dims ","Unflatten a list of dimension sizes and their strides that generates assignment. If this function succeeds for a given ``assignment``, then the following property should be satisfied:: dims_with_strides = unflatten_superdims(assignment) base_array = np.arange(map(fst, sorted(dims_with_strides, key=snd, reverse=True))) assignment == base_array.transpose(argsort(dims_with_strides, key=snd, reverse=True)).flatten() That is, the returned dimensions list all sizes of the base array (with strides indicating their initial order). The order of dimensions in the list corresponds to the permutation that applied to the base array generates the assignment. ",79,98,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unflatten_superdims(assignment): def check(cond): if cond: return raise NotImplementedError(""Failed to convert OpSharding into a ShardingSpec. "" ""Please open a bug report!"") flat_assignment = np.asarray(assignment, dtype=np.int64) check(flat_assignment[0] == 0) dims = [] while flat_assignment.size > 1: stride = flat_assignment[1] for i in range(len(flat_assignment)): if flat_assignment[i] != i * stride: break else: # After this loop i should point to an ""element after the sequence"", so # we have to increment it if the whole array is a strided sequence. i += 1 size = i dims.append((size, stride)) assert size > 1 # Ensure progress flat_assignment = flat_assignment[::size] return dims ``` ###Assistant : Unflatten a list of dimension sizes and their strides that generates assignment. If this function succeeds for a given ``assignment``, then the following property should be satisfied:: dims_with_strides = unflatten_superdims(assignment) base_array = np.arange(map(fst, sorted(dims_with_strides, key=snd, reverse=True))) assignment == base_array.transpose(argsort(dims_with_strides, key=snd, reverse=True)).flatten() That is, the returned dimensions list all sizes of the base array (with strides indicating their initial order). The order of dimensions in the list corresponds to the permutation that applied to the base array generates the assignment. 
" 2004,"def test_perf_issue_no_associate_error_event(self): self.project.update_option(""sentry:performance_issue_creation_rate"", 1.0) with mock.patch(""sentry_sdk.tracing.Span.containing_transaction""), self.feature( { ""projects:performance-suspect-spans-ingestion"": True, ""organizations:performance-issues-ingest"": True, } ): manager = EventManager(make_event()) manager.normalize() event = manager.save(self.project.id) assert len(event.groups) == 1 # sneakily make the group type wrong group = event.groups[0] group.type = GroupType.PERFORMANCE_N_PLUS_ONE.value group.save() manager = EventManager(make_event()) manager.normalize() event = manager.save(self.project.id) assert len(event.groups) == 0 ",Test that you can't associate an error event with a performance issue,12,50,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_perf_issue_no_associate_error_event(self): self.project.update_option(""sentry:performance_issue_creation_rate"", 1.0) with mock.patch(""sentry_sdk.tracing.Span.containing_transaction""), self.feature( { ""projects:performance-suspect-spans-ingestion"": True, ""organizations:performance-issues-ingest"": True, } ): manager = EventManager(make_event()) manager.normalize() event = manager.save(self.project.id) assert len(event.groups) == 1 # sneakily make the group type wrong group = event.groups[0] group.type = GroupType.PERFORMANCE_N_PLUS_ONE.value group.save() manager = EventManager(make_event()) manager.normalize() event = manager.save(self.project.id) assert len(event.groups) == 0 ``` ###Assistant : Test that you can't associate an error event with a performance issue " 2005,"def testNodeTerminatedDuringUpdate(self): cluster_config = copy.deepcopy(MOCK_DEFAULT_CONFIG) cluster_config[""available_node_types""][""ray.worker.default""][""min_workers""] = 2 cluster_config[""worker_start_ray_commands""] = [""ray_start_cmd""] # Don't need the extra node type or a docker config. cluster_config[""head_node_type""] = [""ray.worker.default""] del cluster_config[""available_node_types""][""ray.head.default""] del cluster_config[""docker""] config_path = self.write_config(cluster_config) self.provider = MockProvider() runner = MockProcessRunner() lm = LoadMetrics() mock_metrics = Mock(spec=AutoscalerPrometheusMetrics()) autoscaler = MockAutoscaler( config_path, lm, MockNodeInfoStub(), max_failures=0, process_runner=runner, update_interval_s=0, prom_metrics=mock_metrics, ) # Scale up to two up-to-date workers autoscaler.update() self.waitForNodes(2) self.provider.finish_starting_nodes() autoscaler.update() self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE}) # Mark both nodes as unhealthy for _ in range(5): if autoscaler.updaters: time.sleep(0.05) autoscaler.update() lm.last_heartbeat_time_by_ip[""172.0.0.0""] = 0 lm.last_heartbeat_time_by_ip[""172.0.0.1""] = 0 # Expect both updates to be successful, no nodes in updating state assert mock_metrics.successful_updates.inc.call_count == 2 assert mock_metrics.worker_update_time.observe.call_count == 2 mock_metrics.updating_nodes.set.assert_called_with(0) assert not autoscaler.updaters # Set up process runner to terminate worker 0 during missed heartbeat # recovery and also cause the updater to fail."," Tests autoscaler handling a node getting terminated during an update triggered by the node missing a heartbeat. Extension of testRecoverUnhealthyWorkers. In this test, two nodes miss a heartbeat. 
One of them (node 0) is terminated during its recovery update. The other (node 1) just fails its update. When processing completed updates, the autoscaler terminates node 1 but does not try to terminate node 0 again. ",65,134,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def testNodeTerminatedDuringUpdate(self): cluster_config = copy.deepcopy(MOCK_DEFAULT_CONFIG) cluster_config[""available_node_types""][""ray.worker.default""][""min_workers""] = 2 cluster_config[""worker_start_ray_commands""] = [""ray_start_cmd""] # Don't need the extra node type or a docker config. cluster_config[""head_node_type""] = [""ray.worker.default""] del cluster_config[""available_node_types""][""ray.head.default""] del cluster_config[""docker""] config_path = self.write_config(cluster_config) self.provider = MockProvider() runner = MockProcessRunner() lm = LoadMetrics() mock_metrics = Mock(spec=AutoscalerPrometheusMetrics()) autoscaler = MockAutoscaler( config_path, lm, MockNodeInfoStub(), max_failures=0, process_runner=runner, update_interval_s=0, prom_metrics=mock_metrics, ) # Scale up to two up-to-date workers autoscaler.update() self.waitForNodes(2) self.provider.finish_starting_nodes() autoscaler.update() self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE}) # Mark both nodes as unhealthy for _ in range(5): if autoscaler.updaters: time.sleep(0.05) autoscaler.update() lm.last_heartbeat_time_by_ip[""172.0.0.0""] = 0 lm.last_heartbeat_time_by_ip[""172.0.0.1""] = 0 # Expect both updates to be successful, no nodes in updating state assert mock_metrics.successful_updates.inc.call_count == 2 assert mock_metrics.worker_update_time.observe.call_count == 2 mock_metrics.updating_nodes.set.assert_called_with(0) assert not autoscaler.updaters # Set up process runner to terminate worker 0 during missed heartbeat # recovery and also cause the updater to fail. ``` ###Assistant : Tests autoscaler handling a node getting terminated during an update triggered by the node missing a heartbeat. Extension of testRecoverUnhealthyWorkers. In this test, two nodes miss a heartbeat. One of them (node 0) is terminated during its recovery update. The other (node 1) just fails its update. When processing completed updates, the autoscaler terminates node 1 but does not try to terminate node 0 again. " 2006,"def call_cr(self, other_args): parser = argparse.ArgumentParser( prog=""cr"", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( ""-t"", ""--type"", dest=""type"", type=str, help=""Select interest rate type"", default=""supply"", choices=[""borrow"", ""supply""], ) parser.add_argument( ""-c"", ""--cryptocurrrencies"", dest=""cryptos"", type=loanscan_model.check_valid_coin, help=f, default=""BTC,ETH,USDT,USDC"", ) parser.add_argument( ""-p"", ""--platforms"", dest=""platforms"", type=loanscan_model.check_valid_platform, help=f, default=""BlockFi,Ledn,SwissBorg,Youhodler"", ) if other_args and ""-"" not in other_args[0][0]: other_args.insert(0, ""-t"") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED, limit=10 ) if ns_parser: loanscan_view.display_crypto_rates( rate_type=ns_parser.type, cryptos=ns_parser.cryptos, platforms=ns_parser.platforms, limit=ns_parser.limit, export=ns_parser.export, ) ","Process cr commandDisplays crypto {borrow,supply} interest rates for cryptocurrencies across several platforms. 
You can select rate type with --type {borrow,supply} You can display only N number of platforms with --limit parameter.Cryptocurrencies to search interest rates for separated by comma. Default: BTC,ETH,USDT,USDC. Options: {"","".join(loanscan_model.CRYPTOS)}Platforms to search interest rates in separated by comma. Default: BlockFi,Ledn,SwissBorg,Youhodler. Options: {"","".join(loanscan_model.PLATFORMS)}",55,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_cr(self, other_args): parser = argparse.ArgumentParser( prog=""cr"", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( ""-t"", ""--type"", dest=""type"", type=str, help=""Select interest rate type"", default=""supply"", choices=[""borrow"", ""supply""], ) parser.add_argument( ""-c"", ""--cryptocurrrencies"", dest=""cryptos"", type=loanscan_model.check_valid_coin, help=f, default=""BTC,ETH,USDT,USDC"", ) parser.add_argument( ""-p"", ""--platforms"", dest=""platforms"", type=loanscan_model.check_valid_platform, help=f, default=""BlockFi,Ledn,SwissBorg,Youhodler"", ) if other_args and ""-"" not in other_args[0][0]: other_args.insert(0, ""-t"") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED, limit=10 ) if ns_parser: loanscan_view.display_crypto_rates( rate_type=ns_parser.type, cryptos=ns_parser.cryptos, platforms=ns_parser.platforms, limit=ns_parser.limit, export=ns_parser.export, ) ``` ###Assistant : Process cr commandDisplays crypto {borrow,supply} interest rates for cryptocurrencies across several platforms. You can select rate type with --type {borrow,supply} You can display only N number of platforms with --limit parameter.Cryptocurrencies to search interest rates for separated by comma. Default: BTC,ETH,USDT,USDC. Options: {"","".join(loanscan_model.CRYPTOS)}Platforms to search interest rates in separated by comma. Default: BlockFi,Ledn,SwissBorg,Youhodler. Options: {"","".join(loanscan_model.PLATFORMS)} " 2007,"def export(self): result = {} for name, module in self.nas_modules: if name not in result: result[name] = module.export() return result "," Export the NAS result, ideally the best choice of each nas_modules. You may implement an ``export`` method for your customized nas_module. Returns -------- result : Dict[str, int] Keys are names of nas_modules, and values are the choice indices of them. ",40,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def export(self): result = {} for name, module in self.nas_modules: if name not in result: result[name] = module.export() return result ``` ###Assistant : Export the NAS result, ideally the best choice of each nas_modules. You may implement an ``export`` method for your customized nas_module. Returns -------- result : Dict[str, int] Keys are names of nas_modules, and values are the choice indices of them. " 2008,"def clean_pipeline_string(self, individual): dirty_string = str(individual) # There are many parameter prefixes in the pipeline strings, used solely for # making the terminal name unique, eg. LinearSVC__. 
parameter_prefixes = [ (m.start(), m.end()) for m in re.finditer("", [\w]+__"", dirty_string) ] # We handle them in reverse so we do not mess up indices pretty = dirty_string for (start, end) in reversed(parameter_prefixes): pretty = pretty[: start + 2] + pretty[end:] return pretty ","Provide a string of the individual without the parameter prefixes. Parameters ---------- individual: individual Individual which should be represented by a pretty string Returns ------- A string like str(individual), but with parameter prefixes removed. ",34,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clean_pipeline_string(self, individual): dirty_string = str(individual) # There are many parameter prefixes in the pipeline strings, used solely for # making the terminal name unique, eg. LinearSVC__. parameter_prefixes = [ (m.start(), m.end()) for m in re.finditer("", [\w]+__"", dirty_string) ] # We handle them in reverse so we do not mess up indices pretty = dirty_string for (start, end) in reversed(parameter_prefixes): pretty = pretty[: start + 2] + pretty[end:] return pretty ``` ###Assistant : Provide a string of the individual without the parameter prefixes. Parameters ---------- individual: individual Individual which should be represented by a pretty string Returns ------- A string like str(individual), but with parameter prefixes removed. " 2009,"def _lin_eq2dict(a, symset): if a in symset: return S.Zero, {a: S.One} elif a.is_Add: terms_list = defaultdict(list) coeff_list = [] for ai in a.args: ci, ti = _lin_eq2dict(ai, symset) coeff_list.append(ci) for mij, cij in ti.items(): terms_list[mij].append(cij) coeff = Add(*coeff_list) terms = {sym: Add(*coeffs) for sym, coeffs in terms_list.items()} return coeff, terms elif a.is_Mul: terms = terms_coeff = None coeff_list = [] for ai in a.args: ci, ti = _lin_eq2dict(ai, symset) if not ti: coeff_list.append(ci) elif terms is None: terms = ti terms_coeff = ci else: raise PolyNonlinearError coeff = Mul(*coeff_list) if terms is None: return coeff, {} else: terms = {sym: coeff * c for sym, c in terms.items()} return coeff * terms_coeff, terms elif a.is_Equality: return _lin_eq2dict(a.lhs - a.rhs, symset) elif not a.has_free(*symset): return a, {} else: raise PolyNonlinearError ",Efficiently convert a linear equation to a dict of coefficients,10,129,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _lin_eq2dict(a, symset): if a in symset: return S.Zero, {a: S.One} elif a.is_Add: terms_list = defaultdict(list) coeff_list = [] for ai in a.args: ci, ti = _lin_eq2dict(ai, symset) coeff_list.append(ci) for mij, cij in ti.items(): terms_list[mij].append(cij) coeff = Add(*coeff_list) terms = {sym: Add(*coeffs) for sym, coeffs in terms_list.items()} return coeff, terms elif a.is_Mul: terms = terms_coeff = None coeff_list = [] for ai in a.args: ci, ti = _lin_eq2dict(ai, symset) if not ti: coeff_list.append(ci) elif terms is None: terms = ti terms_coeff = ci else: raise PolyNonlinearError coeff = Mul(*coeff_list) if terms is None: return coeff, {} else: terms = {sym: coeff * c for sym, c in terms.items()} return coeff * terms_coeff, terms elif a.is_Equality: return _lin_eq2dict(a.lhs - a.rhs, symset) elif not a.has_free(*symset): return a, {} else: raise PolyNonlinearError ``` ###Assistant : Efficiently convert a linear equation to a dict of coefficients " 2010,"def internal_ip(self, node_id): ip = ( self._get_cached_node(node_id=node_id)[""internal_ip""] or self._get_node(node_id=node_id)[""internal_ip""] ) return ip ",Returns the internal ip (Ray ip) of the given node.,10,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def internal_ip(self, node_id): ip = ( self._get_cached_node(node_id=node_id)[""internal_ip""] or self._get_node(node_id=node_id)[""internal_ip""] ) return ip ``` ###Assistant : Returns the internal ip (Ray ip) of the given node. " 2011,"def _write_file(self, source, dest, type, compress=False): start = self.lib.tell() length = os.stat(source).st_size with open(source, 'rb') as f: if compress: buffer = bytearray(16 * 1024) compressor = zlib.compressobj(self.LEVEL) while 1: read = f.readinto(buffer) if not read: break self.lib.write(compressor.compress(buffer[:read])) self.lib.write(compressor.flush()) else: shutil.copyfileobj(f, self.lib) self.toc.add(start, self.lib.tell() - start, length, int(compress), type, dest) "," Stream copy a large file into the archive and update the table of contents. ",14,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _write_file(self, source, dest, type, compress=False): start = self.lib.tell() length = os.stat(source).st_size with open(source, 'rb') as f: if compress: buffer = bytearray(16 * 1024) compressor = zlib.compressobj(self.LEVEL) while 1: read = f.readinto(buffer) if not read: break self.lib.write(compressor.compress(buffer[:read])) self.lib.write(compressor.flush()) else: shutil.copyfileobj(f, self.lib) self.toc.add(start, self.lib.tell() - start, length, int(compress), type, dest) ``` ###Assistant : Stream copy a large file into the archive and update the table of contents. 
" 2012,"def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False, weight=None): import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) if edgelist is None: if G.is_multigraph(): edgelist = list(G.edges(keys=True)) else: edgelist = list(G.edges()) A = sp.sparse.lil_array((len(nodelist), len(edgelist))) node_index = {node: i for i, node in enumerate(nodelist)} for ei, e in enumerate(edgelist): (u, v) = e[:2] if u == v: continue # self loops give zero column try: ui = node_index[u] vi = node_index[v] except KeyError as err: raise nx.NetworkXError( f""node {u} or {v} in edgelist but not in nodelist"" ) from err if weight is None: wt = 1 else: if G.is_multigraph(): ekey = e[2] wt = G[u][v][ekey].get(weight, 1) else: wt = G[u][v].get(weight, 1) if oriented: A[ui, ei] = -wt A[vi, ei] = wt else: A[ui, ei] = wt A[vi, ei] = wt import warnings warnings.warn( ""incidence_matrix will return a scipy.sparse array instead of a matrix in Networkx 3.0."", FutureWarning, stacklevel=2, ) # TODO: Rm sp.sparse.csc_matrix in Networkx 3.0 return A.asformat(""csc"") ","Returns incidence matrix of G. The incidence matrix assigns each row to a node and each column to an edge. For a standard incidence matrix a 1 appears wherever a row's node is incident on the column's edge. For an oriented incidence matrix each edge is assigned an orientation (arbitrarily for undirected and aligning to direction for directed). A -1 appears for the source (tail) of an edge and 1 for the destination (head) of the edge. The elements are zero otherwise. Parameters ---------- G : graph A NetworkX graph nodelist : list, optional (default= all nodes in G) The rows are ordered according to the nodes in nodelist. If nodelist is None, then the ordering is produced by G.nodes(). edgelist : list, optional (default= all edges in G) The columns are ordered according to the edges in edgelist. If edgelist is None, then the ordering is produced by G.edges(). oriented: bool, optional (default=False) If True, matrix elements are +1 or -1 for the head or tail node respectively of each edge. If False, +1 occurs at both nodes. weight : string or None, optional (default=None) The edge data key used to provide each value in the matrix. If None, then each edge has weight 1. Edge weights, if used, should be positive so that the orientation can provide the sign. Returns ------- A : SciPy sparse matrix The incidence matrix of G. Notes ----- For MultiGraph/MultiDiGraph, the edges in edgelist should be (u,v,key) 3-tuples. ""Networks are the best discrete model for so many problems in applied mathematics"" [1]_. References ---------- .. [1] Gil Strang, Network applications: A = incidence matrix, http://videolectures.net/mit18085f07_strang_lec03/ ",272,164,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def incidence_matrix(G, nodelist=None, edgelist=None, oriented=False, weight=None): import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) if edgelist is None: if G.is_multigraph(): edgelist = list(G.edges(keys=True)) else: edgelist = list(G.edges()) A = sp.sparse.lil_array((len(nodelist), len(edgelist))) node_index = {node: i for i, node in enumerate(nodelist)} for ei, e in enumerate(edgelist): (u, v) = e[:2] if u == v: continue # self loops give zero column try: ui = node_index[u] vi = node_index[v] except KeyError as err: raise nx.NetworkXError( f""node {u} or {v} in edgelist but not in nodelist"" ) from err if weight is None: wt = 1 else: if G.is_multigraph(): ekey = e[2] wt = G[u][v][ekey].get(weight, 1) else: wt = G[u][v].get(weight, 1) if oriented: A[ui, ei] = -wt A[vi, ei] = wt else: A[ui, ei] = wt A[vi, ei] = wt import warnings warnings.warn( ""incidence_matrix will return a scipy.sparse array instead of a matrix in Networkx 3.0."", FutureWarning, stacklevel=2, ) # TODO: Rm sp.sparse.csc_matrix in Networkx 3.0 return A.asformat(""csc"") ``` ###Assistant : Returns incidence matrix of G. The incidence matrix assigns each row to a node and each column to an edge. For a standard incidence matrix a 1 appears wherever a row's node is incident on the column's edge. For an oriented incidence matrix each edge is assigned an orientation (arbitrarily for undirected and aligning to direction for directed). A -1 appears for the source (tail) of an edge and 1 for the destination (head) of the edge. The elements are zero otherwise. Parameters ---------- G : graph A NetworkX graph nodelist : list, optional (default= all nodes in G) The rows are ordered according to the nodes in nodelist. If nodelist is None, then the ordering is produced by G.nodes(). edgelist : list, optional (default= all edges in G) The columns are ordered according to the edges in edgelist. If edgelist is None, then the ordering is produced by G.edges(). oriented: bool, optional (default=False) If True, matrix elements are +1 or -1 for the head or tail node respectively of each edge. If False, +1 occurs at both nodes. weight : string or None, optional (default=None) The edge data key used to provide each value in the matrix. If None, then each edge has weight 1. Edge weights, if used, should be positive so that the orientation can provide the sign. Returns ------- A : SciPy sparse matrix The incidence matrix of G. Notes ----- For MultiGraph/MultiDiGraph, the edges in edgelist should be (u,v,key) 3-tuples. ""Networks are the best discrete model for so many problems in applied mathematics"" [1]_. References ---------- .. [1] Gil Strang, Network applications: A = incidence matrix, http://videolectures.net/mit18085f07_strang_lec03/ " 2013,"def call(self, features, training=None): if not isinstance(features, dict): raise ValueError( ""We expected a dictionary here. 
Instead we got: "", features ) if training is None: training = backend.learning_phase() transformation_cache = ( tf.__internal__.feature_column.FeatureTransformationCache(features) ) output_tensors = [] sequence_lengths = [] for column in self._feature_columns: with backend.name_scope(column.name): try: ( dense_tensor, sequence_length, ) = column.get_sequence_dense_tensor( transformation_cache, self._state_manager, training=training, ) except TypeError: ( dense_tensor, sequence_length, ) = column.get_sequence_dense_tensor( transformation_cache, self._state_manager ) # Flattens the final dimension to produce a 3D Tensor. output_tensors.append( self._process_dense_tensor(column, dense_tensor) ) sequence_lengths.append(sequence_length) # Check and process sequence lengths. kfc._verify_static_batch_size_equality( sequence_lengths, self._feature_columns ) sequence_length = _assert_all_equal_and_return(sequence_lengths) return self._verify_and_concat_tensors(output_tensors), sequence_length ","Returns sequence input corresponding to the `feature_columns`. Args: features: A dict mapping keys to tensors. training: Python boolean or None, indicating whether to the layer is being run in training mode. This argument is passed to the call method of any `FeatureColumn` that takes a `training` argument. For example, if a `FeatureColumn` performed dropout, the column could expose a `training` argument to control whether the dropout should be applied. If `None`, defaults to `tf.keras.backend.learning_phase()`. Returns: An `(input_layer, sequence_length)` tuple where: - input_layer: A float `Tensor` of shape `[batch_size, T, D]`. `T` is the maximum sequence length for this batch, which could differ from batch to batch. `D` is the sum of `num_elements` for all `feature_columns`. - sequence_length: An int `Tensor` of shape `[batch_size]`. The sequence length for each example. Raises: ValueError: If features are not a dictionary. ",137,98,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call(self, features, training=None): if not isinstance(features, dict): raise ValueError( ""We expected a dictionary here. Instead we got: "", features ) if training is None: training = backend.learning_phase() transformation_cache = ( tf.__internal__.feature_column.FeatureTransformationCache(features) ) output_tensors = [] sequence_lengths = [] for column in self._feature_columns: with backend.name_scope(column.name): try: ( dense_tensor, sequence_length, ) = column.get_sequence_dense_tensor( transformation_cache, self._state_manager, training=training, ) except TypeError: ( dense_tensor, sequence_length, ) = column.get_sequence_dense_tensor( transformation_cache, self._state_manager ) # Flattens the final dimension to produce a 3D Tensor. output_tensors.append( self._process_dense_tensor(column, dense_tensor) ) sequence_lengths.append(sequence_length) # Check and process sequence lengths. kfc._verify_static_batch_size_equality( sequence_lengths, self._feature_columns ) sequence_length = _assert_all_equal_and_return(sequence_lengths) return self._verify_and_concat_tensors(output_tensors), sequence_length ``` ###Assistant : Returns sequence input corresponding to the `feature_columns`. Args: features: A dict mapping keys to tensors. training: Python boolean or None, indicating whether to the layer is being run in training mode. This argument is passed to the call method of any `FeatureColumn` that takes a `training` argument. 
For example, if a `FeatureColumn` performed dropout, the column could expose a `training` argument to control whether the dropout should be applied. If `None`, defaults to `tf.keras.backend.learning_phase()`. Returns: An `(input_layer, sequence_length)` tuple where: - input_layer: A float `Tensor` of shape `[batch_size, T, D]`. `T` is the maximum sequence length for this batch, which could differ from batch to batch. `D` is the sum of `num_elements` for all `feature_columns`. - sequence_length: An int `Tensor` of shape `[batch_size]`. The sequence length for each example. Raises: ValueError: If features are not a dictionary. " 2014,"def feed_eof(self): self._incoming.write_eof() ssldata, appdata = self.feed_ssldata(b'') assert appdata == [] or appdata == [b''] ","Send a potentially ""ragged"" EOF. This method will raise an SSL_ERROR_EOF exception if the EOF is unexpected. ",17,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def feed_eof(self): self._incoming.write_eof() ssldata, appdata = self.feed_ssldata(b'') assert appdata == [] or appdata == [b''] ``` ###Assistant : Send a potentially ""ragged"" EOF. This method will raise an SSL_ERROR_EOF exception if the EOF is unexpected. " 2015,"def screen(self) -> Screen: try: return self._screen_stack[-1] except IndexError: raise ScreenStackError(""No screens on stack"") from None ","Get the current screen. Raises: ScreenStackError: If there are no screens on the stack. Returns: Screen: The currently active screen. ",20,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def screen(self) -> Screen: try: return self._screen_stack[-1] except IndexError: raise ScreenStackError(""No screens on stack"") from None ``` ###Assistant : Get the current screen. Raises: ScreenStackError: If there are no screens on the stack. Returns: Screen: The currently active screen. " 2016,"def test_empty_dunder_path_no_dunder_file(self): with self.assertRaises(ImproperlyConfigured): AppConfig(""label"", Stub(__path__=[])) ","If the __path__ attr is empty and there is no __file__, raise.",12,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_empty_dunder_path_no_dunder_file(self): with self.assertRaises(ImproperlyConfigured): AppConfig(""label"", Stub(__path__=[])) ``` ###Assistant : If the __path__ attr is empty and there is no __file__, raise. " 2017,"def train_epoch_ch3(net, train_iter, loss, updater): # Sum of training loss, sum of training accuracy, no. of examples metric = Accumulator(3) for X, y in train_iter: # Compute gradients and update parameters with tf.GradientTape() as tape: y_hat = net(X) # Keras implementations for loss takes (labels, predictions) # instead of (predictions, labels) that users might implement # in this book, e.g. 
`cross_entropy` that we implemented above if isinstance(loss, tf.keras.losses.Loss): l = loss(y, y_hat) else: l = loss(y_hat, y) if isinstance(updater, tf.keras.optimizers.Optimizer): params = net.trainable_variables grads = tape.gradient(l, params) updater.apply_gradients(zip(grads, params)) else: updater(X.shape[0], tape.gradient(l, updater.params)) # Keras loss by default returns the average loss in a batch l_sum = l * float(tf.size(y)) if isinstance( loss, tf.keras.losses.Loss) else tf.reduce_sum(l) metric.add(l_sum, accuracy(y_hat, y), tf.size(y)) # Return training loss and training accuracy return metric[0] / metric[2], metric[1] / metric[2] ","The training loop defined in Chapter 3. Defined in :numref:`sec_softmax_scratch`",10,134,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def train_epoch_ch3(net, train_iter, loss, updater): # Sum of training loss, sum of training accuracy, no. of examples metric = Accumulator(3) for X, y in train_iter: # Compute gradients and update parameters with tf.GradientTape() as tape: y_hat = net(X) # Keras implementations for loss takes (labels, predictions) # instead of (predictions, labels) that users might implement # in this book, e.g. `cross_entropy` that we implemented above if isinstance(loss, tf.keras.losses.Loss): l = loss(y, y_hat) else: l = loss(y_hat, y) if isinstance(updater, tf.keras.optimizers.Optimizer): params = net.trainable_variables grads = tape.gradient(l, params) updater.apply_gradients(zip(grads, params)) else: updater(X.shape[0], tape.gradient(l, updater.params)) # Keras loss by default returns the average loss in a batch l_sum = l * float(tf.size(y)) if isinstance( loss, tf.keras.losses.Loss) else tf.reduce_sum(l) metric.add(l_sum, accuracy(y_hat, y), tf.size(y)) # Return training loss and training accuracy return metric[0] / metric[2], metric[1] / metric[2] ``` ###Assistant : The training loop defined in Chapter 3. Defined in :numref:`sec_softmax_scratch` " 2018,"def all_estimators(type_filter=None): # lazy import to avoid circular imports from sklearn.base from . import IS_PYPY from ._testing import ignore_warnings from ..base import ( BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin, ) ","Get a list of all estimators from `sklearn`. This function crawls the module and gets all classes that inherit from BaseEstimator. Classes that are defined in test-modules are not included. Parameters ---------- type_filter : {""classifier"", ""regressor"", ""cluster"", ""transformer""} \ or list of such str, default=None Which kind of estimators should be returned. If None, no filter is applied and all estimators are returned. Possible values are 'classifier', 'regressor', 'cluster' and 'transformer' to get estimators only of these specific types, or a list of these to get the estimators that fit at least one of the types. Returns ------- estimators : list of tuples List of (name, class), where ``name`` is the class name as string and ``class`` is the actual type of the class. ",124,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def all_estimators(type_filter=None): # lazy import to avoid circular imports from sklearn.base from . import IS_PYPY from ._testing import ignore_warnings from ..base import ( BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin, ) ``` ###Assistant : Get a list of all estimators from `sklearn`. 
This function crawls the module and gets all classes that inherit from BaseEstimator. Classes that are defined in test-modules are not included. Parameters ---------- type_filter : {""classifier"", ""regressor"", ""cluster"", ""transformer""} \ or list of such str, default=None Which kind of estimators should be returned. If None, no filter is applied and all estimators are returned. Possible values are 'classifier', 'regressor', 'cluster' and 'transformer' to get estimators only of these specific types, or a list of these to get the estimators that fit at least one of the types. Returns ------- estimators : list of tuples List of (name, class), where ``name`` is the class name as string and ``class`` is the actual type of the class. " 2019,"def test_legend_auto5(): fig, axs = plt.subplots(ncols=2, figsize=(9.6, 4.8)) leg_bboxes = [] for ax, loc in zip(axs.flat, (""center"", ""best"")): # An Ellipse patch at the top, a U-shaped Polygon patch at the # bottom and a ring-like Wedge patch: the correct placement of # the legend should be in the center. for _patch in [ mpatches.Ellipse( xy=(0.5, 0.9), width=0.8, height=0.2, fc=""C1""), mpatches.Polygon(np.array([ [0, 1], [0, 0], [1, 0], [1, 1], [0.9, 1.0], [0.9, 0.1], [0.1, 0.1], [0.1, 1.0], [0.1, 1.0]]), fc=""C1""), mpatches.Wedge((0.5, 0.5), 0.5, 0, 360, width=0.05, fc=""C0"") ]: ax.add_patch(_patch) ax.plot([0.1, 0.9], [0.9, 0.9], label=""A segment"") # sthg to label leg = ax.legend(loc=loc) fig.canvas.draw() leg_bboxes.append( leg.get_window_extent().transformed(ax.transAxes.inverted())) assert_allclose(leg_bboxes[1].bounds, leg_bboxes[0].bounds) @image_comparison(['legend_various_labels'], remove_text=True)"," Check that the automatic placement handle a rather complex case with non rectangular patch. Related to issue #9580. ",18,109,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_legend_auto5(): fig, axs = plt.subplots(ncols=2, figsize=(9.6, 4.8)) leg_bboxes = [] for ax, loc in zip(axs.flat, (""center"", ""best"")): # An Ellipse patch at the top, a U-shaped Polygon patch at the # bottom and a ring-like Wedge patch: the correct placement of # the legend should be in the center. for _patch in [ mpatches.Ellipse( xy=(0.5, 0.9), width=0.8, height=0.2, fc=""C1""), mpatches.Polygon(np.array([ [0, 1], [0, 0], [1, 0], [1, 1], [0.9, 1.0], [0.9, 0.1], [0.1, 0.1], [0.1, 1.0], [0.1, 1.0]]), fc=""C1""), mpatches.Wedge((0.5, 0.5), 0.5, 0, 360, width=0.05, fc=""C0"") ]: ax.add_patch(_patch) ax.plot([0.1, 0.9], [0.9, 0.9], label=""A segment"") # sthg to label leg = ax.legend(loc=loc) fig.canvas.draw() leg_bboxes.append( leg.get_window_extent().transformed(ax.transAxes.inverted())) assert_allclose(leg_bboxes[1].bounds, leg_bboxes[0].bounds) @image_comparison(['legend_various_labels'], remove_text=True) ``` ###Assistant : Check that the automatic placement handle a rather complex case with non rectangular patch. Related to issue #9580. " 2020,"def _toggle_cursor_visible(self): if time.monotonic() - self._last_keypress_time > self.cursor_blink_period: self._cursor_blink_visible = not self._cursor_blink_visible self.refresh() ","Manages the blinking of the cursor - ensuring blinking only starts when the user hasn't pressed a key in some time",21,13,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _toggle_cursor_visible(self): if time.monotonic() - self._last_keypress_time > self.cursor_blink_period: self._cursor_blink_visible = not self._cursor_blink_visible self.refresh() ``` ###Assistant : Manages the blinking of the cursor - ensuring blinking only starts when the user hasn't pressed a key in some time " 2021,"def parse_semver(version, operator) -> Optional[SemverFilter]: (operator, negated) = handle_operator_negation(operator) try: operator = OPERATOR_TO_DJANGO[operator] except KeyError: raise InvalidSearchQuery(""Invalid operation 'IN' for semantic version filter."") version = version if ""@"" in version else f""{SEMVER_FAKE_PACKAGE}@{version}"" parsed = parse_release_relay(version) parsed_version = parsed.get(""version_parsed"") if parsed_version: # Convert `pre` to always be a string prerelease = parsed_version[""pre""] if parsed_version[""pre""] else """" semver_filter = SemverFilter( operator, [ parsed_version[""major""], parsed_version[""minor""], parsed_version[""patch""], parsed_version[""revision""], 0 if prerelease else 1, prerelease, ], negated=negated, ) if parsed[""package""] and parsed[""package""] != SEMVER_FAKE_PACKAGE: semver_filter.package = parsed[""package""] return semver_filter else: # Try to parse as a wildcard match package, version = version.split(""@"", 1) version_parts = [] if version: for part in version.split(""."", 3): if part in SEMVER_WILDCARDS: break try: # We assume all ints for a wildcard match - not handling prerelease as # part of these version_parts.append(int(part)) except ValueError: raise InvalidSearchQuery(INVALID_SEMVER_MESSAGE) package = package if package and package != SEMVER_FAKE_PACKAGE else None return SemverFilter(""exact"", version_parts, package, negated) key_conversion_map: Mapping[ str, Callable[[SearchFilter, str, Mapping[str, Union[int, str, datetime]]], Optional[Sequence[any]]], ] = { ""environment"": _environment_filter_converter, ""message"": _message_filter_converter, TRANSACTION_STATUS_ALIAS: _transaction_status_filter_converter, ""issue.id"": _issue_id_filter_converter, USER_DISPLAY_ALIAS: _user_display_filter_converter, ERROR_UNHANDLED_ALIAS: _error_unhandled_filter_converter, ""error.handled"": _error_handled_filter_converter, TEAM_KEY_TRANSACTION_ALIAS: _team_key_transaction_filter_converter, RELEASE_STAGE_ALIAS: _release_stage_filter_converter, SEMVER_ALIAS: _semver_filter_converter, SEMVER_PACKAGE_ALIAS: _semver_package_filter_converter, SEMVER_BUILD_ALIAS: _semver_build_filter_converter, } "," Attempts to parse a release version using our semver syntax. version should be in format `@` or ``, where package_name is a string and version is a version string matching semver format (https://semver.org/). We've slightly extended this format to allow up to 4 integers. EG - sentry@1.2.3.4 - sentry@1.2.3.4-alpha - 1.2.3.4 - 1.2.3.4-alpha - 1.* ",55,191,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def parse_semver(version, operator) -> Optional[SemverFilter]: (operator, negated) = handle_operator_negation(operator) try: operator = OPERATOR_TO_DJANGO[operator] except KeyError: raise InvalidSearchQuery(""Invalid operation 'IN' for semantic version filter."") version = version if ""@"" in version else f""{SEMVER_FAKE_PACKAGE}@{version}"" parsed = parse_release_relay(version) parsed_version = parsed.get(""version_parsed"") if parsed_version: # Convert `pre` to always be a string prerelease = parsed_version[""pre""] if parsed_version[""pre""] else """" semver_filter = SemverFilter( operator, [ parsed_version[""major""], parsed_version[""minor""], parsed_version[""patch""], parsed_version[""revision""], 0 if prerelease else 1, prerelease, ], negated=negated, ) if parsed[""package""] and parsed[""package""] != SEMVER_FAKE_PACKAGE: semver_filter.package = parsed[""package""] return semver_filter else: # Try to parse as a wildcard match package, version = version.split(""@"", 1) version_parts = [] if version: for part in version.split(""."", 3): if part in SEMVER_WILDCARDS: break try: # We assume all ints for a wildcard match - not handling prerelease as # part of these version_parts.append(int(part)) except ValueError: raise InvalidSearchQuery(INVALID_SEMVER_MESSAGE) package = package if package and package != SEMVER_FAKE_PACKAGE else None return SemverFilter(""exact"", version_parts, package, negated) key_conversion_map: Mapping[ str, Callable[[SearchFilter, str, Mapping[str, Union[int, str, datetime]]], Optional[Sequence[any]]], ] = { ""environment"": _environment_filter_converter, ""message"": _message_filter_converter, TRANSACTION_STATUS_ALIAS: _transaction_status_filter_converter, ""issue.id"": _issue_id_filter_converter, USER_DISPLAY_ALIAS: _user_display_filter_converter, ERROR_UNHANDLED_ALIAS: _error_unhandled_filter_converter, ""error.handled"": _error_handled_filter_converter, TEAM_KEY_TRANSACTION_ALIAS: _team_key_transaction_filter_converter, RELEASE_STAGE_ALIAS: _release_stage_filter_converter, SEMVER_ALIAS: _semver_filter_converter, SEMVER_PACKAGE_ALIAS: _semver_package_filter_converter, SEMVER_BUILD_ALIAS: _semver_build_filter_converter, } ``` ###Assistant : Attempts to parse a release version using our semver syntax. version should be in format `@` or ``, where package_name is a string and version is a version string matching semver format (https://semver.org/). We've slightly extended this format to allow up to 4 integers. EG - sentry@1.2.3.4 - sentry@1.2.3.4-alpha - 1.2.3.4 - 1.2.3.4-alpha - 1.* " 2022,"def test_not_logged_in_gives_403_to_ajax_requests(self): # Get dashboard response = self.client.get( reverse(""wagtailadmin_home""), HTTP_X_REQUESTED_WITH=""XMLHttpRequest"" ) # AJAX requests should be given a 403 error instead of being redirected self.assertEqual(response.status_code, 403) "," This tests that a not logged in user is given a 403 error on AJAX requests ",16,26,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_not_logged_in_gives_403_to_ajax_requests(self): # Get dashboard response = self.client.get( reverse(""wagtailadmin_home""), HTTP_X_REQUESTED_WITH=""XMLHttpRequest"" ) # AJAX requests should be given a 403 error instead of being redirected self.assertEqual(response.status_code, 403) ``` ###Assistant : This tests that a not logged in user is given a 403 error on AJAX requests " 2023,"def bytes_to_unicode(self) -> Dict[int, str]: bs: List[int] = ( list(range(ord(""!""), ord(""~"") + 1)) + list(range(ord(""¡""), ord(""¬"") + 1)) + list(range(ord(""®""), ord(""ÿ"") + 1)) ) cs: List[int] = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 str_cs: List[str] = [chr(n) for n in cs] return dict(zip(bs, str_cs)) "," Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. ",93,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bytes_to_unicode(self) -> Dict[int, str]: bs: List[int] = ( list(range(ord(""!""), ord(""~"") + 1)) + list(range(ord(""¡""), ord(""¬"") + 1)) + list(range(ord(""®""), ord(""ÿ"") + 1)) ) cs: List[int] = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 str_cs: List[str] = [chr(n) for n in cs] return dict(zip(bs, str_cs)) ``` ###Assistant : Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. " 2024,"async def test_get_events_custom_calendars(hass, calendar, get_api_events): config = dict(CALDAV_CONFIG) config[""custom_calendars""] = [ {""name"": ""Private"", ""calendar"": ""Private"", ""search"": ""This is a normal event""} ] assert await async_setup_component(hass, ""calendar"", {""calendar"": config}) await hass.async_block_till_done() events = await get_api_events(""calendar.private_private"") assert events == [ { ""end"": {""dateTime"": ""2017-11-27T10:00:00-08:00""}, ""start"": {""dateTime"": ""2017-11-27T09:00:00-08:00""}, ""summary"": ""This is a normal event"", ""location"": ""Hamburg"", ""description"": ""Surprisingly rainy"", ""uid"": None, ""recurrence_id"": None, ""rrule"": None, } ] ",Test that only searched events are returned on API.,9,64,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_get_events_custom_calendars(hass, calendar, get_api_events): config = dict(CALDAV_CONFIG) config[""custom_calendars""] = [ {""name"": ""Private"", ""calendar"": ""Private"", ""search"": ""This is a normal event""} ] assert await async_setup_component(hass, ""calendar"", {""calendar"": config}) await hass.async_block_till_done() events = await get_api_events(""calendar.private_private"") assert events == [ { ""end"": {""dateTime"": ""2017-11-27T10:00:00-08:00""}, ""start"": {""dateTime"": ""2017-11-27T09:00:00-08:00""}, ""summary"": ""This is a normal event"", ""location"": ""Hamburg"", ""description"": ""Surprisingly rainy"", ""uid"": None, ""recurrence_id"": None, ""rrule"": None, } ] ``` ###Assistant : Test that only searched events are returned on API. " 2025,"def leaf_symbols(self) -> Iterable[Symbol]: for arg in self.arguments: if isinstance(arg, SymbolicExpression): yield from arg.leaf_symbols() "," Return a generator of all leaf symbols. Useful for when you want to inspect when the symbols come from. No deduplication even if the symbols has duplicates. ",27,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def leaf_symbols(self) -> Iterable[Symbol]: for arg in self.arguments: if isinstance(arg, SymbolicExpression): yield from arg.leaf_symbols() ``` ###Assistant : Return a generator of all leaf symbols. Useful for when you want to inspect when the symbols come from. No deduplication even if the symbols has duplicates. " 2026,"def append_step(self, obs, action, next_obs, reward, terminated, truncated, info): if self._outfile: if self._save_info: self._current_rollout.append( [obs, action, next_obs, reward, terminated, truncated, info] ) else: self._current_rollout.append( [obs, action, next_obs, reward, terminated, truncated] ) self._total_steps += 1 @eval_app.command()","Add a step to the current rollout, if we are saving them",12,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def append_step(self, obs, action, next_obs, reward, terminated, truncated, info): if self._outfile: if self._save_info: self._current_rollout.append( [obs, action, next_obs, reward, terminated, truncated, info] ) else: self._current_rollout.append( [obs, action, next_obs, reward, terminated, truncated] ) self._total_steps += 1 @eval_app.command() ``` ###Assistant : Add a step to the current rollout, if we are saving them " 2027,"def test_nested_prefetch_is_not_overwritten_by_related_object(self): queryset = House.objects.only('name').prefetch_related( Prefetch('rooms', queryset=Room.objects.prefetch_related( Prefetch('house', queryset=House.objects.only('address')), )), ) with self.assertNumQueries(3): house = queryset.first() self.assertIs(Room.house.is_cached(self.room), True) with self.assertNumQueries(0): house.rooms.first().house.address "," The prefetched relationship is used rather than populating the reverse relationship from the parent, when prefetching a set of child objects related to a set of parent objects and the child queryset itself specifies a prefetch back to the parent. ",40,21,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_nested_prefetch_is_not_overwritten_by_related_object(self): queryset = House.objects.only('name').prefetch_related( Prefetch('rooms', queryset=Room.objects.prefetch_related( Prefetch('house', queryset=House.objects.only('address')), )), ) with self.assertNumQueries(3): house = queryset.first() self.assertIs(Room.house.is_cached(self.room), True) with self.assertNumQueries(0): house.rooms.first().house.address ``` ###Assistant : The prefetched relationship is used rather than populating the reverse relationship from the parent, when prefetching a set of child objects related to a set of parent objects and the child queryset itself specifies a prefetch back to the parent. " 2028,"def apply(self, i): r i = _sympify(i) if i.is_integer is False: raise NotImplementedError(""{} should be an integer."".format(i)) n = self.size if (i < 0) == True or (i >= n) == True: raise NotImplementedError( ""{} should be an integer between 0 and {}"".format(i, n-1)) if i.is_Integer: return Integer(self._array_form[i]) return AppliedPermutation(self, i) ","Apply the permutation to an expression. Parameters ========== i : Expr It should be an integer between $0$ and $n-1$ where $n$ is the size of the permutation. If it is a symbol or a symbolic expression that can have integer values, an ``AppliedPermutation`` object will be returned which can represent an unevaluated function. Notes ===== Any permutation can be defined as a bijective function $\sigma : \{ 0, 1, \dots, n-1 \} \rightarrow \{ 0, 1, \dots, n-1 \}$ where $n$ denotes the size of the permutation. The definition may even be extended for any set with distinctive elements, such that the permutation can even be applied for real numbers or such, however, it is not implemented for now for computational reasons and the integrity with the group theory module. This function is similar to the ``__call__`` magic, however, ``__call__`` magic already has some other applications like permuting an array or attaching new cycles, which would not always be mathematically consistent. This also guarantees that the return type is a SymPy integer, which guarantees the safety to use assumptions. ",180,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply(self, i): r i = _sympify(i) if i.is_integer is False: raise NotImplementedError(""{} should be an integer."".format(i)) n = self.size if (i < 0) == True or (i >= n) == True: raise NotImplementedError( ""{} should be an integer between 0 and {}"".format(i, n-1)) if i.is_Integer: return Integer(self._array_form[i]) return AppliedPermutation(self, i) ``` ###Assistant : Apply the permutation to an expression. Parameters ========== i : Expr It should be an integer between $0$ and $n-1$ where $n$ is the size of the permutation. If it is a symbol or a symbolic expression that can have integer values, an ``AppliedPermutation`` object will be returned which can represent an unevaluated function. Notes ===== Any permutation can be defined as a bijective function $\sigma : \{ 0, 1, \dots, n-1 \} \rightarrow \{ 0, 1, \dots, n-1 \}$ where $n$ denotes the size of the permutation. The definition may even be extended for any set with distinctive elements, such that the permutation can even be applied for real numbers or such, however, it is not implemented for now for computational reasons and the integrity with the group theory module. 
This function is similar to the ``__call__`` magic, however, ``__call__`` magic already has some other applications like permuting an array or attaching new cycles, which would not always be mathematically consistent. This also guarantees that the return type is a SymPy integer, which guarantees the safety to use assumptions. " 2029,"def _convert_mesh_to_triangles(self, coordinates): if isinstance(coordinates, np.ma.MaskedArray): p = coordinates.data else: p = coordinates p_a = p[:-1, :-1] p_b = p[:-1, 1:] p_c = p[1:, 1:] p_d = p[1:, :-1] p_center = (p_a + p_b + p_c + p_d) / 4.0 triangles = np.concatenate([ p_a, p_b, p_center, p_b, p_c, p_center, p_c, p_d, p_center, p_d, p_a, p_center, ], axis=2).reshape((-1, 3, 2)) c = self.get_facecolor().reshape((*coordinates.shape[:2], 4)) c_a = c[:-1, :-1] c_b = c[:-1, 1:] c_c = c[1:, 1:] c_d = c[1:, :-1] c_center = (c_a + c_b + c_c + c_d) / 4.0 colors = np.concatenate([ c_a, c_b, c_center, c_b, c_c, c_center, c_c, c_d, c_center, c_d, c_a, c_center, ], axis=2).reshape((-1, 3, 4)) return triangles, colors "," Convert a given mesh into a sequence of triangles, each point with its own color. The result can be used to construct a call to `~.RendererBase.draw_gouraud_triangles`. ",26,112,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _convert_mesh_to_triangles(self, coordinates): if isinstance(coordinates, np.ma.MaskedArray): p = coordinates.data else: p = coordinates p_a = p[:-1, :-1] p_b = p[:-1, 1:] p_c = p[1:, 1:] p_d = p[1:, :-1] p_center = (p_a + p_b + p_c + p_d) / 4.0 triangles = np.concatenate([ p_a, p_b, p_center, p_b, p_c, p_center, p_c, p_d, p_center, p_d, p_a, p_center, ], axis=2).reshape((-1, 3, 2)) c = self.get_facecolor().reshape((*coordinates.shape[:2], 4)) c_a = c[:-1, :-1] c_b = c[:-1, 1:] c_c = c[1:, 1:] c_d = c[1:, :-1] c_center = (c_a + c_b + c_c + c_d) / 4.0 colors = np.concatenate([ c_a, c_b, c_center, c_b, c_c, c_center, c_c, c_d, c_center, c_d, c_a, c_center, ], axis=2).reshape((-1, 3, 4)) return triangles, colors ``` ###Assistant : Convert a given mesh into a sequence of triangles, each point with its own color. The result can be used to construct a call to `~.RendererBase.draw_gouraud_triangles`. " 2030,"def count(self, level=None): if level is None: return notna(self._values).sum().astype(""int64"") else: warnings.warn( ""Using the level keyword in DataFrame and Series aggregations is "" ""deprecated and will be removed in a future version. Use groupby "" ""instead. ser.count(level=1) should use ser.groupby(level=1).count()."", FutureWarning, stacklevel=find_stack_level(), ) if not isinstance(self.index, MultiIndex): raise ValueError(""Series.count level is only valid with a MultiIndex"") index = self.index assert isinstance(index, MultiIndex) # for mypy if isinstance(level, str): level = index._get_level_number(level) lev = index.levels[level] level_codes = np.array(index.codes[level], subok=False, copy=True) mask = level_codes == -1 if mask.any(): level_codes[mask] = cnt = len(lev) lev = lev.insert(cnt, lev._na_value) obs = level_codes[notna(self._values)] # Argument ""minlength"" to ""bincount"" has incompatible type ""Optional[int]""; # expected ""SupportsIndex"" [arg-type] out = np.bincount(obs, minlength=len(lev) or None) # type: ignore[arg-type] return self._constructor(out, index=lev, dtype=""int64"").__finalize__( self, method=""count"" ) "," Return number of non-NA/null observations in the Series. 
Parameters ---------- level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a smaller Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 ",74,126,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def count(self, level=None): if level is None: return notna(self._values).sum().astype(""int64"") else: warnings.warn( ""Using the level keyword in DataFrame and Series aggregations is "" ""deprecated and will be removed in a future version. Use groupby "" ""instead. ser.count(level=1) should use ser.groupby(level=1).count()."", FutureWarning, stacklevel=find_stack_level(), ) if not isinstance(self.index, MultiIndex): raise ValueError(""Series.count level is only valid with a MultiIndex"") index = self.index assert isinstance(index, MultiIndex) # for mypy if isinstance(level, str): level = index._get_level_number(level) lev = index.levels[level] level_codes = np.array(index.codes[level], subok=False, copy=True) mask = level_codes == -1 if mask.any(): level_codes[mask] = cnt = len(lev) lev = lev.insert(cnt, lev._na_value) obs = level_codes[notna(self._values)] # Argument ""minlength"" to ""bincount"" has incompatible type ""Optional[int]""; # expected ""SupportsIndex"" [arg-type] out = np.bincount(obs, minlength=len(lev) or None) # type: ignore[arg-type] return self._constructor(out, index=lev, dtype=""int64"").__finalize__( self, method=""count"" ) ``` ###Assistant : Return number of non-NA/null observations in the Series. Parameters ---------- level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a smaller Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 " 2031,"def __format__(self, specifier, context=None, _localeconv=None): # Note: PEP 3101 says that if the type is not present then # there should be at least one digit after the decimal point. # We take the liberty of ignoring this requirement for # Decimal---it's presumably there to make sure that # format(float, '') behaves similarly to str(float). 
if context is None: context = getcontext() spec = _parse_format_specifier(specifier, _localeconv=_localeconv) # special values don't care about the type or precision if self._is_special: sign = _format_sign(self._sign, spec) body = str(self.copy_abs()) if spec['type'] == '%': body += '%' return _format_align(sign, body, spec) # a type of None defaults to 'g' or 'G', depending on context if spec['type'] is None: spec['type'] = ['g', 'G'][context.capitals] # if type is '%', adjust exponent of self accordingly if spec['type'] == '%': self = _dec_from_triple(self._sign, self._int, self._exp+2) # round if necessary, taking rounding mode from the context rounding = context.rounding precision = spec['precision'] if precision is not None: if spec['type'] in 'eE': self = self._round(precision+1, rounding) elif spec['type'] in 'fF%': self = self._rescale(-precision, rounding) elif spec['type'] in 'gG' and len(self._int) > precision: self = self._round(precision, rounding) # special case: zeros with a positive exponent can't be # represented in fixed point; rescale them to 0e0. if not self and self._exp > 0 and spec['type'] in 'fF%': self = self._rescale(0, rounding) # figure out placement of the decimal point leftdigits = self._exp + len(self._int) if spec['type'] in 'eE': if not self and precision is not None: dotplace = 1 - precision else: dotplace = 1 elif spec['type'] in 'fF%': dotplace = leftdigits elif spec['type'] in 'gG': if self._exp <= 0 and leftdigits > -6: dotplace = leftdigits else: dotplace = 1 # find digits before and after decimal point, and get exponent if dotplace < 0: intpart = '0' fracpart = '0'*(-dotplace) + self._int elif dotplace > len(self._int): intpart = self._int + '0'*(dotplace-len(self._int)) fracpart = '' else: intpart = self._int[:dotplace] or '0' fracpart = self._int[dotplace:] exp = leftdigits-dotplace # done with the decimal-specific stuff; hand over the rest # of the formatting to the _format_number function return _format_number(self._sign, intpart, fracpart, exp, spec) ","Format a Decimal instance according to the given specifier. The specifier should be a standard format specifier, with the form described in PEP 3101. Formatting types 'e', 'E', 'f', 'F', 'g', 'G', 'n' and '%' are supported. If the formatting type is omitted it defaults to 'g' or 'G', depending on the value of context.capitals. ",55,350,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __format__(self, specifier, context=None, _localeconv=None): # Note: PEP 3101 says that if the type is not present then # there should be at least one digit after the decimal point. # We take the liberty of ignoring this requirement for # Decimal---it's presumably there to make sure that # format(float, '') behaves similarly to str(float). 
if context is None: context = getcontext() spec = _parse_format_specifier(specifier, _localeconv=_localeconv) # special values don't care about the type or precision if self._is_special: sign = _format_sign(self._sign, spec) body = str(self.copy_abs()) if spec['type'] == '%': body += '%' return _format_align(sign, body, spec) # a type of None defaults to 'g' or 'G', depending on context if spec['type'] is None: spec['type'] = ['g', 'G'][context.capitals] # if type is '%', adjust exponent of self accordingly if spec['type'] == '%': self = _dec_from_triple(self._sign, self._int, self._exp+2) # round if necessary, taking rounding mode from the context rounding = context.rounding precision = spec['precision'] if precision is not None: if spec['type'] in 'eE': self = self._round(precision+1, rounding) elif spec['type'] in 'fF%': self = self._rescale(-precision, rounding) elif spec['type'] in 'gG' and len(self._int) > precision: self = self._round(precision, rounding) # special case: zeros with a positive exponent can't be # represented in fixed point; rescale them to 0e0. if not self and self._exp > 0 and spec['type'] in 'fF%': self = self._rescale(0, rounding) # figure out placement of the decimal point leftdigits = self._exp + len(self._int) if spec['type'] in 'eE': if not self and precision is not None: dotplace = 1 - precision else: dotplace = 1 elif spec['type'] in 'fF%': dotplace = leftdigits elif spec['type'] in 'gG': if self._exp <= 0 and leftdigits > -6: dotplace = leftdigits else: dotplace = 1 # find digits before and after decimal point, and get exponent if dotplace < 0: intpart = '0' fracpart = '0'*(-dotplace) + self._int elif dotplace > len(self._int): intpart = self._int + '0'*(dotplace-len(self._int)) fracpart = '' else: intpart = self._int[:dotplace] or '0' fracpart = self._int[dotplace:] exp = leftdigits-dotplace # done with the decimal-specific stuff; hand over the rest # of the formatting to the _format_number function return _format_number(self._sign, intpart, fracpart, exp, spec) ``` ###Assistant : Format a Decimal instance according to the given specifier. The specifier should be a standard format specifier, with the form described in PEP 3101. Formatting types 'e', 'E', 'f', 'F', 'g', 'G', 'n' and '%' are supported. If the formatting type is omitted it defaults to 'g' or 'G', depending on the value of context.capitals. " 2032,"def test_feature_names_in(): pd = pytest.importorskip(""pandas"") iris = datasets.load_iris() X_np = iris.data df = pd.DataFrame(X_np, columns=iris.feature_names) ",Check that feature_name_in are recorded by `_validate_data`,7,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_feature_names_in(): pd = pytest.importorskip(""pandas"") iris = datasets.load_iris() X_np = iris.data df = pd.DataFrame(X_np, columns=iris.feature_names) ``` ###Assistant : Check that feature_name_in are recorded by `_validate_data` " 2033,"def check_interactive_compatibility(self): from pytorch_lightning.utilities import _IS_INTERACTIVE if _IS_INTERACTIVE and self._strategy_type is not None and not self._strategy_type.is_interactive_compatible(): raise MisconfigurationException( f""`Trainer(strategy={self._strategy_type.value!r})` or"" f"" `Trainer(accelerator={self._strategy_type.value!r})` is not compatible with an interactive"" "" environment. 
Run your code as a script, or choose one of the compatible backends:"" f"" {', '.join(_StrategyType.interactive_compatible_types())}."" "" In case you are spawning processes yourself, make sure to include the Trainer"" "" creation inside the worker function."" ) ","Raises a `MisconfigurationException` if the accelerator and/or plugin is not compatible with an interactive environment.",15,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_interactive_compatibility(self): from pytorch_lightning.utilities import _IS_INTERACTIVE if _IS_INTERACTIVE and self._strategy_type is not None and not self._strategy_type.is_interactive_compatible(): raise MisconfigurationException( f""`Trainer(strategy={self._strategy_type.value!r})` or"" f"" `Trainer(accelerator={self._strategy_type.value!r})` is not compatible with an interactive"" "" environment. Run your code as a script, or choose one of the compatible backends:"" f"" {', '.join(_StrategyType.interactive_compatible_types())}."" "" In case you are spawning processes yourself, make sure to include the Trainer"" "" creation inside the worker function."" ) ``` ###Assistant : Raises a `MisconfigurationException` if the accelerator and/or plugin is not compatible with an interactive environment. " 2034,"def theme_global(new_theme=None): if new_theme is not None: if new_theme not in theme_list(): popup_error_with_traceback('Cannot use custom themes with theme_global call', 'Your request to use theme {} cannot be performed.'.format(new_theme), 'The PySimpleGUI Global User Settings are meant for PySimpleGUI standard items, not user config items', 'You can use any of the many built-in themes instead or use your own UserSettings file to store your custom theme') return pysimplegui_user_settings.get('-theme-', CURRENT_LOOK_AND_FEEL) pysimplegui_user_settings.set('-theme-', new_theme) theme(new_theme) return new_theme else: return pysimplegui_user_settings.get('-theme-', CURRENT_LOOK_AND_FEEL) "," Sets / Gets the global PySimpleGUI Theme. If none is specified then returns the global theme from user settings. Note the theme must be a standard, built-in PySimpleGUI theme... not a user-created theme. :param new_theme: the new theme name to use :type new_theme: (str) :return: the currently selected theme :rtype: (str) ",51,76,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def theme_global(new_theme=None): if new_theme is not None: if new_theme not in theme_list(): popup_error_with_traceback('Cannot use custom themes with theme_global call', 'Your request to use theme {} cannot be performed.'.format(new_theme), 'The PySimpleGUI Global User Settings are meant for PySimpleGUI standard items, not user config items', 'You can use any of the many built-in themes instead or use your own UserSettings file to store your custom theme') return pysimplegui_user_settings.get('-theme-', CURRENT_LOOK_AND_FEEL) pysimplegui_user_settings.set('-theme-', new_theme) theme(new_theme) return new_theme else: return pysimplegui_user_settings.get('-theme-', CURRENT_LOOK_AND_FEEL) ``` ###Assistant : Sets / Gets the global PySimpleGUI Theme. If none is specified then returns the global theme from user settings. Note the theme must be a standard, built-in PySimpleGUI theme... not a user-created theme. 
:param new_theme: the new theme name to use :type new_theme: (str) :return: the currently selected theme :rtype: (str) " 2035,"def close(self): self._reset_retries() self._closed = True # Chunked-encoded posts are terminated with '0\r\n\r\n' # For some reason, either Python or node.js seems to # require an extra \r\n. try: self._conn.send(""\r\n0\r\n\r\n"".encode(""utf-8"")) except http_client.socket.error: # In case the socket has already been closed return """" return self._getresponse() ","Close the connection to server. If available, return a http_client.HTTPResponse object. Closing the connection involves sending the Transfer-Encoding terminating bytes. ",20,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def close(self): self._reset_retries() self._closed = True # Chunked-encoded posts are terminated with '0\r\n\r\n' # For some reason, either Python or node.js seems to # require an extra \r\n. try: self._conn.send(""\r\n0\r\n\r\n"".encode(""utf-8"")) except http_client.socket.error: # In case the socket has already been closed return """" return self._getresponse() ``` ###Assistant : Close the connection to server. If available, return a http_client.HTTPResponse object. Closing the connection involves sending the Transfer-Encoding terminating bytes. " 2036,"def _get_svc_path(name=""*"", status=None): # This is the core routine to work with services, called by many # other functions of this module. # # The name of a service is the ""apparent"" folder's name that contains its # ""run"" script. If its ""folder"" is a symlink, the service is an ""alias"" of # the targeted service. if not SERVICE_DIR: raise CommandExecutionError(""Could not find service directory."") # path list of enabled services as /AVAIL_SVR_DIRS/$service, # taking care of any service aliases (do not use os.path.realpath()). ena = set() for el in glob.glob(os.path.join(SERVICE_DIR, name)): if _is_svc(el): if os.path.islink(el): ena.add(os.readlink(el)) else: ena.add(el) log.trace(""found enabled service path: %s"", el) if status == ""ENABLED"": return sorted(ena) # path list of available services as /AVAIL_SVR_DIRS/$service ava = set() for d in AVAIL_SVR_DIRS: for el in glob.glob(os.path.join(d, name)): if _is_svc(el): ava.add(el) log.trace(""found available service path: %s"", el) if status == ""DISABLED"": # service available but not enabled ret = ava.difference(ena) else: # default: return available services ret = ava.union(ena) return sorted(ret) "," Return a list of paths to services with ``name`` that have the specified ``status`` name a glob for service name. default is '*' status None : all services (no filter, default choice) 'DISABLED' : available service(s) that is not enabled 'ENABLED' : enabled service (whether started on boot or not) ",50,164,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_svc_path(name=""*"", status=None): # This is the core routine to work with services, called by many # other functions of this module. # # The name of a service is the ""apparent"" folder's name that contains its # ""run"" script. If its ""folder"" is a symlink, the service is an ""alias"" of # the targeted service. if not SERVICE_DIR: raise CommandExecutionError(""Could not find service directory."") # path list of enabled services as /AVAIL_SVR_DIRS/$service, # taking care of any service aliases (do not use os.path.realpath()). 
ena = set() for el in glob.glob(os.path.join(SERVICE_DIR, name)): if _is_svc(el): if os.path.islink(el): ena.add(os.readlink(el)) else: ena.add(el) log.trace(""found enabled service path: %s"", el) if status == ""ENABLED"": return sorted(ena) # path list of available services as /AVAIL_SVR_DIRS/$service ava = set() for d in AVAIL_SVR_DIRS: for el in glob.glob(os.path.join(d, name)): if _is_svc(el): ava.add(el) log.trace(""found available service path: %s"", el) if status == ""DISABLED"": # service available but not enabled ret = ava.difference(ena) else: # default: return available services ret = ava.union(ena) return sorted(ret) ``` ###Assistant : Return a list of paths to services with ``name`` that have the specified ``status`` name a glob for service name. default is '*' status None : all services (no filter, default choice) 'DISABLED' : available service(s) that is not enabled 'ENABLED' : enabled service (whether started on boot or not) " 2037,"def test_model_torch_save_ddp_cpu(tmpdir): model = BoringModel() num_epochs = 1 trainer = Trainer( default_root_dir=tmpdir, max_epochs=num_epochs, strategy=""ddp_spawn"", accelerator=""cpu"", devices=2, logger=False ) temp_path = os.path.join(tmpdir, ""temp.pt"") trainer.fit(model) # Ensure these do not fail torch.save(trainer.model, temp_path) torch.save(trainer, temp_path) @RunIf(min_gpus=2)",Test to ensure torch save does not fail for model and trainer using cpu ddp.,15,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_model_torch_save_ddp_cpu(tmpdir): model = BoringModel() num_epochs = 1 trainer = Trainer( default_root_dir=tmpdir, max_epochs=num_epochs, strategy=""ddp_spawn"", accelerator=""cpu"", devices=2, logger=False ) temp_path = os.path.join(tmpdir, ""temp.pt"") trainer.fit(model) # Ensure these do not fail torch.save(trainer.model, temp_path) torch.save(trainer, temp_path) @RunIf(min_gpus=2) ``` ###Assistant : Test to ensure torch save does not fail for model and trainer using cpu ddp. " 2038,"def default_batch_size(self) -> int: # Using 2 avoid ONNX making assumption about single sample batch return OnnxConfig.default_fixed_batch "," The default batch size to use if no other indication Returns: Integer > 0 ",14,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def default_batch_size(self) -> int: # Using 2 avoid ONNX making assumption about single sample batch return OnnxConfig.default_fixed_batch ``` ###Assistant : The default batch size to use if no other indication Returns: Integer > 0 " 2039,"def addMacOSCodeSignature(filenames): # Weak signing. identity = getMacOSSigningIdentity() command = [ ""codesign"", ""-s"", identity, ""--force"", ""--deep"", ""--preserve-metadata=entitlements"", ] assert type(filenames) is not str command.extend(filenames) with withMadeWritableFileMode(filenames): executeToolChecked( logger=postprocessing_logger, command=command, absence_message=macos_codesign_usage, stderr_filter=_filterSigntoolErrorOutput, ) ","Remove the code signature from a filename. Args: filenames - The files to be signed. Returns: None Notes: This is macOS specific. ",22,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def addMacOSCodeSignature(filenames): # Weak signing. 
identity = getMacOSSigningIdentity() command = [ ""codesign"", ""-s"", identity, ""--force"", ""--deep"", ""--preserve-metadata=entitlements"", ] assert type(filenames) is not str command.extend(filenames) with withMadeWritableFileMode(filenames): executeToolChecked( logger=postprocessing_logger, command=command, absence_message=macos_codesign_usage, stderr_filter=_filterSigntoolErrorOutput, ) ``` ###Assistant : Remove the code signature from a filename. Args: filenames - The files to be signed. Returns: None Notes: This is macOS specific. " 2040,"def exception_handler(exc, context): if isinstance(exc, Http404): exc = exceptions.NotFound(*(exc.args)) elif isinstance(exc, PermissionDenied): exc = exceptions.PermissionDenied(*(exc.args)) if isinstance(exc, exceptions.APIException): headers = {} if getattr(exc, 'auth_header', None): headers['WWW-Authenticate'] = exc.auth_header if getattr(exc, 'wait', None): headers['Retry-After'] = '%d' % exc.wait if isinstance(exc.detail, (list, dict)): data = exc.detail else: data = {'detail': exc.detail} set_rollback() return Response(data, status=exc.status_code, headers=headers) return None "," Returns the response that should be used for any given exception. By default we handle the REST framework `APIException`, and also Django's built-in `Http404` and `PermissionDenied` exceptions. Any unhandled exceptions may return `None`, which will cause a 500 error to be raised. ",42,56,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def exception_handler(exc, context): if isinstance(exc, Http404): exc = exceptions.NotFound(*(exc.args)) elif isinstance(exc, PermissionDenied): exc = exceptions.PermissionDenied(*(exc.args)) if isinstance(exc, exceptions.APIException): headers = {} if getattr(exc, 'auth_header', None): headers['WWW-Authenticate'] = exc.auth_header if getattr(exc, 'wait', None): headers['Retry-After'] = '%d' % exc.wait if isinstance(exc.detail, (list, dict)): data = exc.detail else: data = {'detail': exc.detail} set_rollback() return Response(data, status=exc.status_code, headers=headers) return None ``` ###Assistant : Returns the response that should be used for any given exception. By default we handle the REST framework `APIException`, and also Django's built-in `Http404` and `PermissionDenied` exceptions. Any unhandled exceptions may return `None`, which will cause a 500 error to be raised. " 2041,"def compute_cooccurrence_matrix(self, df): user_item_hits = sparse.coo_matrix( (np.repeat(1, df.shape[0]), (df[self.col_user_id], df[self.col_item_id])), shape=(self.n_users, self.n_items), ).tocsr() item_cooccurrence = user_item_hits.transpose().dot(user_item_hits) item_cooccurrence = item_cooccurrence.multiply( item_cooccurrence >= self.threshold ) return item_cooccurrence.astype(df[self.col_rating].dtype) ","Co-occurrence matrix. The co-occurrence matrix is defined as :math:`C = U^T * U` where U is the user_affinity matrix with 1's as values (instead of ratings). Args: df (pandas.DataFrame): DataFrame of users and items Returns: numpy.ndarray: Co-occurrence matrix ",38,25,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def compute_cooccurrence_matrix(self, df): user_item_hits = sparse.coo_matrix( (np.repeat(1, df.shape[0]), (df[self.col_user_id], df[self.col_item_id])), shape=(self.n_users, self.n_items), ).tocsr() item_cooccurrence = user_item_hits.transpose().dot(user_item_hits) item_cooccurrence = item_cooccurrence.multiply( item_cooccurrence >= self.threshold ) return item_cooccurrence.astype(df[self.col_rating].dtype) ``` ###Assistant : Co-occurrence matrix. The co-occurrence matrix is defined as :math:`C = U^T * U` where U is the user_affinity matrix with 1's as values (instead of ratings). Args: df (pandas.DataFrame): DataFrame of users and items Returns: numpy.ndarray: Co-occurrence matrix " 2042,"def _makeTags(tagStr, xml, suppress_LT=Suppress(""<""), suppress_GT=Suppress("">"")): if isinstance(tagStr, str_type): resname = tagStr tagStr = Keyword(tagStr, caseless=not xml) else: resname = tagStr.name tagAttrName = Word(alphas, alphanums + ""_-:"") if xml: tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) openTag = ( suppress_LT + tagStr(""tag"") + Dict(ZeroOrMore(Group(tagAttrName + Suppress(""="") + tagAttrValue))) + Opt(""/"", default=[False])(""empty"").set_parse_action( lambda s, l, t: t[0] == ""/"" ) + suppress_GT ) else: tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( printables, exclude_chars="">"" ) openTag = ( suppress_LT + tagStr(""tag"") + Dict( ZeroOrMore( Group( tagAttrName.set_parse_action(lambda t: t[0].lower()) + Opt(Suppress(""="") + tagAttrValue) ) ) ) + Opt(""/"", default=[False])(""empty"").set_parse_action( lambda s, l, t: t[0] == ""/"" ) + suppress_GT ) closeTag = Combine(Literal(""</"") + tagStr + "">"", adjacent=False) openTag.set_name(""<%s>"" % resname) # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels openTag.add_parse_action( lambda t: t.__setitem__( ""start"" + """".join(resname.replace("":"", "" "").title().split()), t.copy() ) ) closeTag = closeTag( ""end"" + """".join(resname.replace("":"", "" "").title().split()) ).set_name(""</%s>"" % resname) openTag.tag = resname closeTag.tag = resname openTag.tag_body = SkipTo(closeTag()) return openTag, closeTag ","Internal helper to construct opening and closing tag expressions, given a tag name",13,164,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _makeTags(tagStr, xml, suppress_LT=Suppress(""<""), suppress_GT=Suppress("">"")): if isinstance(tagStr, str_type): resname = tagStr tagStr = Keyword(tagStr, caseless=not xml) else: resname = tagStr.name tagAttrName = Word(alphas, alphanums + ""_-:"") if xml: tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) openTag = ( suppress_LT + tagStr(""tag"") + Dict(ZeroOrMore(Group(tagAttrName + Suppress(""="") + tagAttrValue))) + Opt(""/"", default=[False])(""empty"").set_parse_action( lambda s, l, t: t[0] == ""/"" ) + suppress_GT ) else: tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( printables, exclude_chars="">"" ) openTag = ( suppress_LT + tagStr(""tag"") + Dict( ZeroOrMore( Group( tagAttrName.set_parse_action(lambda t: t[0].lower()) + Opt(Suppress(""="") + tagAttrValue) ) ) ) + Opt(""/"", default=[False])(""empty"").set_parse_action( lambda s, l, t: t[0] == ""/"" ) + suppress_GT ) closeTag = Combine(Literal(""</"") + tagStr + "">"", adjacent=False) openTag.set_name(""<%s>"" % resname) # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels openTag.add_parse_action( lambda t: t.__setitem__( ""start"" + """".join(resname.replace("":"", "" "").title().split()), t.copy() ) ) closeTag = closeTag( ""end"" + """".join(resname.replace("":"", "" "").title().split()) ).set_name(""</%s>"" % resname) openTag.tag = resname closeTag.tag = resname openTag.tag_body = SkipTo(closeTag()) return openTag, closeTag ``` ###Assistant : Internal helper to construct opening and closing tag expressions, given a tag name " 2043,"def load_data(label_mode=""fine""): if label_mode not in [""fine"", ""coarse""]: raise ValueError( '`label_mode` must be one of `""fine""`, `""coarse""`. ' f""Received: label_mode={label_mode}."" ) dirname = ""cifar-100-python"" origin = ""https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"" path = get_file( dirname, origin=origin, untar=True, file_hash=""85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"", ) fpath = os.path.join(path, ""train"") x_train, y_train = load_batch(fpath, label_key=label_mode + ""_labels"") fpath = os.path.join(path, ""test"") x_test, y_test = load_batch(fpath, label_key=label_mode + ""_labels"") y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) if backend.image_data_format() == ""channels_last"": x_train = x_train.transpose(0, 2, 3, 1) x_test = x_test.transpose(0, 2, 3, 1) return (x_train, y_train), (x_test, y_test) ","Loads the CIFAR100 dataset. This is a dataset of 50,000 32x32 color training images and 10,000 test images, labeled over 100 fine-grained classes that are grouped into 20 coarse-grained classes. See more info at the [CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html). Args: label_mode: one of ""fine"", ""coarse"". If it is ""fine"" the category labels are the fine-grained labels, if it is ""coarse"" the output labels are the coarse-grained superclasses. Returns: Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train**: uint8 NumPy array of grayscale image data with shapes `(50000, 32, 32, 3)`, containing the training data. Pixel values range from 0 to 255. **y_train**: uint8 NumPy array of labels (integers in range 0-99) with shape `(50000, 1)` for the training data. **x_test**: uint8 NumPy array of grayscale image data with shapes `(10000, 32, 32, 3)`, containing the test data. Pixel values range from 0 to 255. 
**y_test**: uint8 NumPy array of labels (integers in range 0-99) with shape `(10000, 1)` for the test data. Example: ```python (x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data() assert x_train.shape == (50000, 32, 32, 3) assert x_test.shape == (10000, 32, 32, 3) assert y_train.shape == (50000, 1) assert y_test.shape == (10000, 1) ``` ",193,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_data(label_mode=""fine""): if label_mode not in [""fine"", ""coarse""]: raise ValueError( '`label_mode` must be one of `""fine""`, `""coarse""`. ' f""Received: label_mode={label_mode}."" ) dirname = ""cifar-100-python"" origin = ""https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"" path = get_file( dirname, origin=origin, untar=True, file_hash=""85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"", ) fpath = os.path.join(path, ""train"") x_train, y_train = load_batch(fpath, label_key=label_mode + ""_labels"") fpath = os.path.join(path, ""test"") x_test, y_test = load_batch(fpath, label_key=label_mode + ""_labels"") y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) if backend.image_data_format() == ""channels_last"": x_train = x_train.transpose(0, 2, 3, 1) x_test = x_test.transpose(0, 2, 3, 1) return (x_train, y_train), (x_test, y_test) ``` ###Assistant : Loads the CIFAR100 dataset. This is a dataset of 50,000 32x32 color training images and 10,000 test images, labeled over 100 fine-grained classes that are grouped into 20 coarse-grained classes. See more info at the [CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html). Args: label_mode: one of ""fine"", ""coarse"". If it is ""fine"" the category labels are the fine-grained labels, if it is ""coarse"" the output labels are the coarse-grained superclasses. Returns: Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train**: uint8 NumPy array of grayscale image data with shapes `(50000, 32, 32, 3)`, containing the training data. Pixel values range from 0 to 255. **y_train**: uint8 NumPy array of labels (integers in range 0-99) with shape `(50000, 1)` for the training data. **x_test**: uint8 NumPy array of grayscale image data with shapes `(10000, 32, 32, 3)`, containing the test data. Pixel values range from 0 to 255. **y_test**: uint8 NumPy array of labels (integers in range 0-99) with shape `(10000, 1)` for the test data. Example: ```python (x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data() assert x_train.shape == (50000, 32, 32, 3) assert x_test.shape == (10000, 32, 32, 3) assert y_train.shape == (50000, 1) assert y_test.shape == (10000, 1) ``` " 2044,"def find_airflow_sources_root() -> Path: default_airflow_sources_root = Path.cwd() # Try to find airflow sources in current working dir airflow_sources_root = search_upwards_for_airflow_sources_root(Path.cwd()) if not airflow_sources_root: # Or if it fails, find it in parents of the directory where the ./breeze.py is. airflow_sources_root = search_upwards_for_airflow_sources_root(Path(__file__).resolve().parent) if airflow_sources_root: os.chdir(airflow_sources_root) return Path(airflow_sources_root) else: console.print( f""\n[bright_yellow]Could not find Airflow sources location. 
"" f""Assuming {default_airflow_sources_root}"" ) os.chdir(default_airflow_sources_root) return Path(default_airflow_sources_root) AIRFLOW_SOURCES_ROOT = find_airflow_sources_root() BUILD_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.build' FILES_DIR = AIRFLOW_SOURCES_ROOT / 'files' MSSQL_DATA_VOLUME = AIRFLOW_SOURCES_ROOT / 'tmp_mssql_volume' MYPY_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.mypy_cache' LOGS_DIR = AIRFLOW_SOURCES_ROOT / 'logs' DIST_DIR = AIRFLOW_SOURCES_ROOT / 'dist' SCRIPTS_CI_DIR = AIRFLOW_SOURCES_ROOT / 'scripts' / 'ci' DOCKER_CONTEXT_DIR = AIRFLOW_SOURCES_ROOT / 'docker-context-files' CACHE_TMP_FILE_DIR = tempfile.TemporaryDirectory() OUTPUT_LOG = Path(CACHE_TMP_FILE_DIR.name, 'out.log') "," Find the root of airflow sources. When Breeze is run from sources, it is easy, but this one also has to handle the case when Breeze is installed via `pipx` so it searches upwards of the current directory to find the right root of airflow directory. If not found, current directory is returned (this handles the case when Breeze is run from the local directory. :return: Path for the found sources. ",71,114,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_airflow_sources_root() -> Path: default_airflow_sources_root = Path.cwd() # Try to find airflow sources in current working dir airflow_sources_root = search_upwards_for_airflow_sources_root(Path.cwd()) if not airflow_sources_root: # Or if it fails, find it in parents of the directory where the ./breeze.py is. airflow_sources_root = search_upwards_for_airflow_sources_root(Path(__file__).resolve().parent) if airflow_sources_root: os.chdir(airflow_sources_root) return Path(airflow_sources_root) else: console.print( f""\n[bright_yellow]Could not find Airflow sources location. "" f""Assuming {default_airflow_sources_root}"" ) os.chdir(default_airflow_sources_root) return Path(default_airflow_sources_root) AIRFLOW_SOURCES_ROOT = find_airflow_sources_root() BUILD_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.build' FILES_DIR = AIRFLOW_SOURCES_ROOT / 'files' MSSQL_DATA_VOLUME = AIRFLOW_SOURCES_ROOT / 'tmp_mssql_volume' MYPY_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.mypy_cache' LOGS_DIR = AIRFLOW_SOURCES_ROOT / 'logs' DIST_DIR = AIRFLOW_SOURCES_ROOT / 'dist' SCRIPTS_CI_DIR = AIRFLOW_SOURCES_ROOT / 'scripts' / 'ci' DOCKER_CONTEXT_DIR = AIRFLOW_SOURCES_ROOT / 'docker-context-files' CACHE_TMP_FILE_DIR = tempfile.TemporaryDirectory() OUTPUT_LOG = Path(CACHE_TMP_FILE_DIR.name, 'out.log') ``` ###Assistant : Find the root of airflow sources. When Breeze is run from sources, it is easy, but this one also has to handle the case when Breeze is installed via `pipx` so it searches upwards of the current directory to find the right root of airflow directory. If not found, current directory is returned (this handles the case when Breeze is run from the local directory. :return: Path for the found sources. 
" 2045,"def _parse_proxy_entry(proxy_str): config = [c.strip() for c in proxy_str.split(' ') if c] if not config: raise ParseProxyError(""Empty proxy entry"") if config[0] == ""DIRECT"": if len(config) != 1: raise ParseProxyError(""Invalid number of parameters for "" + ""DIRECT"") return QNetworkProxy(QNetworkProxy.ProxyType.NoProxy) elif config[0] == ""PROXY"": if len(config) != 2: raise ParseProxyError(""Invalid number of parameters for PROXY"") host, port = PACResolver._parse_proxy_host(config[1]) return QNetworkProxy(QNetworkProxy.ProxyType.HttpProxy, host, port) elif config[0] in [""SOCKS"", ""SOCKS5""]: if len(config) != 2: raise ParseProxyError(""Invalid number of parameters for SOCKS"") host, port = PACResolver._parse_proxy_host(config[1]) return QNetworkProxy(QNetworkProxy.ProxyType.Socks5Proxy, host, port) else: err = ""Unknown proxy type: {}"" raise ParseProxyError(err.format(config[0])) ","Parse one proxy string entry, as described in PAC specification.",10,94,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _parse_proxy_entry(proxy_str): config = [c.strip() for c in proxy_str.split(' ') if c] if not config: raise ParseProxyError(""Empty proxy entry"") if config[0] == ""DIRECT"": if len(config) != 1: raise ParseProxyError(""Invalid number of parameters for "" + ""DIRECT"") return QNetworkProxy(QNetworkProxy.ProxyType.NoProxy) elif config[0] == ""PROXY"": if len(config) != 2: raise ParseProxyError(""Invalid number of parameters for PROXY"") host, port = PACResolver._parse_proxy_host(config[1]) return QNetworkProxy(QNetworkProxy.ProxyType.HttpProxy, host, port) elif config[0] in [""SOCKS"", ""SOCKS5""]: if len(config) != 2: raise ParseProxyError(""Invalid number of parameters for SOCKS"") host, port = PACResolver._parse_proxy_host(config[1]) return QNetworkProxy(QNetworkProxy.ProxyType.Socks5Proxy, host, port) else: err = ""Unknown proxy type: {}"" raise ParseProxyError(err.format(config[0])) ``` ###Assistant : Parse one proxy string entry, as described in PAC specification. " 2046,"def test_gen_pyf(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) opath = Path(hello_world_f90).stem + "".pyf"" monkeypatch.setattr(sys, ""argv"", f'f2py -h {opath} {ipath}'.split()) with util.switchdir(ipath.parent): f2pycli() # Generate wrappers out, _ = capfd.readouterr() assert ""Saving signatures to file"" in out assert Path(f'{opath}').exists() ","Ensures that a signature file is generated via the CLI CLI :: -h ",13,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_gen_pyf(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) opath = Path(hello_world_f90).stem + "".pyf"" monkeypatch.setattr(sys, ""argv"", f'f2py -h {opath} {ipath}'.split()) with util.switchdir(ipath.parent): f2pycli() # Generate wrappers out, _ = capfd.readouterr() assert ""Saving signatures to file"" in out assert Path(f'{opath}').exists() ``` ###Assistant : Ensures that a signature file is generated via the CLI CLI :: -h " 2047,"def _dictionary(self): # type: () -> Dict[str, Any] # NOTE: Dictionaries are not populated if not loaded. So, conditionals # are not needed here. retval = {} for variant in OVERRIDE_ORDER: retval.update(self._config[variant]) return retval ","A dictionary representing the loaded configuration. ",6,34,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _dictionary(self): # type: () -> Dict[str, Any] # NOTE: Dictionaries are not populated if not loaded. So, conditionals # are not needed here. retval = {} for variant in OVERRIDE_ORDER: retval.update(self._config[variant]) return retval ``` ###Assistant : A dictionary representing the loaded configuration. " 2048,"def using(self, alias): return RawQuerySet( self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias, ) ",Select the database this RawQuerySet should execute against.,8,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def using(self, alias): return RawQuerySet( self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias, ) ``` ###Assistant : Select the database this RawQuerySet should execute against. " 2049,"def get_weights(self): params = self.weights return backend.batch_get_value(params) # TODO(tanzheny): Maybe share this logic with base_layer.","Returns the current weights of the optimizer. The weights of an optimizer are its state (ie, variables). This function returns the weight values associated with this optimizer as a list of Numpy arrays. The first value is always the iterations count of the optimizer, followed by the optimizer's state variables in the order they were created. The returned list can in turn be used to load state into similarly parameterized optimizers. For example, the RMSprop optimizer for this simple model returns a list of three values-- the iteration count, followed by the root-mean-square value of the kernel and bias of the single Dense layer: >>> opt = tf.keras.optimizers.RMSprop() >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> m.compile(opt, loss='mse') >>> data = np.arange(100).reshape(5, 20) >>> labels = np.zeros(5) >>> results = m.fit(data, labels) # Training. >>> len(opt.get_weights()) 3 Returns: Weights values as a list of numpy arrays. ",143,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_weights(self): params = self.weights return backend.batch_get_value(params) # TODO(tanzheny): Maybe share this logic with base_layer. ``` ###Assistant : Returns the current weights of the optimizer. The weights of an optimizer are its state (ie, variables). This function returns the weight values associated with this optimizer as a list of Numpy arrays. The first value is always the iterations count of the optimizer, followed by the optimizer's state variables in the order they were created. The returned list can in turn be used to load state into similarly parameterized optimizers. For example, the RMSprop optimizer for this simple model returns a list of three values-- the iteration count, followed by the root-mean-square value of the kernel and bias of the single Dense layer: >>> opt = tf.keras.optimizers.RMSprop() >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> m.compile(opt, loss='mse') >>> data = np.arange(100).reshape(5, 20) >>> labels = np.zeros(5) >>> results = m.fit(data, labels) # Training. >>> len(opt.get_weights()) 3 Returns: Weights values as a list of numpy arrays. 
" 2050,"def _focal_loss_cost(self, cls_pred, gt_labels): cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] return cls_cost * self.weight "," Args: cls_pred (Tensor): Predicted classification logits, shape (num_query, num_class). gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). Returns: torch.Tensor: cls_cost value with weight ",22,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _focal_loss_cost(self, cls_pred, gt_labels): cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] return cls_cost * self.weight ``` ###Assistant : Args: cls_pred (Tensor): Predicted classification logits, shape (num_query, num_class). gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). Returns: torch.Tensor: cls_cost value with weight " 2051,"def date_extract_sql(self, lookup_type, field_name): raise NotImplementedError( ""subclasses of BaseDatabaseOperations may require a date_extract_sql() method"" ) "," Given a lookup_type of 'year', 'month', or 'day', return the SQL that extracts a value from the given date field field_name. ",21,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def date_extract_sql(self, lookup_type, field_name): raise NotImplementedError( ""subclasses of BaseDatabaseOperations may require a date_extract_sql() method"" ) ``` ###Assistant : Given a lookup_type of 'year', 'month', or 'day', return the SQL that extracts a value from the given date field field_name. " 2052,"def test_threepid_invite_spamcheck(self) -> None: # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. make_invite_mock = Mock(return_value=make_awaitable(0)) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock self.hs.get_identity_handler().lookup_3pid = Mock( return_value=make_awaitable(None), ) # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it # allow everything for now. # `spec` argument is needed for this function mock to have `__qualname__`, which # is needed for `Measure` metrics buried in SpamChecker. mock = Mock( return_value=make_awaitable(synapse.module_api.NOT_SPAM), spec=lambda *x: None, ) self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock) # Send a 3PID invite into the room and check that it succeeded. email_to_invite = ""teresa@example.com"" channel = self.make_request( method=""POST"", path=""/rooms/"" + self.room_id + ""/invite"", content={ ""id_server"": ""example.com"", ""id_access_token"": ""sometoken"", ""medium"": ""email"", ""address"": email_to_invite, }, access_token=self.tok, ) self.assertEqual(channel.code, 200) # Check that the callback was called with the right params. mock.assert_called_with(self.user_id, ""email"", email_to_invite, self.room_id) # Check that the call to send the invite was made. 
make_invite_mock.assert_called_once() # Now change the return value of the callback to deny any invite and test that # we can't send the invite. mock.return_value = make_awaitable(Codes.CONSENT_NOT_GIVEN) channel = self.make_request( method=""POST"", path=""/rooms/"" + self.room_id + ""/invite"", content={ ""id_server"": ""example.com"", ""id_access_token"": ""sometoken"", ""medium"": ""email"", ""address"": email_to_invite, }, access_token=self.tok, ) self.assertEqual(channel.code, 403) # Also check that it stopped before calling _make_and_store_3pid_invite. make_invite_mock.assert_called_once() "," Test allowing/blocking threepid invites with a spam-check module. In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal[""NOT_SPAM""]]`.",24,227,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_threepid_invite_spamcheck(self) -> None: # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. make_invite_mock = Mock(return_value=make_awaitable(0)) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock self.hs.get_identity_handler().lookup_3pid = Mock( return_value=make_awaitable(None), ) # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it # allow everything for now. # `spec` argument is needed for this function mock to have `__qualname__`, which # is needed for `Measure` metrics buried in SpamChecker. mock = Mock( return_value=make_awaitable(synapse.module_api.NOT_SPAM), spec=lambda *x: None, ) self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock) # Send a 3PID invite into the room and check that it succeeded. email_to_invite = ""teresa@example.com"" channel = self.make_request( method=""POST"", path=""/rooms/"" + self.room_id + ""/invite"", content={ ""id_server"": ""example.com"", ""id_access_token"": ""sometoken"", ""medium"": ""email"", ""address"": email_to_invite, }, access_token=self.tok, ) self.assertEqual(channel.code, 200) # Check that the callback was called with the right params. mock.assert_called_with(self.user_id, ""email"", email_to_invite, self.room_id) # Check that the call to send the invite was made. make_invite_mock.assert_called_once() # Now change the return value of the callback to deny any invite and test that # we can't send the invite. mock.return_value = make_awaitable(Codes.CONSENT_NOT_GIVEN) channel = self.make_request( method=""POST"", path=""/rooms/"" + self.room_id + ""/invite"", content={ ""id_server"": ""example.com"", ""id_access_token"": ""sometoken"", ""medium"": ""email"", ""address"": email_to_invite, }, access_token=self.tok, ) self.assertEqual(channel.code, 403) # Also check that it stopped before calling _make_and_store_3pid_invite. make_invite_mock.assert_called_once() ``` ###Assistant : Test allowing/blocking threepid invites with a spam-check module. In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal[""NOT_SPAM""]]`. 
" 2053,"def test_generate_invalid_param_val_all_valid(constraints): with pytest.raises(NotImplementedError): generate_invalid_param_val(constraints[0], constraints=constraints) @pytest.mark.parametrize( ""constraint"", [ _ArrayLikes, _Callables, _InstancesOf, _NoneConstraint, _RandomStates, _SparseMatrices, ], )","Check that the function raises NotImplementedError when there's no invalid value for the constraint. ",14,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_generate_invalid_param_val_all_valid(constraints): with pytest.raises(NotImplementedError): generate_invalid_param_val(constraints[0], constraints=constraints) @pytest.mark.parametrize( ""constraint"", [ _ArrayLikes, _Callables, _InstancesOf, _NoneConstraint, _RandomStates, _SparseMatrices, ], ) ``` ###Assistant : Check that the function raises NotImplementedError when there's no invalid value for the constraint. " 2054,"def load_drawer_from_disk(self): exists = Path(self.full_path / str('pair_dictionary.json')).resolve().exists() if exists: with open(self.full_path / str('pair_dictionary.json'), ""r"") as fp: self.pair_dict = json.load(fp) elif not self.follow_mode: logger.info(""Could not find existing datadrawer, starting from scratch"") else: logger.warning(f'Follower could not find pair_dictionary at {self.full_path} ' 'sending null values back to strategy') return exists "," Locate and load a previously saved data drawer full of all pair model metadata in present model folder. :returns: exists: bool = whether or not the drawer was located ",29,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_drawer_from_disk(self): exists = Path(self.full_path / str('pair_dictionary.json')).resolve().exists() if exists: with open(self.full_path / str('pair_dictionary.json'), ""r"") as fp: self.pair_dict = json.load(fp) elif not self.follow_mode: logger.info(""Could not find existing datadrawer, starting from scratch"") else: logger.warning(f'Follower could not find pair_dictionary at {self.full_path} ' 'sending null values back to strategy') return exists ``` ###Assistant : Locate and load a previously saved data drawer full of all pair model metadata in present model folder. :returns: exists: bool = whether or not the drawer was located " 2055,"def address_exclude(self, other): if not self._version == other._version: raise TypeError(""%s and %s are not of the same version"" % ( self, other)) if not isinstance(other, _BaseNetwork): raise TypeError(""%s is not a network object"" % other) if not other.subnet_of(self): raise ValueError('%s not contained in %s' % (other, self)) if other == self: return # Make sure we're comparing the network of other. other = other.__class__('%s/%s' % (other.network_address, other.prefixlen)) s1, s2 = self.subnets() while s1 != other and s2 != other: if other.subnet_of(s1): yield s2 s1, s2 = s1.subnets() elif other.subnet_of(s2): yield s1 s1, s2 = s2.subnets() else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) if s1 == other: yield s2 elif s2 == other: yield s1 else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) ","Remove an address from a larger block. 
For example: addr1 = ip_network('192.0.2.0/28') addr2 = ip_network('192.0.2.1/32') list(addr1.address_exclude(addr2)) = [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] or IPv6: addr1 = ip_network('2001:db8::1/32') addr2 = ip_network('2001:db8::1/128') list(addr1.address_exclude(addr2)) = [ip_network('2001:db8::1/128'), ip_network('2001:db8::2/127'), ip_network('2001:db8::4/126'), ip_network('2001:db8::8/125'), ... ip_network('2001:db8:8000::/33')] Args: other: An IPv4Network or IPv6Network object of the same type. Returns: An iterator of the IPv(4|6)Network objects which is self minus other. Raises: TypeError: If self and other are of differing address versions, or if other is not a network object. ValueError: If other is not completely contained by self. ",88,157,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def address_exclude(self, other): if not self._version == other._version: raise TypeError(""%s and %s are not of the same version"" % ( self, other)) if not isinstance(other, _BaseNetwork): raise TypeError(""%s is not a network object"" % other) if not other.subnet_of(self): raise ValueError('%s not contained in %s' % (other, self)) if other == self: return # Make sure we're comparing the network of other. other = other.__class__('%s/%s' % (other.network_address, other.prefixlen)) s1, s2 = self.subnets() while s1 != other and s2 != other: if other.subnet_of(s1): yield s2 s1, s2 = s1.subnets() elif other.subnet_of(s2): yield s1 s1, s2 = s2.subnets() else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) if s1 == other: yield s2 elif s2 == other: yield s1 else: # If we got here, there's a bug somewhere. raise AssertionError('Error performing exclusion: ' 's1: %s s2: %s other: %s' % (s1, s2, other)) ``` ###Assistant : Remove an address from a larger block. For example: addr1 = ip_network('192.0.2.0/28') addr2 = ip_network('192.0.2.1/32') list(addr1.address_exclude(addr2)) = [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'), IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')] or IPv6: addr1 = ip_network('2001:db8::1/32') addr2 = ip_network('2001:db8::1/128') list(addr1.address_exclude(addr2)) = [ip_network('2001:db8::1/128'), ip_network('2001:db8::2/127'), ip_network('2001:db8::4/126'), ip_network('2001:db8::8/125'), ... ip_network('2001:db8:8000::/33')] Args: other: An IPv4Network or IPv6Network object of the same type. Returns: An iterator of the IPv(4|6)Network objects which is self minus other. Raises: TypeError: If self and other are of differing address versions, or if other is not a network object. ValueError: If other is not completely contained by self. " 2056,"def mutual_information(cooccurrence): with np.errstate(invalid=""ignore"", divide=""ignore""): result = np.log2(cooccurrence.shape[0] * lift(cooccurrence)) return np.array(result) ","Helper method to calculate the Mutual Information of a matrix of co-occurrences. Mutual information is a measurement of the amount of information explained by the i-th j-th item column vector. Args: cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. Returns: numpy.ndarray: The matrix of mutual information between any two items. ",51,12,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def mutual_information(cooccurrence): with np.errstate(invalid=""ignore"", divide=""ignore""): result = np.log2(cooccurrence.shape[0] * lift(cooccurrence)) return np.array(result) ``` ###Assistant : Helper method to calculate the Mutual Information of a matrix of co-occurrences. Mutual information is a measurement of the amount of information explained by the i-th j-th item column vector. Args: cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. Returns: numpy.ndarray: The matrix of mutual information between any two items. " 2057,"def _is_dunder(name): return len(name) > 4 and name.startswith('__') and name.endswith('__') # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality # checks, argument expansion etc. are done on the _subs_tre. As a result we # can't provide a get_type_hints function that strips out annotations. ",Returns True if name is a __dunder_variable_name__.,7,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _is_dunder(name): return len(name) > 4 and name.startswith('__') and name.endswith('__') # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality # checks, argument expansion etc. are done on the _subs_tre. As a result we # can't provide a get_type_hints function that strips out annotations. ``` ###Assistant : Returns True if name is a __dunder_variable_name__. " 2058,"def fit(self, X, y=None, sample_weight=None): algorithm = self._choose_algorithm(self.algorithm, self.metric) if isinstance(self.bandwidth, str): methods_supported = (""scott"", ""silvermann"") if self.bandwidth not in methods_supported: raise ValueError( ""When `bandwidth` is a string, it should be one of: "" f""{', '.join(methods_supported)}. Got {self.bandwidth!r} instead."" ) if self.bandwidth == ""scott"": self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4)) elif self.bandwidth == ""silvermann"": self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** ( -1 / (X.shape[1] + 4) ) else: check_scalar( self.bandwidth, ""bandwidth"", target_type=numbers.Real, min_val=0, include_boundaries=""neither"", ) self.bandwidth_ = self.bandwidth if self.kernel not in VALID_KERNELS: raise ValueError(""invalid kernel: '{0}'"".format(self.kernel)) X = self._validate_data(X, order=""C"", dtype=DTYPE) if sample_weight is not None: sample_weight = _check_sample_weight( sample_weight, X, DTYPE, only_non_negative=True ) kwargs = self.metric_params if kwargs is None: kwargs = {} self.tree_ = TREE_DICT[algorithm]( X, metric=self.metric, leaf_size=self.leaf_size, sample_weight=sample_weight, **kwargs, ) return self ","Fit the Kernel Density model on the data. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. sample_weight : array-like of shape (n_samples,), default=None List of sample weights attached to the data X. .. versionadded:: 0.20 Returns ------- self : object Returns the instance itself. ",70,133,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def fit(self, X, y=None, sample_weight=None): algorithm = self._choose_algorithm(self.algorithm, self.metric) if isinstance(self.bandwidth, str): methods_supported = (""scott"", ""silvermann"") if self.bandwidth not in methods_supported: raise ValueError( ""When `bandwidth` is a string, it should be one of: "" f""{', '.join(methods_supported)}. Got {self.bandwidth!r} instead."" ) if self.bandwidth == ""scott"": self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4)) elif self.bandwidth == ""silvermann"": self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** ( -1 / (X.shape[1] + 4) ) else: check_scalar( self.bandwidth, ""bandwidth"", target_type=numbers.Real, min_val=0, include_boundaries=""neither"", ) self.bandwidth_ = self.bandwidth if self.kernel not in VALID_KERNELS: raise ValueError(""invalid kernel: '{0}'"".format(self.kernel)) X = self._validate_data(X, order=""C"", dtype=DTYPE) if sample_weight is not None: sample_weight = _check_sample_weight( sample_weight, X, DTYPE, only_non_negative=True ) kwargs = self.metric_params if kwargs is None: kwargs = {} self.tree_ = TREE_DICT[algorithm]( X, metric=self.metric, leaf_size=self.leaf_size, sample_weight=sample_weight, **kwargs, ) return self ``` ###Assistant : Fit the Kernel Density model on the data. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. sample_weight : array-like of shape (n_samples,), default=None List of sample weights attached to the data X. .. versionadded:: 0.20 Returns ------- self : object Returns the instance itself. " 2059,"def get_on_pixels(self, image): if image.mode != ""L"": msg = ""Image mode must be L"" raise ValueError(msg) return _imagingmorph.get_on_pixels(image.im.id) ","Get a list of all turned on pixels in a binary image Returns a list of tuples of (x,y) coordinates of all matching pixels. See :ref:`coordinate-system`.",26,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_on_pixels(self, image): if image.mode != ""L"": msg = ""Image mode must be L"" raise ValueError(msg) return _imagingmorph.get_on_pixels(image.im.id) ``` ###Assistant : Get a list of all turned on pixels in a binary image Returns a list of tuples of (x,y) coordinates of all matching pixels. See :ref:`coordinate-system`. " 2060,"def visit_Num(self, node): if isinstance(node.n, int): return fix_missing_locations(Call(func=Name('Integer', Load()), args=[node], keywords=[])) elif isinstance(node.n, float): return fix_missing_locations(Call(func=Name('Float', Load()), args=[node], keywords=[])) return node ","This function exists for backwards compatibility with Python 3.7. It should be removed when SymPy removes support for Python 3.7.",20,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def visit_Num(self, node): if isinstance(node.n, int): return fix_missing_locations(Call(func=Name('Integer', Load()), args=[node], keywords=[])) elif isinstance(node.n, float): return fix_missing_locations(Call(func=Name('Float', Load()), args=[node], keywords=[])) return node ``` ###Assistant : This function exists for backwards compatibility with Python 3.7. It should be removed when SymPy removes support for Python 3.7. 
" 2061,"def enable(display=1, logdir=None, context=5, format=""html""): sys.excepthook = Hook(display=display, logdir=logdir, context=context, format=format) ","Install an exception handler that formats tracebacks as HTML. The optional argument 'display' can be set to 0 to suppress sending the traceback to the browser, and 'logdir' can be set to a directory to cause tracebacks to be written to files there.",43,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def enable(display=1, logdir=None, context=5, format=""html""): sys.excepthook = Hook(display=display, logdir=logdir, context=context, format=format) ``` ###Assistant : Install an exception handler that formats tracebacks as HTML. The optional argument 'display' can be set to 0 to suppress sending the traceback to the browser, and 'logdir' can be set to a directory to cause tracebacks to be written to files there. " 2062,"def parse_list_header(value): result = [] for item in _parse_list_header(value): if item[:1] == item[-1:] == '""': item = unquote_header_value(item[1:-1]) result.append(item) return result # From mitsuhiko/werkzeug (used with permission).","Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, ""quoted value""') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. :return: :class:`list` :rtype: list ",99,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse_list_header(value): result = [] for item in _parse_list_header(value): if item[:1] == item[-1:] == '""': item = unquote_header_value(item[1:-1]) result.append(item) return result # From mitsuhiko/werkzeug (used with permission). ``` ###Assistant : Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, ""quoted value""') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. :return: :class:`list` :rtype: list " 2063,"def media_series_title(self) -> str | None: if self._playing and self._is_feature_available(FeatureName.SeriesName): return self._playing.series_name return None ","Title of series of current playing media, TV show only.",10,14,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def media_series_title(self) -> str | None: if self._playing and self._is_feature_available(FeatureName.SeriesName): return self._playing.series_name return None ``` ###Assistant : Title of series of current playing media, TV show only. " 2064,"def _build_template(name, template, files, config, nav): # Run `pre_template` plugin events. template = config['plugins'].run_event( 'pre_template', template, template_name=name, config=config ) if utils.is_error_template(name): # Force absolute URLs in the nav of error pages and account for the # possibility that the docs root might be different than the server root. # See https://github.com/mkdocs/mkdocs/issues/77. # However, if site_url is not set, assume the docs root and server root # are the same. See https://github.com/mkdocs/mkdocs/issues/1598. base_url = urlsplit(config['site_url'] or '/').path else: base_url = utils.get_relative_url('.', name) context = get_context(nav, files, config, base_url=base_url) # Run `template_context` plugin events. context = config['plugins'].run_event( 'template_context', context, template_name=name, config=config ) output = template.render(context) # Run `post_template` plugin events. output = config['plugins'].run_event('post_template', output, template_name=name, config=config) return output "," Return rendered output for given template as a string. ",9,116,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _build_template(name, template, files, config, nav): # Run `pre_template` plugin events. template = config['plugins'].run_event( 'pre_template', template, template_name=name, config=config ) if utils.is_error_template(name): # Force absolute URLs in the nav of error pages and account for the # possibility that the docs root might be different than the server root. # See https://github.com/mkdocs/mkdocs/issues/77. # However, if site_url is not set, assume the docs root and server root # are the same. See https://github.com/mkdocs/mkdocs/issues/1598. base_url = urlsplit(config['site_url'] or '/').path else: base_url = utils.get_relative_url('.', name) context = get_context(nav, files, config, base_url=base_url) # Run `template_context` plugin events. context = config['plugins'].run_event( 'template_context', context, template_name=name, config=config ) output = template.render(context) # Run `post_template` plugin events. output = config['plugins'].run_event('post_template', output, template_name=name, config=config) return output ``` ###Assistant : Return rendered output for given template as a string. " 2065,"def matches(self, expr, repl_dict=None, old=False): expr = sympify(expr) if not isinstance(expr, self.__class__): return None if repl_dict is None: repl_dict = {} else: repl_dict = repl_dict.copy() if self == expr: return repl_dict if len(self.args) != len(expr.args): return None d = repl_dict # already a copy for arg, other_arg in zip(self.args, expr.args): if arg == other_arg: continue if arg.is_Relational: try: d = arg.xreplace(d).matches(other_arg, d, old=old) except TypeError: # Should be InvalidComparisonError when introduced d = None else: d = arg.xreplace(d).matches(other_arg, d, old=old) if d is None: return None return d "," Helper method for match() that looks for a match between Wild symbols in self and expressions in expr. 
Examples ======== >>> from sympy import symbols, Wild, Basic >>> a, b, c = symbols('a b c') >>> x = Wild('x') >>> Basic(a + x, x).matches(Basic(a + b, c)) is None True >>> Basic(a + x, x).matches(Basic(a + b + c, b + c)) {x_: b + c} ",66,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def matches(self, expr, repl_dict=None, old=False): expr = sympify(expr) if not isinstance(expr, self.__class__): return None if repl_dict is None: repl_dict = {} else: repl_dict = repl_dict.copy() if self == expr: return repl_dict if len(self.args) != len(expr.args): return None d = repl_dict # already a copy for arg, other_arg in zip(self.args, expr.args): if arg == other_arg: continue if arg.is_Relational: try: d = arg.xreplace(d).matches(other_arg, d, old=old) except TypeError: # Should be InvalidComparisonError when introduced d = None else: d = arg.xreplace(d).matches(other_arg, d, old=old) if d is None: return None return d ``` ###Assistant : Helper method for match() that looks for a match between Wild symbols in self and expressions in expr. Examples ======== >>> from sympy import symbols, Wild, Basic >>> a, b, c = symbols('a b c') >>> x = Wild('x') >>> Basic(a + x, x).matches(Basic(a + b, c)) is None True >>> Basic(a + x, x).matches(Basic(a + b + c, b + c)) {x_: b + c} " 2066,"def _create_placement_group(self, num_workers): pg = get_current_placement_group() if pg is None: bundle = {""CPU"": self._num_cpus_per_worker, ""GPU"": int(self._use_gpu)} bundles = [bundle] * num_workers pg = ray.util.placement_group(bundles, strategy=""SPREAD"") logger.debug(""Waiting for placement group to start."") ready, _ = ray.wait([pg.ready()], timeout=SGD_PLACEMENT_GROUP_TIMEOUT_S) if ready: logger.debug(""Placement group has started."") else: raise TimeoutError( ""Placement group creation timed out. Make sure "" ""your cluster either has enough resources or use "" ""an autoscaling cluster. Current resources "" ""available: {}, resources requested by the "" ""placement group: {}"".format( ray.available_resources(), pg.bundle_specs ) ) self._worker_placement_group = pg ","Creates a placement group for the workers. If this worker is already in a placement group then a new one will not be created. This is primarily for when Tune is the upstream and will allocate resources for SGD workers. If this worker is not in a placement group, a new one will be created and set. The placement group will have a single bundle for each worker and use the SPREAD strategy for an even distribution. ",77,85,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_placement_group(self, num_workers): pg = get_current_placement_group() if pg is None: bundle = {""CPU"": self._num_cpus_per_worker, ""GPU"": int(self._use_gpu)} bundles = [bundle] * num_workers pg = ray.util.placement_group(bundles, strategy=""SPREAD"") logger.debug(""Waiting for placement group to start."") ready, _ = ray.wait([pg.ready()], timeout=SGD_PLACEMENT_GROUP_TIMEOUT_S) if ready: logger.debug(""Placement group has started."") else: raise TimeoutError( ""Placement group creation timed out. Make sure "" ""your cluster either has enough resources or use "" ""an autoscaling cluster. Current resources "" ""available: {}, resources requested by the "" ""placement group: {}"".format( ray.available_resources(), pg.bundle_specs ) ) self._worker_placement_group = pg ``` ###Assistant : Creates a placement group for the workers. 
If this worker is already in a placement group then a new one will not be created. This is primarily for when Tune is the upstream and will allocate resources for SGD workers. If this worker is not in a placement group, a new one will be created and set. The placement group will have a single bundle for each worker and use the SPREAD strategy for an even distribution. " 2067,"def get_warehouse_list(filters): from frappe.core.doctype.user_permission.user_permission import get_permitted_documents condition = """" user_permitted_warehouse = get_permitted_documents(""Warehouse"") value = () if user_permitted_warehouse: condition = ""and name in %s"" value = set(user_permitted_warehouse) elif not user_permitted_warehouse and filters.get(""warehouse""): condition = ""and name = %s"" value = filters.get(""warehouse"") return frappe.db.sql( .format( condition=condition ), value, as_dict=1, ) ","select name from `tabWarehouse` where is_group = 0 {condition}",9,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_warehouse_list(filters): from frappe.core.doctype.user_permission.user_permission import get_permitted_documents condition = """" user_permitted_warehouse = get_permitted_documents(""Warehouse"") value = () if user_permitted_warehouse: condition = ""and name in %s"" value = set(user_permitted_warehouse) elif not user_permitted_warehouse and filters.get(""warehouse""): condition = ""and name = %s"" value = filters.get(""warehouse"") return frappe.db.sql( .format( condition=condition ), value, as_dict=1, ) ``` ###Assistant : select name from `tabWarehouse` where is_group = 0 {condition} " 2068,"def unmap(self) -> ""BaseOperator"": dag = self.dag if not dag: raise RuntimeError(""Cannot unmap a task without a DAG"") dag._remove_task(self.task_id) if isinstance(self.operator_class, str): raise RuntimeError(""Cannot unmap a deserialized operator"") return self.operator_class(**self._get_unmap_kwargs()) ","Get the ""normal"" Operator after applying the current mapping.",9,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unmap(self) -> ""BaseOperator"": dag = self.dag if not dag: raise RuntimeError(""Cannot unmap a task without a DAG"") dag._remove_task(self.task_id) if isinstance(self.operator_class, str): raise RuntimeError(""Cannot unmap a deserialized operator"") return self.operator_class(**self._get_unmap_kwargs()) ``` ###Assistant : Get the ""normal"" Operator after applying the current mapping. " 2069,"def require_tensorflow(test_case): if not is_tensorflow_available(): return unittest.skip(""test requires TensorFlow"")(test_case) else: return test_case "," Decorator marking a test that requires TensorFlow installed. These tests are skipped when TensorFlow isn't installed ",16,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def require_tensorflow(test_case): if not is_tensorflow_available(): return unittest.skip(""test requires TensorFlow"")(test_case) else: return test_case ``` ###Assistant : Decorator marking a test that requires TensorFlow installed. 
These tests are skipped when TensorFlow isn't installed " 2070,"def as_completed(fs, timeout=None): if timeout is not None: end_time = timeout + time.monotonic() fs = set(fs) total_futures = len(fs) with _AcquireFutures(fs): finished = set( f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) pending = fs - finished waiter = _create_and_install_waiters(fs, _AS_COMPLETED) finished = list(finished) try: yield from _yield_finished_futures(finished, waiter, ref_collect=(fs,)) while pending: if timeout is None: wait_timeout = None else: wait_timeout = end_time - time.monotonic() if wait_timeout < 0: raise TimeoutError( '%d (of %d) futures unfinished' % ( len(pending), total_futures)) waiter.event.wait(wait_timeout) with waiter.lock: finished = waiter.finished_futures waiter.finished_futures = [] waiter.event.clear() # reverse to keep finishing order finished.reverse() yield from _yield_finished_futures(finished, waiter, ref_collect=(fs, pending)) finally: # Remove waiter from unfinished futures for f in fs: with f._condition: f._waiters.remove(waiter) DoneAndNotDoneFutures = collections.namedtuple( 'DoneAndNotDoneFutures', 'done not_done')","An iterator over the given futures that yields each as it completes. Args: fs: The sequence of Futures (possibly created by different Executors) to iterate over. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. Returns: An iterator that yields the given Futures as they complete (finished or cancelled). If any given Futures are duplicated, they will be returned once. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. ",85,125,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def as_completed(fs, timeout=None): if timeout is not None: end_time = timeout + time.monotonic() fs = set(fs) total_futures = len(fs) with _AcquireFutures(fs): finished = set( f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) pending = fs - finished waiter = _create_and_install_waiters(fs, _AS_COMPLETED) finished = list(finished) try: yield from _yield_finished_futures(finished, waiter, ref_collect=(fs,)) while pending: if timeout is None: wait_timeout = None else: wait_timeout = end_time - time.monotonic() if wait_timeout < 0: raise TimeoutError( '%d (of %d) futures unfinished' % ( len(pending), total_futures)) waiter.event.wait(wait_timeout) with waiter.lock: finished = waiter.finished_futures waiter.finished_futures = [] waiter.event.clear() # reverse to keep finishing order finished.reverse() yield from _yield_finished_futures(finished, waiter, ref_collect=(fs, pending)) finally: # Remove waiter from unfinished futures for f in fs: with f._condition: f._waiters.remove(waiter) DoneAndNotDoneFutures = collections.namedtuple( 'DoneAndNotDoneFutures', 'done not_done') ``` ###Assistant : An iterator over the given futures that yields each as it completes. Args: fs: The sequence of Futures (possibly created by different Executors) to iterate over. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. Returns: An iterator that yields the given Futures as they complete (finished or cancelled). If any given Futures are duplicated, they will be returned once. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. 
" 2071,"def pack_env_dict(self) -> Dict[str, Any]: env_info = {""window_size"": self.CONV_WIDTH, ""reward_kwargs"": self.reward_params, ""config"": self.config, ""live"": self.live} if self.data_provider: env_info[""fee""] = self.data_provider._exchange \ .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore return env_info "," Create dictionary of environment arguments ",5,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pack_env_dict(self) -> Dict[str, Any]: env_info = {""window_size"": self.CONV_WIDTH, ""reward_kwargs"": self.reward_params, ""config"": self.config, ""live"": self.live} if self.data_provider: env_info[""fee""] = self.data_provider._exchange \ .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore return env_info ``` ###Assistant : Create dictionary of environment arguments " 2072,"def sample(self) -> SampleBatchType: if self.fake_sampler and self.last_batch is not None: return self.last_batch elif self.input_reader is None: raise ValueError( ""RolloutWorker has no `input_reader` object! "" ""Cannot call `sample()`. You can try setting "" ""`create_env_on_driver` to True."" ) if log_once(""sample_start""): logger.info( ""Generating sample batch of size {}"".format( self.rollout_fragment_length ) ) batches = [self.input_reader.next()] steps_so_far = ( batches[0].count if self.count_steps_by == ""env_steps"" else batches[0].agent_steps() ) # In truncate_episodes mode, never pull more than 1 batch per env. # This avoids over-running the target batch size. if self.batch_mode == ""truncate_episodes"": max_batches = self.num_envs else: max_batches = float(""inf"") while ( steps_so_far < self.rollout_fragment_length and len(batches) < max_batches ): batch = self.input_reader.next() steps_so_far += ( batch.count if self.count_steps_by == ""env_steps"" else batch.agent_steps() ) batches.append(batch) batch = batches[0].concat_samples(batches) if len(batches) > 1 else batches[0] self.callbacks.on_sample_end(worker=self, samples=batch) # Always do writes prior to compression for consistency and to allow # for better compression inside the writer. self.output_writer.write(batch) # Do off-policy estimation, if needed. if self.reward_estimators: for sub_batch in batch.split_by_episode(): for estimator in self.reward_estimators: estimator.process(sub_batch) if log_once(""sample_end""): logger.info(""Completed sample batch:\n\n{}\n"".format(summarize(batch))) if self.compress_observations: batch.compress(bulk=self.compress_observations == ""bulk"") if self.fake_sampler: self.last_batch = batch return batch ","Returns a batch of experience sampled from this worker. This method must be implemented by subclasses. Returns: A columnar batch of experiences (e.g., tensors). Examples: >>> import gym >>> from ray.rllib.evaluation.rollout_worker import RolloutWorker >>> from ray.rllib.algorithms.pg.pg_tf_policy import PGTFPolicy >>> worker = RolloutWorker( # doctest: +SKIP ... env_creator=lambda _: gym.make(""CartPole-v0""), # doctest: +SKIP ... policy_spec=PGTFPolicy) # doctest: +SKIP >>> print(worker.sample()) # doctest: +SKIP SampleBatch({""obs"": [...], ""action"": [...], ...}) ",67,184,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def sample(self) -> SampleBatchType: if self.fake_sampler and self.last_batch is not None: return self.last_batch elif self.input_reader is None: raise ValueError( ""RolloutWorker has no `input_reader` object! "" ""Cannot call `sample()`. You can try setting "" ""`create_env_on_driver` to True."" ) if log_once(""sample_start""): logger.info( ""Generating sample batch of size {}"".format( self.rollout_fragment_length ) ) batches = [self.input_reader.next()] steps_so_far = ( batches[0].count if self.count_steps_by == ""env_steps"" else batches[0].agent_steps() ) # In truncate_episodes mode, never pull more than 1 batch per env. # This avoids over-running the target batch size. if self.batch_mode == ""truncate_episodes"": max_batches = self.num_envs else: max_batches = float(""inf"") while ( steps_so_far < self.rollout_fragment_length and len(batches) < max_batches ): batch = self.input_reader.next() steps_so_far += ( batch.count if self.count_steps_by == ""env_steps"" else batch.agent_steps() ) batches.append(batch) batch = batches[0].concat_samples(batches) if len(batches) > 1 else batches[0] self.callbacks.on_sample_end(worker=self, samples=batch) # Always do writes prior to compression for consistency and to allow # for better compression inside the writer. self.output_writer.write(batch) # Do off-policy estimation, if needed. if self.reward_estimators: for sub_batch in batch.split_by_episode(): for estimator in self.reward_estimators: estimator.process(sub_batch) if log_once(""sample_end""): logger.info(""Completed sample batch:\n\n{}\n"".format(summarize(batch))) if self.compress_observations: batch.compress(bulk=self.compress_observations == ""bulk"") if self.fake_sampler: self.last_batch = batch return batch ``` ###Assistant : Returns a batch of experience sampled from this worker. This method must be implemented by subclasses. Returns: A columnar batch of experiences (e.g., tensors). Examples: >>> import gym >>> from ray.rllib.evaluation.rollout_worker import RolloutWorker >>> from ray.rllib.algorithms.pg.pg_tf_policy import PGTFPolicy >>> worker = RolloutWorker( # doctest: +SKIP ... env_creator=lambda _: gym.make(""CartPole-v0""), # doctest: +SKIP ... policy_spec=PGTFPolicy) # doctest: +SKIP >>> print(worker.sample()) # doctest: +SKIP SampleBatch({""obs"": [...], ""action"": [...], ...}) " 2073,"def test_no_duplicates_for_non_unique_related_object_in_list_filter(self): parent = Parent.objects.create(name=""Mary"") # Two children with the same name Child.objects.create(parent=parent, name=""Daniel"") Child.objects.create(parent=parent, name=""Daniel"") m = ParentAdmin(Parent, custom_site) request = self.factory.get(""/parent/"", data={""child__name"": ""Daniel""}) request.user = self.superuser cl = m.get_changelist_instance(request) # Exists() is applied. self.assertEqual(cl.queryset.count(), 1) # Queryset must be deletable. self.assertIs(cl.queryset.query.distinct, False) cl.queryset.delete() self.assertEqual(cl.queryset.count(), 0) "," Regressions tests for #15819: If a field listed in list_filters is a non-unique related object, results shouldn't appear more than once. ",21,47,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_no_duplicates_for_non_unique_related_object_in_list_filter(self): parent = Parent.objects.create(name=""Mary"") # Two children with the same name Child.objects.create(parent=parent, name=""Daniel"") Child.objects.create(parent=parent, name=""Daniel"") m = ParentAdmin(Parent, custom_site) request = self.factory.get(""/parent/"", data={""child__name"": ""Daniel""}) request.user = self.superuser cl = m.get_changelist_instance(request) # Exists() is applied. self.assertEqual(cl.queryset.count(), 1) # Queryset must be deletable. self.assertIs(cl.queryset.query.distinct, False) cl.queryset.delete() self.assertEqual(cl.queryset.count(), 0) ``` ###Assistant : Regressions tests for #15819: If a field listed in list_filters is a non-unique related object, results shouldn't appear more than once. " 2074,"def get_current_tax_app() -> Optional[App]: return ( App.objects.order_by(""pk"") .for_event_type(WebhookEventSyncType.CHECKOUT_CALCULATE_TAXES) .for_event_type(WebhookEventSyncType.ORDER_CALCULATE_TAXES) .last() ) ","Return currently used tax app or None, if there aren't any.",11,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_current_tax_app() -> Optional[App]: return ( App.objects.order_by(""pk"") .for_event_type(WebhookEventSyncType.CHECKOUT_CALCULATE_TAXES) .for_event_type(WebhookEventSyncType.ORDER_CALCULATE_TAXES) .last() ) ``` ###Assistant : Return currently used tax app or None, if there aren't any. " 2075,"def test_callbacks(self) -> None: cache: DeferredCache[str, int] = DeferredCache(""test"") callbacks = set() # start with an entry, with a callback cache.prefill(""k1"", 10, callback=lambda: callbacks.add(""prefill"")) # now replace that entry with a pending result origin_d: ""defer.Deferred[int]"" = defer.Deferred() set_d = cache.set(""k1"", origin_d, callback=lambda: callbacks.add(""set"")) # ... and also make a get request get_d = cache.get(""k1"", callback=lambda: callbacks.add(""get"")) # we don't expect the invalidation callback for the original value to have # been called yet, even though get() will now return a different result. # I'm not sure if that is by design or not. self.assertEqual(callbacks, set()) # now fire off all the deferreds origin_d.callback(20) self.assertEqual(self.successResultOf(set_d), 20) self.assertEqual(self.successResultOf(get_d), 20) # now the original invalidation callback should have been called, but none of # the others self.assertEqual(callbacks, {""prefill""}) callbacks.clear() # another update should invalidate both the previous results cache.prefill(""k1"", 30) self.assertEqual(callbacks, {""set"", ""get""}) ",Invalidation callbacks are called at the right time,8,140,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_callbacks(self) -> None: cache: DeferredCache[str, int] = DeferredCache(""test"") callbacks = set() # start with an entry, with a callback cache.prefill(""k1"", 10, callback=lambda: callbacks.add(""prefill"")) # now replace that entry with a pending result origin_d: ""defer.Deferred[int]"" = defer.Deferred() set_d = cache.set(""k1"", origin_d, callback=lambda: callbacks.add(""set"")) # ... 
and also make a get request get_d = cache.get(""k1"", callback=lambda: callbacks.add(""get"")) # we don't expect the invalidation callback for the original value to have # been called yet, even though get() will now return a different result. # I'm not sure if that is by design or not. self.assertEqual(callbacks, set()) # now fire off all the deferreds origin_d.callback(20) self.assertEqual(self.successResultOf(set_d), 20) self.assertEqual(self.successResultOf(get_d), 20) # now the original invalidation callback should have been called, but none of # the others self.assertEqual(callbacks, {""prefill""}) callbacks.clear() # another update should invalidate both the previous results cache.prefill(""k1"", 30) self.assertEqual(callbacks, {""set"", ""get""}) ``` ###Assistant : Invalidation callbacks are called at the right time " 2076,"def sql_flush(style, connection, reset_sequences=True, allow_cascade=False): tables = connection.introspection.django_table_names( only_existing=True, include_views=False ) return connection.ops.sql_flush( style, tables, reset_sequences=reset_sequences, allow_cascade=allow_cascade, ) "," Return a list of the SQL statements used to flush the database. ",12,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sql_flush(style, connection, reset_sequences=True, allow_cascade=False): tables = connection.introspection.django_table_names( only_existing=True, include_views=False ) return connection.ops.sql_flush( style, tables, reset_sequences=reset_sequences, allow_cascade=allow_cascade, ) ``` ###Assistant : Return a list of the SQL statements used to flush the database. " 2077,"def generate(cls, size, callback, channels=3, target_mode=None): size_1d, size_2d, size_3d = cls._check_size(size) if channels not in (3, 4): raise ValueError(""Only 3 or 4 output channels are supported"") table = [0] * (size_1d * size_2d * size_3d * channels) idx_out = 0 for b in range(size_3d): for g in range(size_2d): for r in range(size_1d): table[idx_out : idx_out + channels] = callback( r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1) ) idx_out += channels return cls( (size_1d, size_2d, size_3d), table, channels=channels, target_mode=target_mode, _copy_table=False, ) ","Generates new LUT using provided callback. :param size: Size of the table. Passed to the constructor. :param callback: Function with three parameters which correspond three color channels. Will be called ``size**3`` times with values from 0.0 to 1.0 and should return a tuple with ``channels`` elements. :param channels: The number of channels which should return callback. :param target_mode: Passed to the constructor of the resulting lookup table. ",67,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate(cls, size, callback, channels=3, target_mode=None): size_1d, size_2d, size_3d = cls._check_size(size) if channels not in (3, 4): raise ValueError(""Only 3 or 4 output channels are supported"") table = [0] * (size_1d * size_2d * size_3d * channels) idx_out = 0 for b in range(size_3d): for g in range(size_2d): for r in range(size_1d): table[idx_out : idx_out + channels] = callback( r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1) ) idx_out += channels return cls( (size_1d, size_2d, size_3d), table, channels=channels, target_mode=target_mode, _copy_table=False, ) ``` ###Assistant : Generates new LUT using provided callback. :param size: Size of the table. 
Passed to the constructor. :param callback: Function with three parameters which correspond three color channels. Will be called ``size**3`` times with values from 0.0 to 1.0 and should return a tuple with ``channels`` elements. :param channels: The number of channels which should return callback. :param target_mode: Passed to the constructor of the resulting lookup table. " 2078,"def regex_lookup(self, lookup_type): raise NotImplementedError( ""subclasses of BaseDatabaseOperations may require a regex_lookup() method"" ) "," Return the string to use in a query when performing regular expression lookups (using ""regex"" or ""iregex""). It should contain a '%s' placeholder for the column being searched against. If the feature is not supported (or part of it is not supported), raise NotImplementedError. ",44,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def regex_lookup(self, lookup_type): raise NotImplementedError( ""subclasses of BaseDatabaseOperations may require a regex_lookup() method"" ) ``` ###Assistant : Return the string to use in a query when performing regular expression lookups (using ""regex"" or ""iregex""). It should contain a '%s' placeholder for the column being searched against. If the feature is not supported (or part of it is not supported), raise NotImplementedError. " 2079,"def link_existing_conversations(doc, state): if doc.doctype != ""Contact"": return try: numbers = [d.phone for d in doc.phone_nos] for number in numbers: number = strip_number(number) if not number: continue logs = frappe.db.sql_list( , dict(phone_number=""%{}"".format(number), docname=doc.name, doctype=doc.doctype), ) for log in logs: call_log = frappe.get_doc(""Call Log"", log) call_log.add_link(link_type=doc.doctype, link_name=doc.name) call_log.save(ignore_permissions=True) frappe.db.commit() except Exception: frappe.log_error(title=_(""Error during caller information update"")) "," Called from hooks on creation of Contact or Lead to link all the existing conversations. SELECT cl.name FROM `tabCall Log` cl LEFT JOIN `tabDynamic Link` dl ON cl.name = dl.parent WHERE (cl.`from` like %(phone_number)s or cl.`to` like %(phone_number)s) GROUP BY cl.name HAVING SUM( CASE WHEN dl.link_doctype = %(doctype)s AND dl.link_name = %(docname)s THEN 1 ELSE 0 END )=0 ",58,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def link_existing_conversations(doc, state): if doc.doctype != ""Contact"": return try: numbers = [d.phone for d in doc.phone_nos] for number in numbers: number = strip_number(number) if not number: continue logs = frappe.db.sql_list( , dict(phone_number=""%{}"".format(number), docname=doc.name, doctype=doc.doctype), ) for log in logs: call_log = frappe.get_doc(""Call Log"", log) call_log.add_link(link_type=doc.doctype, link_name=doc.name) call_log.save(ignore_permissions=True) frappe.db.commit() except Exception: frappe.log_error(title=_(""Error during caller information update"")) ``` ###Assistant : Called from hooks on creation of Contact or Lead to link all the existing conversations. 
SELECT cl.name FROM `tabCall Log` cl LEFT JOIN `tabDynamic Link` dl ON cl.name = dl.parent WHERE (cl.`from` like %(phone_number)s or cl.`to` like %(phone_number)s) GROUP BY cl.name HAVING SUM( CASE WHEN dl.link_doctype = %(doctype)s AND dl.link_name = %(docname)s THEN 1 ELSE 0 END )=0 " 2080,"async def async_volume_up(self) -> None: if hasattr(self, ""volume_up""): await self.hass.async_add_executor_job(self.volume_up) return if ( self.volume_level is not None and self.volume_level < 1 and self.supported_features & MediaPlayerEntityFeature.VOLUME_SET ): await self.async_set_volume_level(min(1, self.volume_level + 0.1)) ","Turn volume up for media player. This method is a coroutine. ",11,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_volume_up(self) -> None: if hasattr(self, ""volume_up""): await self.hass.async_add_executor_job(self.volume_up) return if ( self.volume_level is not None and self.volume_level < 1 and self.supported_features & MediaPlayerEntityFeature.VOLUME_SET ): await self.async_set_volume_level(min(1, self.volume_level + 0.1)) ``` ###Assistant : Turn volume up for media player. This method is a coroutine. " 2081,"def infer_axes(self) -> bool: s = self.storable if s is None: return False self.get_attrs() return True "," infer the axes of my storer return a boolean indicating if we have a valid storer or not ",18,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def infer_axes(self) -> bool: s = self.storable if s is None: return False self.get_attrs() return True ``` ###Assistant : infer the axes of my storer return a boolean indicating if we have a valid storer or not " 2082,"def _validate_set_axis(self, new_labels, old_labels): new_labels = ensure_index(new_labels) old_len = len(old_labels) new_len = len(new_labels) if old_len != new_len: raise ValueError( f""Length mismatch: Expected axis has {old_len} elements, "" + ""new values have {new_len} elements"" ) return new_labels "," Validate the possibility of replacement of old labels with the new labels. Parameters ---------- new_labels : list-like The labels to replace with. old_labels : list-like The labels to replace. Returns ------- list-like The validated labels. ",35,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _validate_set_axis(self, new_labels, old_labels): new_labels = ensure_index(new_labels) old_len = len(old_labels) new_len = len(new_labels) if old_len != new_len: raise ValueError( f""Length mismatch: Expected axis has {old_len} elements, "" + ""new values have {new_len} elements"" ) return new_labels ``` ###Assistant : Validate the possibility of replacement of old labels with the new labels. Parameters ---------- new_labels : list-like The labels to replace with. old_labels : list-like The labels to replace. Returns ------- list-like The validated labels. " 2083,"def transform_data(result, translated_columns, query_builder) -> EventsResponse: final_result: EventsResponse = {""data"": result[""data""], ""meta"": result[""meta""]} for col in final_result[""meta""]: # Translate back column names that were converted to snuba format col[""name""] = translated_columns.get(col[""name""], col[""name""]) "," Transform internal names back to the public schema ones. When getting timeseries results via rollup, this function will zerofill the output results. 
",22,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transform_data(result, translated_columns, query_builder) -> EventsResponse: final_result: EventsResponse = {""data"": result[""data""], ""meta"": result[""meta""]} for col in final_result[""meta""]: # Translate back column names that were converted to snuba format col[""name""] = translated_columns.get(col[""name""], col[""name""]) ``` ###Assistant : Transform internal names back to the public schema ones. When getting timeseries results via rollup, this function will zerofill the output results. " 2084,"def plot_contour(*args, show=True, **kwargs): args = list(map(sympify, args)) plot_expr = check_arguments(args, 1, 2) series = [ContourSeries(*arg) for arg in plot_expr] plot_contours = Plot(*series, **kwargs) if len(plot_expr[0].free_symbols) > 2: raise ValueError('Contour Plot cannot Plot for more than two variables.') if show: plot_contours.show() return plot_contours "," Draws contour plot of a function Usage ===== Single plot ``plot_contour(expr, range_x, range_y, **kwargs)`` If the ranges are not specified, then a default range of (-10, 10) is used. Multiple plot with the same range. ``plot_contour(expr1, expr2, range_x, range_y, **kwargs)`` If the ranges are not specified, then a default range of (-10, 10) is used. Multiple plots with different ranges. ``plot_contour((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)`` Ranges have to be specified for every expression. Default range may change in the future if a more advanced default range detection algorithm is implemented. Arguments ========= expr : Expression representing the function along x. range_x : (:class:`Symbol`, float, float) A 3-tuple denoting the range of the x variable, e.g. (x, 0, 5). range_y : (:class:`Symbol`, float, float) A 3-tuple denoting the range of the y variable, e.g. (y, 0, 5). Keyword Arguments ================= Arguments for ``ContourSeries`` class: nb_of_points_x : int The x range is sampled uniformly at ``nb_of_points_x`` of points. nb_of_points_y : int The y range is sampled uniformly at ``nb_of_points_y`` of points. Aesthetics: surface_color : Function which returns a float Specifies the color for the surface of the plot. See :class:`sympy.plotting.Plot` for more details. If there are multiple plots, then the same series arguments are applied to all the plots. If you want to set these options separately, you can index the returned ``Plot`` object and set it. Arguments for ``Plot`` class: title : str Title of the plot. size : (float, float), optional A tuple in the form (width, height) in inches to specify the size of the overall figure. The default value is set to ``None``, meaning the size will be set by the default backend. See Also ======== Plot, ContourSeries ",283,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def plot_contour(*args, show=True, **kwargs): args = list(map(sympify, args)) plot_expr = check_arguments(args, 1, 2) series = [ContourSeries(*arg) for arg in plot_expr] plot_contours = Plot(*series, **kwargs) if len(plot_expr[0].free_symbols) > 2: raise ValueError('Contour Plot cannot Plot for more than two variables.') if show: plot_contours.show() return plot_contours ``` ###Assistant : Draws contour plot of a function Usage ===== Single plot ``plot_contour(expr, range_x, range_y, **kwargs)`` If the ranges are not specified, then a default range of (-10, 10) is used. 
Multiple plot with the same range. ``plot_contour(expr1, expr2, range_x, range_y, **kwargs)`` If the ranges are not specified, then a default range of (-10, 10) is used. Multiple plots with different ranges. ``plot_contour((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)`` Ranges have to be specified for every expression. Default range may change in the future if a more advanced default range detection algorithm is implemented. Arguments ========= expr : Expression representing the function along x. range_x : (:class:`Symbol`, float, float) A 3-tuple denoting the range of the x variable, e.g. (x, 0, 5). range_y : (:class:`Symbol`, float, float) A 3-tuple denoting the range of the y variable, e.g. (y, 0, 5). Keyword Arguments ================= Arguments for ``ContourSeries`` class: nb_of_points_x : int The x range is sampled uniformly at ``nb_of_points_x`` of points. nb_of_points_y : int The y range is sampled uniformly at ``nb_of_points_y`` of points. Aesthetics: surface_color : Function which returns a float Specifies the color for the surface of the plot. See :class:`sympy.plotting.Plot` for more details. If there are multiple plots, then the same series arguments are applied to all the plots. If you want to set these options separately, you can index the returned ``Plot`` object and set it. Arguments for ``Plot`` class: title : str Title of the plot. size : (float, float), optional A tuple in the form (width, height) in inches to specify the size of the overall figure. The default value is set to ``None``, meaning the size will be set by the default backend. See Also ======== Plot, ContourSeries " 2085,"def peek(self, n=0): self._check_can_read() # Relies on the undocumented fact that BufferedReader.peek() # always returns at least one byte (except at EOF), independent # of the value of n return self._buffer.peek(n) ","Return buffered data without advancing the file position. Always returns at least one byte of data, unless at EOF. The exact number of bytes returned is unspecified. ",27,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def peek(self, n=0): self._check_can_read() # Relies on the undocumented fact that BufferedReader.peek() # always returns at least one byte (except at EOF), independent # of the value of n return self._buffer.peek(n) ``` ###Assistant : Return buffered data without advancing the file position. Always returns at least one byte of data, unless at EOF. The exact number of bytes returned is unspecified. " 2086,"def cache_full(self) -> bool: if self._cache_info[""cache_full""]: return self._cache_info[""cache_full""] with self._lock: return self._cache_info[""cache_full""] ","bool: ``True`` if the cache has been fully populated. ``False`` if there are items still to be cached. ",18,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cache_full(self) -> bool: if self._cache_info[""cache_full""]: return self._cache_info[""cache_full""] with self._lock: return self._cache_info[""cache_full""] ``` ###Assistant : bool: ``True`` if the cache has been fully populated. ``False`` if there are items still to be cached. " 2087,"def get_queryset(self, request): if self.queryset is None: raise ImproperlyConfigured( f""{self.__class__.__name__} does not define a queryset. Set queryset on the class or "" f""override its get_queryset() method."" ) return self.queryset.all() "," Return the base queryset for the view. 
By default, this returns self.queryset.all(). Args: request: The current request ",17,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_queryset(self, request): if self.queryset is None: raise ImproperlyConfigured( f""{self.__class__.__name__} does not define a queryset. Set queryset on the class or "" f""override its get_queryset() method."" ) return self.queryset.all() ``` ###Assistant : Return the base queryset for the view. By default, this returns self.queryset.all(). Args: request: The current request " 2088,"def _preprocess(self, inputs): inputs = self._check_input_text(inputs) self._max_cls_len = 5 num_workers = self.kwargs[ 'num_workers'] if 'num_workers' in self.kwargs else 0 lazy_load = self.kwargs[ 'lazy_load'] if 'lazy_load' in self.kwargs else False # Prompt template: input_text + ""是"" + ""[MASK]"" * cls_seq_length prompt_template = [""是""] + [""[MASK]""] * self._max_cls_len "," Create the dataset and dataloader for the predict. ",8,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _preprocess(self, inputs): inputs = self._check_input_text(inputs) self._max_cls_len = 5 num_workers = self.kwargs[ 'num_workers'] if 'num_workers' in self.kwargs else 0 lazy_load = self.kwargs[ 'lazy_load'] if 'lazy_load' in self.kwargs else False # Prompt template: input_text + ""是"" + ""[MASK]"" * cls_seq_length prompt_template = [""是""] + [""[MASK]""] * self._max_cls_len ``` ###Assistant : Create the dataset and dataloader for the predict. " 2089,"def session_destroy(consul_url=None, token=None, session=None, **kwargs): ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error(""No Consul URL found."") ret[""message""] = ""No Consul URL found."" ret[""res""] = False return ret if not session: raise SaltInvocationError('Required argument ""session"" is missing.') query_params = {} if ""dc"" in kwargs: query_params[""dc""] = kwargs[""dc""] function = ""session/destroy/{}"".format(session) res = _query( consul_url=consul_url, function=function, token=token, method=""PUT"", query_params=query_params, ) if res[""res""]: ret[""res""] = True ret[""message""] = ""Destroyed Session {}."".format(session) else: ret[""res""] = False ret[""message""] = ""Unable to destroy session {}."".format(session) return ret "," Destroy session :param consul_url: The Consul server URL. :param session: The ID of the session to destroy. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the ""dc"" parameter. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_destroy session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ",55,86,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def session_destroy(consul_url=None, token=None, session=None, **kwargs): ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error(""No Consul URL found."") ret[""message""] = ""No Consul URL found."" ret[""res""] = False return ret if not session: raise SaltInvocationError('Required argument ""session"" is missing.') query_params = {} if ""dc"" in kwargs: query_params[""dc""] = kwargs[""dc""] function = ""session/destroy/{}"".format(session) res = _query( consul_url=consul_url, function=function, token=token, method=""PUT"", query_params=query_params, ) if res[""res""]: ret[""res""] = True ret[""message""] = ""Destroyed Session {}."".format(session) else: ret[""res""] = False ret[""message""] = ""Unable to destroy session {}."".format(session) return ret ``` ###Assistant : Destroy session :param consul_url: The Consul server URL. :param session: The ID of the session to destroy. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the ""dc"" parameter. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_destroy session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' " 2090,"def post_save_action(cls, info, instance, cleaned_input): manager = load_plugin_manager(info.context) cls.call_event(manager.collection_updated, instance) ",Override this method with `pass` to avoid triggering product webhook.,10,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def post_save_action(cls, info, instance, cleaned_input): manager = load_plugin_manager(info.context) cls.call_event(manager.collection_updated, instance) ``` ###Assistant : Override this method with `pass` to avoid triggering product webhook. " 2091,"def _deserialize_metric(metric_config): from keras import ( metrics as metrics_module, ) # pylint:disable=g-import-not-at-top if metric_config in [""accuracy"", ""acc"", ""crossentropy"", ""ce""]: # Do not deserialize accuracy and cross-entropy strings as we have special # case handling for these in compile, based on model output shape. return metric_config return metrics_module.deserialize(metric_config) ","Deserialize metrics, leaving special strings untouched.",6,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _deserialize_metric(metric_config): from keras import ( metrics as metrics_module, ) # pylint:disable=g-import-not-at-top if metric_config in [""accuracy"", ""acc"", ""crossentropy"", ""ce""]: # Do not deserialize accuracy and cross-entropy strings as we have special # case handling for these in compile, based on model output shape. return metric_config return metrics_module.deserialize(metric_config) ``` ###Assistant : Deserialize metrics, leaving special strings untouched. " 2092,"def softsign(x): return tf.math.softsign(x) @keras_export(""keras.activations.swish"") @tf.__internal__.dispatch.add_dispatch_support","Softsign activation function, `softsign(x) = x / (abs(x) + 1)`. Example Usage: >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32) >>> b = tf.keras.activations.softsign(a) >>> b.numpy() array([-0.5, 0. , 0.5], dtype=float32) Args: x: Input tensor. Returns: The softsign activation: `x / (abs(x) + 1)`. ",45,6,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def softsign(x): return tf.math.softsign(x) @keras_export(""keras.activations.swish"") @tf.__internal__.dispatch.add_dispatch_support ``` ###Assistant : Softsign activation function, `softsign(x) = x / (abs(x) + 1)`. Example Usage: >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32) >>> b = tf.keras.activations.softsign(a) >>> b.numpy() array([-0.5, 0. , 0.5], dtype=float32) Args: x: Input tensor. Returns: The softsign activation: `x / (abs(x) + 1)`. " 2093,"def compute_inlier_metric(self) -> None: import scipy.stats as ss nmb_previous_points = self.data['InlierMetric_nmb_points'] weibull_percentile = self.data['InlierMetric_weib_perc'] train_ft_df = self.data_dictionary['train_features'] train_ft_df_reindexed = train_ft_df.reindex( index=np.flip(train_ft_df.index) ) pairwise = pd.DataFrame( np.triu( pairwise_distances(train_ft_df_reindexed, n_jobs=self.thread_count) ), columns=train_ft_df_reindexed.index, index=train_ft_df_reindexed.index ) pairwise = pairwise.round(5) column_labels = [ '{}{}'.format('d', i) for i in range(1, nmb_previous_points+1) ] distances = pd.DataFrame( columns=column_labels, index=train_ft_df.index ) for index in train_ft_df.index[nmb_previous_points]: current_row = pairwise.loc[[index]] current_row_no_zeros = current_row.loc[ :, (current_row!=0).any(axis=0) ] distances.loc[[index]] = current_row_no_zeros.iloc[ :, :nmb_previous_points ] distances = distances.replace([np.inf, -np.inf], np.nan) drop_index = pd.isnull(distances).any(1) distances = distances[drop_index==0] inliers = pd.DataFrame(index=distances.index) for key in distances.keys(): current_distances = distances[key].dropna() fit_params = ss.weibull_min.fit(current_distances) cutoff = ss.weibull_min.ppf(weibull_percentile, *fit_params) is_inlier = np.where( current_distances<=cutoff, 1, 0 ) df_inlier = pd.DataFrame( {key+'_IsInlier':is_inlier}, index=distances.index ) inliers = pd.concat( [inliers, df_inlier], axis=1 ) self.data_dictionary['train_features'] = pd.DataFrame( data=inliers.sum(axis=1)/nmb_previous_points, columns=['inlier_metric'], index = train_ft_df.index ) percent_outliers = np.round( 100*(1-self.data_dictionary['iniler_metric'].sum()/ len(train_ft_df.index)), 2 ) logger.info('{percent_outliers}%% of data points were identified as outliers') return None "," Compute inlier metric from backwards distance distributions. This metric defines how well features from a timepoint fit into previous timepoints. ",20,145,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def compute_inlier_metric(self) -> None: import scipy.stats as ss nmb_previous_points = self.data['InlierMetric_nmb_points'] weibull_percentile = self.data['InlierMetric_weib_perc'] train_ft_df = self.data_dictionary['train_features'] train_ft_df_reindexed = train_ft_df.reindex( index=np.flip(train_ft_df.index) ) pairwise = pd.DataFrame( np.triu( pairwise_distances(train_ft_df_reindexed, n_jobs=self.thread_count) ), columns=train_ft_df_reindexed.index, index=train_ft_df_reindexed.index ) pairwise = pairwise.round(5) column_labels = [ '{}{}'.format('d', i) for i in range(1, nmb_previous_points+1) ] distances = pd.DataFrame( columns=column_labels, index=train_ft_df.index ) for index in train_ft_df.index[nmb_previous_points]: current_row = pairwise.loc[[index]] current_row_no_zeros = current_row.loc[ :, (current_row!=0).any(axis=0) ] distances.loc[[index]] = current_row_no_zeros.iloc[ :, :nmb_previous_points ] distances = distances.replace([np.inf, -np.inf], np.nan) drop_index = pd.isnull(distances).any(1) distances = distances[drop_index==0] inliers = pd.DataFrame(index=distances.index) for key in distances.keys(): current_distances = distances[key].dropna() fit_params = ss.weibull_min.fit(current_distances) cutoff = ss.weibull_min.ppf(weibull_percentile, *fit_params) is_inlier = np.where( current_distances<=cutoff, 1, 0 ) df_inlier = pd.DataFrame( {key+'_IsInlier':is_inlier}, index=distances.index ) inliers = pd.concat( [inliers, df_inlier], axis=1 ) self.data_dictionary['train_features'] = pd.DataFrame( data=inliers.sum(axis=1)/nmb_previous_points, columns=['inlier_metric'], index = train_ft_df.index ) percent_outliers = np.round( 100*(1-self.data_dictionary['iniler_metric'].sum()/ len(train_ft_df.index)), 2 ) logger.info('{percent_outliers}%% of data points were identified as outliers') return None ``` ###Assistant : Compute inlier metric from backwards distance distributions. This metric defines how well features from a timepoint fit into previous timepoints. " 2094,"def items_view(self, traverser, items): if len(items) == 1: traverser(items[0]) self.write("","") else: self.interleave(lambda: self.write("", ""), traverser, items) ","Traverse and separate the given *items* with a comma and append it to the buffer. If *items* is a single item sequence, a trailing comma will be added.",28,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def items_view(self, traverser, items): if len(items) == 1: traverser(items[0]) self.write("","") else: self.interleave(lambda: self.write("", ""), traverser, items) ``` ###Assistant : Traverse and separate the given *items* with a comma and append it to the buffer. If *items* is a single item sequence, a trailing comma will be added. " 2095,"def refresh_stats(self) -> None: try: self._mallctl(""epoch"", read=False, write=1) except Exception as e: logger.warning(""Failed to reload jemalloc stats: %s"", e) ","Request that jemalloc updates its internal statistics. This needs to be called before querying for stats, otherwise it will return stale values. ",22,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def refresh_stats(self) -> None: try: self._mallctl(""epoch"", read=False, write=1) except Exception as e: logger.warning(""Failed to reload jemalloc stats: %s"", e) ``` ###Assistant : Request that jemalloc updates its internal statistics. 
This needs to be called before querying for stats, otherwise it will return stale values. " 2096,"def test_to_numpy_array_multiweight_reduction(func, expected): G = nx.MultiDiGraph() weights = [-1, 2, 10.0] for w in weights: G.add_edge(0, 1, weight=w) A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float) assert np.allclose(A, [[0, expected], [0, 0]]) # Undirected case A = nx.to_numpy_array(G.to_undirected(), multigraph_weight=func, dtype=float) assert np.allclose(A, [[0, expected], [expected, 0]]) ",Test various functions for reducing multiedge weights.,7,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_to_numpy_array_multiweight_reduction(func, expected): G = nx.MultiDiGraph() weights = [-1, 2, 10.0] for w in weights: G.add_edge(0, 1, weight=w) A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float) assert np.allclose(A, [[0, expected], [0, 0]]) # Undirected case A = nx.to_numpy_array(G.to_undirected(), multigraph_weight=func, dtype=float) assert np.allclose(A, [[0, expected], [expected, 0]]) ``` ###Assistant : Test various functions for reducing multiedge weights. " 2097,"def test_vr_connector_causal_slice(self): view_rq_dict = { ""state"": ViewRequirement(""obs""), # shift array should be [-2, -1, 0] ""prev_states"": ViewRequirement(""obs"", shift=""-2:0""), # shift array should be [-4, -2, 0] ""prev_strided_states_even"": ViewRequirement(""obs"", shift=""-4:0:2""), # shift array should be [-3, -1] ""prev_strided_states_odd"": ViewRequirement(""obs"", shift=""-3:0:2""), } obs_arrs = np.arange(10)[:, None] + 1 config = PPOConfig().to_dict() ctx = ConnectorContext( view_requirements=view_rq_dict, config=config, is_policy_recurrent=True ) c = ViewRequirementAgentConnector(ctx) # keep a queue of observations obs_list = [] for t, obs in enumerate(obs_arrs): # t=0 is the next state of t=-1 data = AgentConnectorDataType( 0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1} ) processed = c([data]) for_action = processed[0].data.for_action if t == 0: obs_list.extend([obs for _ in range(5)]) else: # remove the first obs and add the current obs to the end obs_list.pop(0) obs_list.append(obs) # check state check(for_action[""state""], obs[None]) # check prev_states check( for_action[""prev_states""], np.stack(obs_list)[np.array([-3, -2, -1])][None], ) # check prev_strided_states_even check( for_action[""prev_strided_states_even""], np.stack(obs_list)[np.array([-5, -3, -1])][None], ) check( for_action[""prev_strided_states_odd""], np.stack(obs_list)[np.array([-4, -2])][None], ) ",Test that the ViewRequirementConnector can handle slice shifts correctly.,9,152,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_vr_connector_causal_slice(self): view_rq_dict = { ""state"": ViewRequirement(""obs""), # shift array should be [-2, -1, 0] ""prev_states"": ViewRequirement(""obs"", shift=""-2:0""), # shift array should be [-4, -2, 0] ""prev_strided_states_even"": ViewRequirement(""obs"", shift=""-4:0:2""), # shift array should be [-3, -1] ""prev_strided_states_odd"": ViewRequirement(""obs"", shift=""-3:0:2""), } obs_arrs = np.arange(10)[:, None] + 1 config = PPOConfig().to_dict() ctx = ConnectorContext( view_requirements=view_rq_dict, config=config, is_policy_recurrent=True ) c = ViewRequirementAgentConnector(ctx) # keep a queue of observations obs_list = [] for t, obs in enumerate(obs_arrs): # t=0 is the next state of t=-1 data = AgentConnectorDataType( 0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1} ) processed = c([data]) for_action = processed[0].data.for_action if t == 0: obs_list.extend([obs for _ in range(5)]) else: # remove the first obs and add the current obs to the end obs_list.pop(0) obs_list.append(obs) # check state check(for_action[""state""], obs[None]) # check prev_states check( for_action[""prev_states""], np.stack(obs_list)[np.array([-3, -2, -1])][None], ) # check prev_strided_states_even check( for_action[""prev_strided_states_even""], np.stack(obs_list)[np.array([-5, -3, -1])][None], ) check( for_action[""prev_strided_states_odd""], np.stack(obs_list)[np.array([-4, -2])][None], ) ``` ###Assistant : Test that the ViewRequirementConnector can handle slice shifts correctly. " 2098,"def render(self) -> RenderableType: return Padding( Align.right(FigletText(self.value), vertical=""middle""), (0, 1), style=""white on rgb(51,51,51)"", ) ",Build a Rich renderable to render the calculator display.,9,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def render(self) -> RenderableType: return Padding( Align.right(FigletText(self.value), vertical=""middle""), (0, 1), style=""white on rgb(51,51,51)"", ) ``` ###Assistant : Build a Rich renderable to render the calculator display. " 2099,"def in_place_subclassed_model_state_restoration(model): assert not model._is_graph_network # Restore layers and build attributes if ( hasattr(model, ""_original_attributes_cache"") and model._original_attributes_cache is not None ): # Models have sticky attribute assignment, so we want to be careful to # add back the previous attributes and track Layers by their original # names without adding dependencies on ""utility"" attributes which Models # exempt when they're constructed. setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for name, value in model._original_attributes_cache.items(): setattr(model, name, value) if isinstance(value, Layer): model._self_tracked_trackables.append(value) model._original_attributes_cache = None model._setattr_tracking = setattr_tracking else: # Restore to the state of a never-called model. _reset_build_compile_trackers(model) @keras_export(""keras.__internal__.models.clone_and_build_model"", v1=[])","Restores the original state of a model after it was ""reset"". This undoes this action of `_in_place_subclassed_model_reset`, which is called in `clone_and_build_model` if `in_place_reset` is set to True. Args: model: Instance of a Keras model created via subclassing, on which `_in_place_subclassed_model_reset` was previously called. 
",44,101,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def in_place_subclassed_model_state_restoration(model): assert not model._is_graph_network # Restore layers and build attributes if ( hasattr(model, ""_original_attributes_cache"") and model._original_attributes_cache is not None ): # Models have sticky attribute assignment, so we want to be careful to # add back the previous attributes and track Layers by their original # names without adding dependencies on ""utility"" attributes which Models # exempt when they're constructed. setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for name, value in model._original_attributes_cache.items(): setattr(model, name, value) if isinstance(value, Layer): model._self_tracked_trackables.append(value) model._original_attributes_cache = None model._setattr_tracking = setattr_tracking else: # Restore to the state of a never-called model. _reset_build_compile_trackers(model) @keras_export(""keras.__internal__.models.clone_and_build_model"", v1=[]) ``` ###Assistant : Restores the original state of a model after it was ""reset"". This undoes this action of `_in_place_subclassed_model_reset`, which is called in `clone_and_build_model` if `in_place_reset` is set to True. Args: model: Instance of a Keras model created via subclassing, on which `_in_place_subclassed_model_reset` was previously called. " 2100,"def _imaginary_unit_as_coefficient(arg): if getattr(arg, 'is_real', True): return None else: return arg.as_coefficient(S.ImaginaryUnit) @sympify_method_args", Helper to extract symbolic coefficient for imaginary unit ,8,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _imaginary_unit_as_coefficient(arg): if getattr(arg, 'is_real', True): return None else: return arg.as_coefficient(S.ImaginaryUnit) @sympify_method_args ``` ###Assistant : Helper to extract symbolic coefficient for imaginary unit " 2101,"def from_config(cls, config, custom_objects=None): if ""learning_rate"" in config: if isinstance(config[""learning_rate""], dict): config[""learning_rate""] = learning_rate_schedule.deserialize( config[""learning_rate""], custom_objects=custom_objects ) return cls(**config) ","Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional user-defined Python objects needed to recreate this optimizer. Returns: An optimizer instance. ",53,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def from_config(cls, config, custom_objects=None): if ""learning_rate"" in config: if isinstance(config[""learning_rate""], dict): config[""learning_rate""] = learning_rate_schedule.deserialize( config[""learning_rate""], custom_objects=custom_objects ) return cls(**config) ``` ###Assistant : Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional user-defined Python objects needed to recreate this optimizer. Returns: An optimizer instance. 
" 2102,"def test_generate_pdf_from_mail(self): mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, ""html.eml"")) pdf_path = os.path.join(self.parser.tempdir, ""html.eml.pdf"") with open(pdf_path, ""wb"") as file: file.write(self.parser.generate_pdf_from_mail(mail)) converted = os.path.join( self.parser.tempdir, ""html.eml.pdf.webp"", ) run_convert( density=300, scale=""500x5000>"", alpha=""remove"", strip=True, trim=False, auto_orient=True, input_file=f""{pdf_path}"", # Do net define an index to convert all pages. output_file=converted, logging_group=None, ) self.assertTrue(os.path.isfile(converted)) thumb_hash = self.imagehash(converted) # The created pdf is not reproducible. But the converted image should always look the same. expected_hash = self.imagehash( os.path.join(self.SAMPLE_FILES, ""html.eml.pdf.webp""), ) self.assertEqual( thumb_hash, expected_hash, f""PDF looks different. Check if {converted} looks weird."", ) "," GIVEN: - Fresh start WHEN: - pdf generation from simple eml file is requested THEN: - gotenberg is called and the resulting file is returned and look as expected. ",29,81,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_generate_pdf_from_mail(self): mail = self.parser.get_parsed(os.path.join(self.SAMPLE_FILES, ""html.eml"")) pdf_path = os.path.join(self.parser.tempdir, ""html.eml.pdf"") with open(pdf_path, ""wb"") as file: file.write(self.parser.generate_pdf_from_mail(mail)) converted = os.path.join( self.parser.tempdir, ""html.eml.pdf.webp"", ) run_convert( density=300, scale=""500x5000>"", alpha=""remove"", strip=True, trim=False, auto_orient=True, input_file=f""{pdf_path}"", # Do net define an index to convert all pages. output_file=converted, logging_group=None, ) self.assertTrue(os.path.isfile(converted)) thumb_hash = self.imagehash(converted) # The created pdf is not reproducible. But the converted image should always look the same. expected_hash = self.imagehash( os.path.join(self.SAMPLE_FILES, ""html.eml.pdf.webp""), ) self.assertEqual( thumb_hash, expected_hash, f""PDF looks different. Check if {converted} looks weird."", ) ``` ###Assistant : GIVEN: - Fresh start WHEN: - pdf generation from simple eml file is requested THEN: - gotenberg is called and the resulting file is returned and look as expected. " 2103,"def new(self, degree=0, color=None): (center_x, center_y, angle, inner_radius, outer_radius, outer_color, pointer_color, origin_color, line_width) = self.all pointer_color = color or pointer_color if self.figure != []: for figure in self.figure: self.graph_elem.DeleteFigure(figure) self.figure = [] d = degree - 90 self.all[2] = degree dx1 = int(2 * inner_radius * math.sin(d / 180 * math.pi)) dy1 = int(2 * inner_radius * math.cos(d / 180 * math.pi)) dx2 = int(outer_radius * math.sin(d / 180 * math.pi)) dy2 = int(outer_radius * math.cos(d / 180 * math.pi)) self.figure.append(self.graph_elem.DrawLine((center_x - dx1, center_y - dy1), (center_x + dx2, center_y + dy2), color=pointer_color, width=line_width)) self.figure.append(self.graph_elem.DrawCircle((center_x, center_y), inner_radius, fill_color=origin_color, line_color=outer_color, line_width=line_width)) "," Draw new pointer by angle, erase old pointer if exist degree defined as clockwise from negative x-axis. ",17,100,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def new(self, degree=0, color=None): (center_x, center_y, angle, inner_radius, outer_radius, outer_color, pointer_color, origin_color, line_width) = self.all pointer_color = color or pointer_color if self.figure != []: for figure in self.figure: self.graph_elem.DeleteFigure(figure) self.figure = [] d = degree - 90 self.all[2] = degree dx1 = int(2 * inner_radius * math.sin(d / 180 * math.pi)) dy1 = int(2 * inner_radius * math.cos(d / 180 * math.pi)) dx2 = int(outer_radius * math.sin(d / 180 * math.pi)) dy2 = int(outer_radius * math.cos(d / 180 * math.pi)) self.figure.append(self.graph_elem.DrawLine((center_x - dx1, center_y - dy1), (center_x + dx2, center_y + dy2), color=pointer_color, width=line_width)) self.figure.append(self.graph_elem.DrawCircle((center_x, center_y), inner_radius, fill_color=origin_color, line_color=outer_color, line_width=line_width)) ``` ###Assistant : Draw new pointer by angle, erase old pointer if exist degree defined as clockwise from negative x-axis. " 2104,"def postprocess(paths, images, data_out, score_thresh, label_names, output_dir, handle_id, visualization=True): results = data_out.copy_to_cpu() lod = data_out.lod()[0] check_dir(output_dir) if paths: assert type(paths) is list, ""type(paths) is not list."" if handle_id < len(paths): unhandled_paths = paths[handle_id:] unhandled_paths_num = len(unhandled_paths) else: unhandled_paths_num = 0 if images is not None: if handle_id < len(images): unhandled_paths = None unhandled_paths_num = len(images) - handle_id else: unhandled_paths_num = 0 output = list() for index in range(len(lod) - 1): output_i = {'data': []} if unhandled_paths and index < unhandled_paths_num: org_img_path = unhandled_paths[index] org_img = Image.open(org_img_path) else: org_img = images[index - unhandled_paths_num] org_img = org_img.astype(np.uint8) org_img = Image.fromarray(org_img[:, :, ::-1]) if visualization: org_img_path = get_save_image_name(org_img, output_dir, 'image_numpy_{}'.format((handle_id + index))) org_img.save(org_img_path) org_img_height = org_img.height org_img_width = org_img.width result_i = results[lod[index]:lod[index + 1]] for row in result_i: if len(row) != 6: continue if row[1] < score_thresh: continue category_id = int(row[0]) confidence = row[1] bbox = row[2:] dt = {} dt['label'] = label_names[category_id] dt['confidence'] = float(confidence) dt['left'], dt['top'], dt['right'], dt['bottom'] = clip_bbox(bbox, org_img_width, org_img_height) output_i['data'].append(dt) output.append(output_i) if visualization: output_i['save_path'] = draw_bounding_box_on_image(org_img_path, output_i['data'], output_dir) return output "," postprocess the lod_tensor produced by Executor.run Args: paths (list[str]): The paths of images. images (list(numpy.ndarray)): images data, shape of each is [H, W, C] data_out (lod_tensor): data output of predictor. output_dir (str): The path to store output images. visualization (bool): Whether to save image or not. score_thresh (float): the low limit of bounding box. label_names (list[str]): label names. handle_id (int): The number of images that have been handled. Returns: res (list[dict]): The result of vehicles detecion. 
keys include 'data', 'save_path', the corresponding value is: data (dict): the result of object detection, keys include 'left', 'top', 'right', 'bottom', 'label', 'confidence', the corresponding value is: left (float): The X coordinate of the upper left corner of the bounding box; top (float): The Y coordinate of the upper left corner of the bounding box; right (float): The X coordinate of the lower right corner of the bounding box; bottom (float): The Y coordinate of the lower right corner of the bounding box; label (str): The label of detection result; confidence (float): The confidence of detection result. save_path (str): The path to save output images. ",181,172,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def postprocess(paths, images, data_out, score_thresh, label_names, output_dir, handle_id, visualization=True): results = data_out.copy_to_cpu() lod = data_out.lod()[0] check_dir(output_dir) if paths: assert type(paths) is list, ""type(paths) is not list."" if handle_id < len(paths): unhandled_paths = paths[handle_id:] unhandled_paths_num = len(unhandled_paths) else: unhandled_paths_num = 0 if images is not None: if handle_id < len(images): unhandled_paths = None unhandled_paths_num = len(images) - handle_id else: unhandled_paths_num = 0 output = list() for index in range(len(lod) - 1): output_i = {'data': []} if unhandled_paths and index < unhandled_paths_num: org_img_path = unhandled_paths[index] org_img = Image.open(org_img_path) else: org_img = images[index - unhandled_paths_num] org_img = org_img.astype(np.uint8) org_img = Image.fromarray(org_img[:, :, ::-1]) if visualization: org_img_path = get_save_image_name(org_img, output_dir, 'image_numpy_{}'.format((handle_id + index))) org_img.save(org_img_path) org_img_height = org_img.height org_img_width = org_img.width result_i = results[lod[index]:lod[index + 1]] for row in result_i: if len(row) != 6: continue if row[1] < score_thresh: continue category_id = int(row[0]) confidence = row[1] bbox = row[2:] dt = {} dt['label'] = label_names[category_id] dt['confidence'] = float(confidence) dt['left'], dt['top'], dt['right'], dt['bottom'] = clip_bbox(bbox, org_img_width, org_img_height) output_i['data'].append(dt) output.append(output_i) if visualization: output_i['save_path'] = draw_bounding_box_on_image(org_img_path, output_i['data'], output_dir) return output ``` ###Assistant : postprocess the lod_tensor produced by Executor.run Args: paths (list[str]): The paths of images. images (list(numpy.ndarray)): images data, shape of each is [H, W, C] data_out (lod_tensor): data output of predictor. output_dir (str): The path to store output images. visualization (bool): Whether to save image or not. score_thresh (float): the low limit of bounding box. label_names (list[str]): label names. handle_id (int): The number of images that have been handled. Returns: res (list[dict]): The result of vehicles detecion. 
keys include 'data', 'save_path', the corresponding value is: data (dict): the result of object detection, keys include 'left', 'top', 'right', 'bottom', 'label', 'confidence', the corresponding value is: left (float): The X coordinate of the upper left corner of the bounding box; top (float): The Y coordinate of the upper left corner of the bounding box; right (float): The X coordinate of the lower right corner of the bounding box; bottom (float): The Y coordinate of the lower right corner of the bounding box; label (str): The label of detection result; confidence (float): The confidence of detection result. save_path (str): The path to save output images. " 2105,"def series_with_multilevel_index() -> Series: arrays = [ [""bar"", ""bar"", ""baz"", ""baz"", ""qux"", ""qux"", ""foo"", ""foo""], [""one"", ""two"", ""one"", ""two"", ""one"", ""two"", ""one"", ""two""], ] tuples = zip(*arrays) index = MultiIndex.from_tuples(tuples) data = np.random.randn(8) ser = Series(data, index=index) ser[3] = np.NaN return ser _narrow_series = { f""{dtype.__name__}-series"": tm.make_rand_series(name=""a"", dtype=dtype) for dtype in tm.NARROW_NP_DTYPES } _index_or_series_objs = {**indices_dict, **_series, **_narrow_series} @pytest.fixture(params=_index_or_series_objs.keys())"," Fixture with a Series with a 2-level MultiIndex. ",8,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def series_with_multilevel_index() -> Series: arrays = [ [""bar"", ""bar"", ""baz"", ""baz"", ""qux"", ""qux"", ""foo"", ""foo""], [""one"", ""two"", ""one"", ""two"", ""one"", ""two"", ""one"", ""two""], ] tuples = zip(*arrays) index = MultiIndex.from_tuples(tuples) data = np.random.randn(8) ser = Series(data, index=index) ser[3] = np.NaN return ser _narrow_series = { f""{dtype.__name__}-series"": tm.make_rand_series(name=""a"", dtype=dtype) for dtype in tm.NARROW_NP_DTYPES } _index_or_series_objs = {**indices_dict, **_series, **_narrow_series} @pytest.fixture(params=_index_or_series_objs.keys()) ``` ###Assistant : Fixture with a Series with a 2-level MultiIndex. " 2106,"def _async_stop(self) -> None: if self._async_unsub_state_changed: self._async_unsub_state_changed() self._async_unsub_state_changed = None ","Unregister the group from Home Assistant. This method must be run in the event loop. ",15,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _async_stop(self) -> None: if self._async_unsub_state_changed: self._async_unsub_state_changed() self._async_unsub_state_changed = None ``` ###Assistant : Unregister the group from Home Assistant. This method must be run in the event loop. 
" 2107,"def test_database_connection_url(generate_test_database_connection_url): url = generate_test_database_connection_url if url is None: yield None else: # TODO: https://github.com/PrefectHQ/orion/issues/2045 # Also temporarily override the environment variable, so that child # subprocesses that we spin off are correctly configured as well original_envvar = os.environ.get(""PREFECT_ORION_DATABASE_CONNECTION_URL"") os.environ[""PREFECT_ORION_DATABASE_CONNECTION_URL""] = url with temporary_settings({PREFECT_ORION_DATABASE_CONNECTION_URL: url}): yield url os.environ[""PREFECT_ORION_DATABASE_CONNECTION_URL""] = original_envvar @pytest.fixture(scope=""session"")"," Update the setting for the database connection url to the generated value from `generate_test_database_connection_url` This _must_ be separate from the generation of the test url because async fixtures are run in a separate context from the test suite. ",38,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_database_connection_url(generate_test_database_connection_url): url = generate_test_database_connection_url if url is None: yield None else: # TODO: https://github.com/PrefectHQ/orion/issues/2045 # Also temporarily override the environment variable, so that child # subprocesses that we spin off are correctly configured as well original_envvar = os.environ.get(""PREFECT_ORION_DATABASE_CONNECTION_URL"") os.environ[""PREFECT_ORION_DATABASE_CONNECTION_URL""] = url with temporary_settings({PREFECT_ORION_DATABASE_CONNECTION_URL: url}): yield url os.environ[""PREFECT_ORION_DATABASE_CONNECTION_URL""] = original_envvar @pytest.fixture(scope=""session"") ``` ###Assistant : Update the setting for the database connection url to the generated value from `generate_test_database_connection_url` This _must_ be separate from the generation of the test url because async fixtures are run in a separate context from the test suite. " 2108,"def define_by_run_func(trial) -> Optional[Dict[str, Any]]: # This param is not used in the objective function. activation = trial.suggest_categorical(""activation"", [""relu"", ""tanh""]) trial.suggest_float(""width"", 0, 20) trial.suggest_float(""height"", -100, 100) # Define-by-run allows for conditional search spaces. if activation == ""relu"": trial.suggest_float(""mult"", 1, 2) # Return all constants in a dictionary. return {""steps"": 100} ","Define-by-run function to create the search space. Ensure no actual computation takes place here. That should go into the trainable passed to ``Tuner`` (in this example, that's ``easy_objective``). For more information, see https://optuna.readthedocs.io/en/stable\ /tutorial/10_key_features/002_configurations.html This function should either return None or a dict with constant values. ",46,50,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def define_by_run_func(trial) -> Optional[Dict[str, Any]]: # This param is not used in the objective function. activation = trial.suggest_categorical(""activation"", [""relu"", ""tanh""]) trial.suggest_float(""width"", 0, 20) trial.suggest_float(""height"", -100, 100) # Define-by-run allows for conditional search spaces. if activation == ""relu"": trial.suggest_float(""mult"", 1, 2) # Return all constants in a dictionary. return {""steps"": 100} ``` ###Assistant : Define-by-run function to create the search space. Ensure no actual computation takes place here. 
That should go into the trainable passed to ``Tuner`` (in this example, that's ``easy_objective``). For more information, see https://optuna.readthedocs.io/en/stable\ /tutorial/10_key_features/002_configurations.html This function should either return None or a dict with constant values. " 2109,"def _compile_to_sklearn(self, expr): sklearn_pipeline_str = generate_pipeline_code( expr_to_tree(expr, self._pset), self.operators ) sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context) sklearn_pipeline.memory = self._memory if self.random_state: # Fix random state when the operator allows set_param_recursive( sklearn_pipeline.steps, ""random_state"", self.random_state ) return sklearn_pipeline ","Compile a DEAP pipeline into a sklearn pipeline. Parameters ---------- expr: DEAP individual The DEAP pipeline to be compiled Returns ------- sklearn_pipeline: sklearn.pipeline.Pipeline ",23,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _compile_to_sklearn(self, expr): sklearn_pipeline_str = generate_pipeline_code( expr_to_tree(expr, self._pset), self.operators ) sklearn_pipeline = eval(sklearn_pipeline_str, self.operators_context) sklearn_pipeline.memory = self._memory if self.random_state: # Fix random state when the operator allows set_param_recursive( sklearn_pipeline.steps, ""random_state"", self.random_state ) return sklearn_pipeline ``` ###Assistant : Compile a DEAP pipeline into a sklearn pipeline. Parameters ---------- expr: DEAP individual The DEAP pipeline to be compiled Returns ------- sklearn_pipeline: sklearn.pipeline.Pipeline " 2110,"def equals(self, other, failing_expression=False): if self.shape != getattr(other, 'shape', None): return False rv = True for i in range(self.rows): for j in range(self.cols): ans = self[i, j].equals(other[i, j], failing_expression) if ans is False: return False elif ans is not True and rv is True: rv = ans return rv ","Applies ``equals`` to corresponding elements of the matrices, trying to prove that the elements are equivalent, returning True if they are, False if any pair is not, and None (or the first failing expression if failing_expression is True) if it cannot be decided if the expressions are equivalent or not. This is, in general, an expensive operation. Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x >>> A = Matrix([x*(x - 1), 0]) >>> B = Matrix([x**2 - x, 0]) >>> A == B False >>> A.simplify() == B.simplify() True >>> A.equals(B) True >>> A.equals(2) False See Also ======== sympy.core.expr.Expr.equals ",103,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def equals(self, other, failing_expression=False): if self.shape != getattr(other, 'shape', None): return False rv = True for i in range(self.rows): for j in range(self.cols): ans = self[i, j].equals(other[i, j], failing_expression) if ans is False: return False elif ans is not True and rv is True: rv = ans return rv ``` ###Assistant : Applies ``equals`` to corresponding elements of the matrices, trying to prove that the elements are equivalent, returning True if they are, False if any pair is not, and None (or the first failing expression if failing_expression is True) if it cannot be decided if the expressions are equivalent or not. This is, in general, an expensive operation. 
Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x >>> A = Matrix([x*(x - 1), 0]) >>> B = Matrix([x**2 - x, 0]) >>> A == B False >>> A.simplify() == B.simplify() True >>> A.equals(B) True >>> A.equals(2) False See Also ======== sympy.core.expr.Expr.equals " 2111,"def load(self, loader): loader.add_option( ""block_list"", Sequence[str], [], ) "," Block matching requests and return an empty response with the specified HTTP status. Option syntax is ""/flow-filter/status-code"", where flow-filter describes which requests this rule should be applied to and status-code is the HTTP status code to return for blocked requests. The separator (""/"" in the example) can be any character. Setting a non-standard status code of 444 will close the connection without sending a response. ",65,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load(self, loader): loader.add_option( ""block_list"", Sequence[str], [], ) ``` ###Assistant : Block matching requests and return an empty response with the specified HTTP status. Option syntax is ""/flow-filter/status-code"", where flow-filter describes which requests this rule should be applied to and status-code is the HTTP status code to return for blocked requests. The separator (""/"" in the example) can be any character. Setting a non-standard status code of 444 will close the connection without sending a response. " 2112,"def train(self, tagged_docs): m1 = [self.source_lang_vec.dv[item.tags].flatten() for item in tagged_docs] m2 = [self.target_lang_vec.dv[item.tags].flatten() for item in tagged_docs] self.translation_matrix = np.linalg.lstsq(m2, m1, -1)[0] return self.translation_matrix ","Build the translation matrix to map from the source model's vectors to target model's vectors Parameters ---------- tagged_docs : list of :class:`~gensim.models.doc2vec.TaggedDocument`, Documents that will be used for training, both the source language document vector and target language document vector trained on those tagged documents. Returns ------- numpy.ndarray Translation matrix that maps from the source model's vectors to target model's vectors. ",61,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def train(self, tagged_docs): m1 = [self.source_lang_vec.dv[item.tags].flatten() for item in tagged_docs] m2 = [self.target_lang_vec.dv[item.tags].flatten() for item in tagged_docs] self.translation_matrix = np.linalg.lstsq(m2, m1, -1)[0] return self.translation_matrix ``` ###Assistant : Build the translation matrix to map from the source model's vectors to target model's vectors Parameters ---------- tagged_docs : list of :class:`~gensim.models.doc2vec.TaggedDocument`, Documents that will be used for training, both the source language document vector and target language document vector trained on those tagged documents. Returns ------- numpy.ndarray Translation matrix that maps from the source model's vectors to target model's vectors. " 2113,"def serialize(input, tree=""etree"", encoding=None, **serializer_opts): # XXX: Should we cache this? 
walker = treewalkers.getTreeWalker(tree) s = HTMLSerializer(**serializer_opts) return s.render(walker(input), encoding) ","Serializes the input token stream using the specified treewalker :arg input: the token stream to serialize :arg tree: the treewalker to use :arg encoding: the encoding to use :arg serializer_opts: any options to pass to the :py:class:`html5lib.serializer.HTMLSerializer` that gets created :returns: the tree serialized as a string Example: >>> from html5lib.html5parser import parse >>> from html5lib.serializer import serialize >>> token_stream = parse('

<html><body><p>Hi!</p></body></html>') >>> serialize(token_stream, omit_optional_tags=False) '<html><head></head><body><p>Hi!</p></body></html>
    ' ",66,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serialize(input, tree=""etree"", encoding=None, **serializer_opts): # XXX: Should we cache this? walker = treewalkers.getTreeWalker(tree) s = HTMLSerializer(**serializer_opts) return s.render(walker(input), encoding) ``` ###Assistant : Serializes the input token stream using the specified treewalker :arg input: the token stream to serialize :arg tree: the treewalker to use :arg encoding: the encoding to use :arg serializer_opts: any options to pass to the :py:class:`html5lib.serializer.HTMLSerializer` that gets created :returns: the tree serialized as a string Example: >>> from html5lib.html5parser import parse >>> from html5lib.serializer import serialize >>> token_stream = parse('

<html><body><p>Hi!</p></body></html>') >>> serialize(token_stream, omit_optional_tags=False) '<html><head></head><body><p>Hi!</p></body></html>
    ' " 2114,"def set_client_cli_parser(parser=None): if not parser: from jina.parsers.base import set_base_parser parser = set_base_parser() from jina.parsers.peapods.runtimes.remote import mixin_client_gateway_parser from jina.parsers.client import ( mixin_client_features_parser, mixin_comm_protocol_parser, ) mixin_client_gateway_parser(parser) mixin_client_features_parser(parser) mixin_comm_protocol_parser(parser) return parser ","Set the parser for the cli client :param parser: an optional existing parser to build upon :return: the parser ",19,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_client_cli_parser(parser=None): if not parser: from jina.parsers.base import set_base_parser parser = set_base_parser() from jina.parsers.peapods.runtimes.remote import mixin_client_gateway_parser from jina.parsers.client import ( mixin_client_features_parser, mixin_comm_protocol_parser, ) mixin_client_gateway_parser(parser) mixin_client_features_parser(parser) mixin_comm_protocol_parser(parser) return parser ``` ###Assistant : Set the parser for the cli client :param parser: an optional existing parser to build upon :return: the parser " 2115,"def send_sale_toggle_notification(info, instance, catalogue): manager = info.context.plugins now = datetime.now(pytz.utc) start_date = instance.start_date end_date = instance.end_date if (start_date and start_date <= now) and (not end_date or not end_date <= now): manager.sale_toggle(instance, catalogue) instance.notification_sent_datetime = now instance.save(update_fields=[""notification_sent_datetime""]) ","Send a notification about starting or ending sale if it hasn't been sent yet. Send the notification when the start date is before the current date and the sale is not already finished. ",33,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def send_sale_toggle_notification(info, instance, catalogue): manager = info.context.plugins now = datetime.now(pytz.utc) start_date = instance.start_date end_date = instance.end_date if (start_date and start_date <= now) and (not end_date or not end_date <= now): manager.sale_toggle(instance, catalogue) instance.notification_sent_datetime = now instance.save(update_fields=[""notification_sent_datetime""]) ``` ###Assistant : Send a notification about starting or ending sale if it hasn't been sent yet. Send the notification when the start date is before the current date and the sale is not already finished. " 2116,"def bcoo_todense(data, indices, *, spinfo): return bcoo_todense_p.bind(jnp.asarray(data), jnp.asarray(indices), spinfo=spinfo) @bcoo_todense_p.def_impl","Convert batched sparse matrix to a dense matrix. Args: data : array of shape ``batch_dims + (nse,) + block_dims``. indices : array of shape ``batch_dims + (n_sparse, nse)`` spinfo : BCOOInfo. In particular, this includes the shape of the matrix, which is equal to ``batch_dims + sparse_dims + block_dims`` where ``len(sparse_dims) == n_sparse`` Returns: mat : array with specified shape and dtype matching ``data`` ",64,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bcoo_todense(data, indices, *, spinfo): return bcoo_todense_p.bind(jnp.asarray(data), jnp.asarray(indices), spinfo=spinfo) @bcoo_todense_p.def_impl ``` ###Assistant : Convert batched sparse matrix to a dense matrix. 
Args: data : array of shape ``batch_dims + (nse,) + block_dims``. indices : array of shape ``batch_dims + (n_sparse, nse)`` spinfo : BCOOInfo. In particular, this includes the shape of the matrix, which is equal to ``batch_dims + sparse_dims + block_dims`` where ``len(sparse_dims) == n_sparse`` Returns: mat : array with specified shape and dtype matching ``data`` " 2117,"def ignore_ray_errors(self) -> Iterator[ResultOrError]: return self._Iterator( [r for r in self.result_or_errors if not isinstance(r.get(), RayError)] ) ","Return an iterator over the results, skipping only Ray errors. Similar to ignore_errors, but only skips Errors raised from the Ray framework. This is useful for application that wants to handle errors from user code differently. ",36,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ignore_ray_errors(self) -> Iterator[ResultOrError]: return self._Iterator( [r for r in self.result_or_errors if not isinstance(r.get(), RayError)] ) ``` ###Assistant : Return an iterator over the results, skipping only Ray errors. Similar to ignore_errors, but only skips Errors raised from the Ray framework. This is useful for application that wants to handle errors from user code differently. " 2118,"def certificate_was_accepted(self) -> None: if not self.is_overridable(): return False if self._certificate_accepted is None: raise ValueError(""No decision taken yet"") return self._certificate_accepted @dataclasses.dataclass",Check whether the certificate was accepted by the user.,9,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def certificate_was_accepted(self) -> None: if not self.is_overridable(): return False if self._certificate_accepted is None: raise ValueError(""No decision taken yet"") return self._certificate_accepted @dataclasses.dataclass ``` ###Assistant : Check whether the certificate was accepted by the user. " 2119,"def mask(self, row_indices, col_indices): return ( self.force_materialization() .list_of_partitions_to_combine[0] .mask(row_indices, col_indices) ) "," Create (synchronously) a mask that extracts the indices provided. Parameters ---------- row_indices : list-like, slice or label The row labels for the rows to extract. col_indices : list-like, slice or label The column labels for the columns to extract. Returns ------- PandasOnRayDataframeVirtualPartition A new ``PandasOnRayDataframeVirtualPartition`` object, materialized. ",47,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mask(self, row_indices, col_indices): return ( self.force_materialization() .list_of_partitions_to_combine[0] .mask(row_indices, col_indices) ) ``` ###Assistant : Create (synchronously) a mask that extracts the indices provided. Parameters ---------- row_indices : list-like, slice or label The row labels for the rows to extract. col_indices : list-like, slice or label The column labels for the columns to extract. Returns ------- PandasOnRayDataframeVirtualPartition A new ``PandasOnRayDataframeVirtualPartition`` object, materialized. 
" 2120,"def left_integral3D(facets, index, expr, vertices, hp_param, degree): value = S.Zero facet = facets[index] x0 = vertices[facet[0]] facet_len = len(facet) for i, fac in enumerate(facet): side = (vertices[fac], vertices[facet[(i + 1) % facet_len]]) value += distance_to_side(x0, side, hp_param[0]) * \ lineseg_integrate(facet, i, side, expr, degree) return value ","Computes the left integral of Eq 10 in Chin et al. Explanation =========== For the 3D case, this is the sum of the integral values over constituting line segments of the face (which is accessed by facets[index]) multiplied by the distance between the first point of facet and that line segment. Parameters ========== facets : List of faces of the 3-Polytope. index : Index of face over which integral is to be calculated. expr : Input polynomial. vertices : List of vertices that constitute the 3-Polytope. hp_param : The hyperplane parameters of the face. degree : Degree of the ``expr``. Examples ======== >>> from sympy.integrals.intpoly import left_integral3D >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\ (5, 0, 5), (5, 5, 0), (5, 5, 5)],\ [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\ [3, 1, 0, 2], [0, 4, 6, 2]] >>> facets = cube[1:] >>> vertices = cube[0] >>> left_integral3D(facets, 3, 1, vertices, ([0, -1, 0], -5), 0) -50 ",177,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def left_integral3D(facets, index, expr, vertices, hp_param, degree): value = S.Zero facet = facets[index] x0 = vertices[facet[0]] facet_len = len(facet) for i, fac in enumerate(facet): side = (vertices[fac], vertices[facet[(i + 1) % facet_len]]) value += distance_to_side(x0, side, hp_param[0]) * \ lineseg_integrate(facet, i, side, expr, degree) return value ``` ###Assistant : Computes the left integral of Eq 10 in Chin et al. Explanation =========== For the 3D case, this is the sum of the integral values over constituting line segments of the face (which is accessed by facets[index]) multiplied by the distance between the first point of facet and that line segment. Parameters ========== facets : List of faces of the 3-Polytope. index : Index of face over which integral is to be calculated. expr : Input polynomial. vertices : List of vertices that constitute the 3-Polytope. hp_param : The hyperplane parameters of the face. degree : Degree of the ``expr``. Examples ======== >>> from sympy.integrals.intpoly import left_integral3D >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\ (5, 0, 5), (5, 5, 0), (5, 5, 5)],\ [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\ [3, 1, 0, 2], [0, 4, 6, 2]] >>> facets = cube[1:] >>> vertices = cube[0] >>> left_integral3D(facets, 3, 1, vertices, ([0, -1, 0], -5), 0) -50 " 2121,"def xdropout(self, inputs): mask = tf.cast( 1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool, ) scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32) if self.drop_prob > 0: inputs = tf.where(mask, 0.0, inputs) * scale "," Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob. ",17,32,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def xdropout(self, inputs): mask = tf.cast( 1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool, ) scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32) if self.drop_prob > 0: inputs = tf.where(mask, 0.0, inputs) * scale ``` ###Assistant : Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob. " 2122,"def test_deltas_that_melt_dataframes(self): deltas = self._get_deltas_that_melt_dataframes() for delta in deltas: el = delta(DATAFRAME) el._legacy_add_rows(NEW_ROWS) df_proto = _get_data_frame(self.get_delta_from_queue()) # Test that the add_rows delta is properly melted rows = df_proto.data.cols[0].int64s.data self.assertEqual([2, 3, 4, 2, 3, 4], rows) ","Some element types require that their dataframes are 'melted' (https://pandas.pydata.org/docs/reference/api/pandas.melt.html) before being sent to the frontend. Test that the melting occurs. ",21,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_deltas_that_melt_dataframes(self): deltas = self._get_deltas_that_melt_dataframes() for delta in deltas: el = delta(DATAFRAME) el._legacy_add_rows(NEW_ROWS) df_proto = _get_data_frame(self.get_delta_from_queue()) # Test that the add_rows delta is properly melted rows = df_proto.data.cols[0].int64s.data self.assertEqual([2, 3, 4, 2, 3, 4], rows) ``` ###Assistant : Some element types require that their dataframes are 'melted' (https://pandas.pydata.org/docs/reference/api/pandas.melt.html) before being sent to the frontend. Test that the melting occurs. " 2123,"def sensors_temperatures(): ret = collections.defaultdict(list) basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*') # CentOS has an intermediate /device directory: # https://github.com/giampaolo/psutil/issues/971 # https://github.com/nicolargo/glances/issues/1060 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*')) basenames = sorted(set([x.split('_')[0] for x in basenames])) # Only add the coretemp hwmon entries if they're not already in # /sys/class/hwmon/ # https://github.com/giampaolo/psutil/issues/1708 # https://github.com/giampaolo/psutil/pull/1648 basenames2 = glob.glob( '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*') repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/') for name in basenames2: altname = repl.sub('/sys/class/hwmon/', name) if altname not in basenames: basenames.append(name) for base in basenames: try: path = base + '_input' current = float(bcat(path)) / 1000.0 path = os.path.join(os.path.dirname(base), 'name') unit_name = cat(path).strip() except (IOError, OSError, ValueError): # A lot of things can go wrong here, so let's just skip the # whole entry. Sure thing is Linux's /sys/class/hwmon really # is a stinky broken mess. 
# https://github.com/giampaolo/psutil/issues/1009 # https://github.com/giampaolo/psutil/issues/1101 # https://github.com/giampaolo/psutil/issues/1129 # https://github.com/giampaolo/psutil/issues/1245 # https://github.com/giampaolo/psutil/issues/1323 continue high = bcat(base + '_max', fallback=None) critical = bcat(base + '_crit', fallback=None) label = cat(base + '_label', fallback='').strip() if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append((label, current, high, critical)) # Indication that no sensors were detected in /sys/class/hwmon/ if not basenames: basenames = glob.glob('/sys/class/thermal/thermal_zone*') basenames = sorted(set(basenames)) for base in basenames: try: path = os.path.join(base, 'temp') current = float(bcat(path)) / 1000.0 path = os.path.join(base, 'type') unit_name = cat(path).strip() except (IOError, OSError, ValueError) as err: debug(err) continue trip_paths = glob.glob(base + '/trip_point*') trip_points = set(['_'.join( os.path.basename(p).split('_')[0:3]) for p in trip_paths]) critical = None high = None for trip_point in trip_points: path = os.path.join(base, trip_point + ""_type"") trip_type = cat(path, fallback='').strip() if trip_type == 'critical': critical = bcat(os.path.join(base, trip_point + ""_temp""), fallback=None) elif trip_type == 'high': high = bcat(os.path.join(base, trip_point + ""_temp""), fallback=None) if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append(('', current, high, critical)) return dict(ret) ","Return hardware (CPU and others) temperatures as a dict including hardware name, label, current, max and critical temperatures. Implementation notes: - /sys/class/hwmon looks like the most recent interface to retrieve this info, and this implementation relies on it only (old distros will probably use something else) - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon - /sys/class/thermal/thermal_zone* is another one but it's more difficult to parse ",65,326,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sensors_temperatures(): ret = collections.defaultdict(list) basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*') # CentOS has an intermediate /device directory: # https://github.com/giampaolo/psutil/issues/971 # https://github.com/nicolargo/glances/issues/1060 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*')) basenames = sorted(set([x.split('_')[0] for x in basenames])) # Only add the coretemp hwmon entries if they're not already in # /sys/class/hwmon/ # https://github.com/giampaolo/psutil/issues/1708 # https://github.com/giampaolo/psutil/pull/1648 basenames2 = glob.glob( '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*') repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/') for name in basenames2: altname = repl.sub('/sys/class/hwmon/', name) if altname not in basenames: basenames.append(name) for base in basenames: try: path = base + '_input' current = float(bcat(path)) / 1000.0 path = os.path.join(os.path.dirname(base), 'name') unit_name = cat(path).strip() except (IOError, OSError, ValueError): # A lot of things can go wrong here, so let's just skip the # whole entry. 
Sure thing is Linux's /sys/class/hwmon really # is a stinky broken mess. # https://github.com/giampaolo/psutil/issues/1009 # https://github.com/giampaolo/psutil/issues/1101 # https://github.com/giampaolo/psutil/issues/1129 # https://github.com/giampaolo/psutil/issues/1245 # https://github.com/giampaolo/psutil/issues/1323 continue high = bcat(base + '_max', fallback=None) critical = bcat(base + '_crit', fallback=None) label = cat(base + '_label', fallback='').strip() if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append((label, current, high, critical)) # Indication that no sensors were detected in /sys/class/hwmon/ if not basenames: basenames = glob.glob('/sys/class/thermal/thermal_zone*') basenames = sorted(set(basenames)) for base in basenames: try: path = os.path.join(base, 'temp') current = float(bcat(path)) / 1000.0 path = os.path.join(base, 'type') unit_name = cat(path).strip() except (IOError, OSError, ValueError) as err: debug(err) continue trip_paths = glob.glob(base + '/trip_point*') trip_points = set(['_'.join( os.path.basename(p).split('_')[0:3]) for p in trip_paths]) critical = None high = None for trip_point in trip_points: path = os.path.join(base, trip_point + ""_type"") trip_type = cat(path, fallback='').strip() if trip_type == 'critical': critical = bcat(os.path.join(base, trip_point + ""_temp""), fallback=None) elif trip_type == 'high': high = bcat(os.path.join(base, trip_point + ""_temp""), fallback=None) if high is not None: try: high = float(high) / 1000.0 except ValueError: high = None if critical is not None: try: critical = float(critical) / 1000.0 except ValueError: critical = None ret[unit_name].append(('', current, high, critical)) return dict(ret) ``` ###Assistant : Return hardware (CPU and others) temperatures as a dict including hardware name, label, current, max and critical temperatures. Implementation notes: - /sys/class/hwmon looks like the most recent interface to retrieve this info, and this implementation relies on it only (old distros will probably use something else) - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon - /sys/class/thermal/thermal_zone* is another one but it's more difficult to parse " 2124,"def preview_revision_for_task(request, page_id, task_id): page = get_object_or_404(Page, id=page_id) task = get_object_or_404(Task, id=task_id).specific try: task_state = TaskState.objects.get( page_revision__page=page, task=task, status=TaskState.STATUS_IN_PROGRESS ) except TaskState.DoesNotExist: messages.error( request, _( ""The page '{0}' is not currently awaiting moderation in task '{1}'."" ).format(page.get_admin_display_title(), task.name), ) return redirect(""wagtailadmin_home"") revision = task_state.page_revision if not task.get_actions(page, request.user): raise PermissionDenied page_to_view = revision.as_page_object() # TODO: provide workflow actions within this view return page_to_view.make_preview_request( request, page.default_preview_mode, extra_request_attrs={""revision_id"": revision.id}, ) ","Preview the revision linked to the in-progress TaskState of a specified Task. This enables pages in moderation to be edited and new TaskStates linked to the new revisions created, with preview links remaining valid",34,68,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def preview_revision_for_task(request, page_id, task_id): page = get_object_or_404(Page, id=page_id) task = get_object_or_404(Task, id=task_id).specific try: task_state = TaskState.objects.get( page_revision__page=page, task=task, status=TaskState.STATUS_IN_PROGRESS ) except TaskState.DoesNotExist: messages.error( request, _( ""The page '{0}' is not currently awaiting moderation in task '{1}'."" ).format(page.get_admin_display_title(), task.name), ) return redirect(""wagtailadmin_home"") revision = task_state.page_revision if not task.get_actions(page, request.user): raise PermissionDenied page_to_view = revision.as_page_object() # TODO: provide workflow actions within this view return page_to_view.make_preview_request( request, page.default_preview_mode, extra_request_attrs={""revision_id"": revision.id}, ) ``` ###Assistant : Preview the revision linked to the in-progress TaskState of a specified Task. This enables pages in moderation to be edited and new TaskStates linked to the new revisions created, with preview links remaining valid " 2125,"def push_async_callback(self, callback, /, *args, **kwds): _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection. _exit_wrapper.__wrapped__ = callback self._push_exit_callback(_exit_wrapper, False) return callback # Allow use as a decorator ","Registers an arbitrary coroutine function and arguments. Cannot suppress exceptions. ",10,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def push_async_callback(self, callback, /, *args, **kwds): _exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds) # We changed the signature, so using @wraps is not appropriate, but # setting __wrapped__ may still help with introspection. _exit_wrapper.__wrapped__ = callback self._push_exit_callback(_exit_wrapper, False) return callback # Allow use as a decorator ``` ###Assistant : Registers an arbitrary coroutine function and arguments. Cannot suppress exceptions. " 2126,"def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): if not self.streamlined: self.streamline() for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = _ustr(instring).expandtabs() instrlen = len(instring) loc = 0 preparseFn = self.preParse parseFn = self._parse ParserElement.resetCache() matches = 0 try: while loc <= instrlen and matches < maxMatches: try: preloc = preparseFn(instring, loc) nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) except ParseException: loc = preloc + 1 else: if nextLoc > loc: matches += 1 yield tokens, preloc, nextLoc if overlap: nextloc = preparseFn(instring, loc) if nextloc > loc: loc = nextLoc else: loc += 1 else: loc = nextLoc else: loc = preloc + 1 except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clearing out pyparsing internal stack trace if getattr(exc, '__traceback__', None) is not None: exc.__traceback__ = self._trim_traceback(exc.__traceback__) raise exc "," Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional ``maxMatches`` argument, to clip scanning after 'n' matches are found. If ``overlap`` is specified, then overlapping matches will be reported. 
Note that the start and end locations are reported relative to the string being parsed. See :class:`parseString` for more information on parsing strings with embedded tabs. Example:: source = ""sldjf123lsdjjkf345sldkjf879lkjsfd987"" print(source) for tokens, start, end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) prints:: sldjf123lsdjjkf345sldkjf879lkjsfd987 ^^^^^ sldjf ^^^^^^^ lsdjjkf ^^^^^^ sldkjf ^^^^^^ lkjsfd ",99,135,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): if not self.streamlined: self.streamline() for e in self.ignoreExprs: e.streamline() if not self.keepTabs: instring = _ustr(instring).expandtabs() instrlen = len(instring) loc = 0 preparseFn = self.preParse parseFn = self._parse ParserElement.resetCache() matches = 0 try: while loc <= instrlen and matches < maxMatches: try: preloc = preparseFn(instring, loc) nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) except ParseException: loc = preloc + 1 else: if nextLoc > loc: matches += 1 yield tokens, preloc, nextLoc if overlap: nextloc = preparseFn(instring, loc) if nextloc > loc: loc = nextLoc else: loc += 1 else: loc = nextLoc else: loc = preloc + 1 except ParseBaseException as exc: if ParserElement.verbose_stacktrace: raise else: # catch and re-raise exception from here, clearing out pyparsing internal stack trace if getattr(exc, '__traceback__', None) is not None: exc.__traceback__ = self._trim_traceback(exc.__traceback__) raise exc ``` ###Assistant : Scan the input string for expression matches. Each match will return the matching tokens, start location, and end location. May be called with optional ``maxMatches`` argument, to clip scanning after 'n' matches are found. If ``overlap`` is specified, then overlapping matches will be reported. Note that the start and end locations are reported relative to the string being parsed. See :class:`parseString` for more information on parsing strings with embedded tabs. Example:: source = ""sldjf123lsdjjkf345sldkjf879lkjsfd987"" print(source) for tokens, start, end in Word(alphas).scanString(source): print(' '*start + '^'*(end-start)) print(' '*start + tokens[0]) prints:: sldjf123lsdjjkf345sldkjf879lkjsfd987 ^^^^^ sldjf ^^^^^^^ lsdjjkf ^^^^^^ sldkjf ^^^^^^ lkjsfd " 2127,"def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs): return PandasOnDaskDataframePartition( self._data, call_queue=self.call_queue + [[func, args, kwargs]], length=length, width=width, ) "," Add a function to the call queue. Parameters ---------- func : callable Function to be added to the call queue. *args : iterable Additional positional arguments to be passed in `func`. length : distributed.Future or int, optional Length, or reference to length, of wrapped ``pandas.DataFrame``. width : distributed.Future or int, optional Width, or reference to width, of wrapped ``pandas.DataFrame``. **kwargs : dict Additional keyword arguments to be passed in `func`. Returns ------- PandasOnDaskDataframePartition A new ``PandasOnDaskDataframePartition`` object. Notes ----- The keyword arguments are sent as a dictionary. ",87,18,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs): return PandasOnDaskDataframePartition( self._data, call_queue=self.call_queue + [[func, args, kwargs]], length=length, width=width, ) ``` ###Assistant : Add a function to the call queue. Parameters ---------- func : callable Function to be added to the call queue. *args : iterable Additional positional arguments to be passed in `func`. length : distributed.Future or int, optional Length, or reference to length, of wrapped ``pandas.DataFrame``. width : distributed.Future or int, optional Width, or reference to width, of wrapped ``pandas.DataFrame``. **kwargs : dict Additional keyword arguments to be passed in `func`. Returns ------- PandasOnDaskDataframePartition A new ``PandasOnDaskDataframePartition`` object. Notes ----- The keyword arguments are sent as a dictionary. " 2128,"def to_json_string(self) -> str: dictionary = self.to_dict() for key, value in dictionary.items(): if isinstance(value, np.ndarray): dictionary[key] = value.tolist() # make sure private name ""_processor_class"" is correctly # saved as ""processor_class"" _processor_class = dictionary.pop(""_processor_class"", None) if _processor_class is not None: dictionary[""processor_class""] = _processor_class return json.dumps(dictionary, indent=2, sort_keys=True) + ""\n"" "," Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format. ",23,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_json_string(self) -> str: dictionary = self.to_dict() for key, value in dictionary.items(): if isinstance(value, np.ndarray): dictionary[key] = value.tolist() # make sure private name ""_processor_class"" is correctly # saved as ""processor_class"" _processor_class = dictionary.pop(""_processor_class"", None) if _processor_class is not None: dictionary[""processor_class""] = _processor_class return json.dumps(dictionary, indent=2, sort_keys=True) + ""\n"" ``` ###Assistant : Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format. " 2129,"def one_of_permissions_or_auth_filter_required(context, permissions): if not permissions: return True authorization_filters = [ p for p in permissions if isinstance(p, AuthorizationFilters) ] permissions = [p for p in permissions if not isinstance(p, AuthorizationFilters)] granted_by_permissions = False granted_by_authorization_filters = False # TODO: move this function from graphql to core from saleor.graphql.utils import get_user_or_app_from_context is_app = bool(getattr(context, ""app"", None)) requestor = get_user_or_app_from_context(context) if permissions: perm_checks_results = [] for permission in permissions: if is_app and permission == AccountPermissions.MANAGE_STAFF: # `MANAGE_STAFF` permission for apps is not supported, as apps using it # could create a staff user with full access. 
perm_checks_results.append(False) else: perm_checks_results.append(requestor.has_perm(permission)) granted_by_permissions = any(perm_checks_results) if authorization_filters: auth_filters_results = [] for p in authorization_filters: perm_fn = resolve_authorization_filter_fn(p) if perm_fn: res = perm_fn(context) auth_filters_results.append(bool(res)) granted_by_authorization_filters = any(auth_filters_results) return granted_by_permissions or granted_by_authorization_filters ","Determine whether user or app has rights to perform an action. The `context` parameter is the Context instance associated with the request. ",22,125,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def one_of_permissions_or_auth_filter_required(context, permissions): if not permissions: return True authorization_filters = [ p for p in permissions if isinstance(p, AuthorizationFilters) ] permissions = [p for p in permissions if not isinstance(p, AuthorizationFilters)] granted_by_permissions = False granted_by_authorization_filters = False # TODO: move this function from graphql to core from saleor.graphql.utils import get_user_or_app_from_context is_app = bool(getattr(context, ""app"", None)) requestor = get_user_or_app_from_context(context) if permissions: perm_checks_results = [] for permission in permissions: if is_app and permission == AccountPermissions.MANAGE_STAFF: # `MANAGE_STAFF` permission for apps is not supported, as apps using it # could create a staff user with full access. perm_checks_results.append(False) else: perm_checks_results.append(requestor.has_perm(permission)) granted_by_permissions = any(perm_checks_results) if authorization_filters: auth_filters_results = [] for p in authorization_filters: perm_fn = resolve_authorization_filter_fn(p) if perm_fn: res = perm_fn(context) auth_filters_results.append(bool(res)) granted_by_authorization_filters = any(auth_filters_results) return granted_by_permissions or granted_by_authorization_filters ``` ###Assistant : Determine whether user or app has rights to perform an action. The `context` parameter is the Context instance associated with the request. " 2130,"def _dirmatch(path, matchwith): matchlen = len(matchwith) if (path.startswith(matchwith) and path[matchlen:matchlen + 1] in [os.sep, '']): return True return False ","Check if path is within matchwith's tree. >>> _dirmatch('/home/foo/bar', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar/', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar2', '/home/foo/bar') False >>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar') False ",27,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _dirmatch(path, matchwith): matchlen = len(matchwith) if (path.startswith(matchwith) and path[matchlen:matchlen + 1] in [os.sep, '']): return True return False ``` ###Assistant : Check if path is within matchwith's tree. 
>>> _dirmatch('/home/foo/bar', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar/', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar') True >>> _dirmatch('/home/foo/bar2', '/home/foo/bar') False >>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar') False " 2131,"def deform_sampling(self, feat, offset): # it is an equivalent implementation of bilinear interpolation b, c, h, w = feat.shape weight = feat.new_ones(c, 1, 1, 1) y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) return y ","Sampling the feature x according to offset. Args: feat (Tensor): Feature offset (Tensor): Spatial offset for feature sampling ",18,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deform_sampling(self, feat, offset): # it is an equivalent implementation of bilinear interpolation b, c, h, w = feat.shape weight = feat.new_ones(c, 1, 1, 1) y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) return y ``` ###Assistant : Sampling the feature x according to offset. Args: feat (Tensor): Feature offset (Tensor): Spatial offset for feature sampling " 2132,"def page_identity(self, response, request_json=None): request_path = response.request.path_url if request_path == '/migrations_notran/': raise exc.IsMigrating('You have been redirected to the migration-in-progress page.') request_method = response.request.method.lower() self.last_elapsed = response.elapsed if isinstance(request_json, dict) and 'ds' in request_json: ds = request_json.ds else: ds = None data = self.extract_data(response) exc_str = ""%s (%s) received"" % (http.responses[response.status_code], response.status_code) exception = exception_from_status_code(response.status_code) if exception: raise exception(exc_str, data) if response.status_code in (http.OK, http.CREATED, http.ACCEPTED): # Not all JSON responses include a URL. Grab it from the request # object, if needed. if 'url' in data: endpoint = data['url'] else: endpoint = request_path data = objectify_response_json(response) if request_method in ('get', 'patch', 'put'): # Update existing resource and return it if are_same_endpoint(self.endpoint, request_path): self.json = data self.r = response return self registered_type = get_registered_page(request_path, request_method) return registered_type(self.connection, endpoint=endpoint, json=data, last_elapsed=response.elapsed, r=response, ds=ds) elif response.status_code == http.FORBIDDEN: if is_license_invalid(response): raise exc.LicenseInvalid(exc_str, data) elif is_license_exceeded(response): raise exc.LicenseExceeded(exc_str, data) else: raise exc.Forbidden(exc_str, data) elif response.status_code == http.BAD_REQUEST: if is_license_invalid(response): raise exc.LicenseInvalid(exc_str, data) if is_duplicate_error(response): raise exc.Duplicate(exc_str, data) else: raise exc.BadRequest(exc_str, data) else: raise exc.Unknown(exc_str, data) ","Takes a `requests.Response` and returns a new __item_class__ instance if the request method is not a get, or returns a __class__ instance if the request path is different than the caller's `endpoint`. ",32,171,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def page_identity(self, response, request_json=None): request_path = response.request.path_url if request_path == '/migrations_notran/': raise exc.IsMigrating('You have been redirected to the migration-in-progress page.') request_method = response.request.method.lower() self.last_elapsed = response.elapsed if isinstance(request_json, dict) and 'ds' in request_json: ds = request_json.ds else: ds = None data = self.extract_data(response) exc_str = ""%s (%s) received"" % (http.responses[response.status_code], response.status_code) exception = exception_from_status_code(response.status_code) if exception: raise exception(exc_str, data) if response.status_code in (http.OK, http.CREATED, http.ACCEPTED): # Not all JSON responses include a URL. Grab it from the request # object, if needed. if 'url' in data: endpoint = data['url'] else: endpoint = request_path data = objectify_response_json(response) if request_method in ('get', 'patch', 'put'): # Update existing resource and return it if are_same_endpoint(self.endpoint, request_path): self.json = data self.r = response return self registered_type = get_registered_page(request_path, request_method) return registered_type(self.connection, endpoint=endpoint, json=data, last_elapsed=response.elapsed, r=response, ds=ds) elif response.status_code == http.FORBIDDEN: if is_license_invalid(response): raise exc.LicenseInvalid(exc_str, data) elif is_license_exceeded(response): raise exc.LicenseExceeded(exc_str, data) else: raise exc.Forbidden(exc_str, data) elif response.status_code == http.BAD_REQUEST: if is_license_invalid(response): raise exc.LicenseInvalid(exc_str, data) if is_duplicate_error(response): raise exc.Duplicate(exc_str, data) else: raise exc.BadRequest(exc_str, data) else: raise exc.Unknown(exc_str, data) ``` ###Assistant : Takes a `requests.Response` and returns a new __item_class__ instance if the request method is not a get, or returns a __class__ instance if the request path is different than the caller's `endpoint`. " 2133,"def _installed_conda(self): if not self._is_conda: return None with Popen(""conda list"", shell=True, stdout=PIPE, stderr=PIPE) as conda: stdout, stderr = conda.communicate() if stderr: return ""Could not get package list"" installed = stdout.decode(self._encoding, errors=""replace"").splitlines() return ""\n"".join(installed) ", str: The list of installed Conda packages within Faceswap's scope. ,10,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _installed_conda(self): if not self._is_conda: return None with Popen(""conda list"", shell=True, stdout=PIPE, stderr=PIPE) as conda: stdout, stderr = conda.communicate() if stderr: return ""Could not get package list"" installed = stdout.decode(self._encoding, errors=""replace"").splitlines() return ""\n"".join(installed) ``` ###Assistant : str: The list of installed Conda packages within Faceswap's scope. " 2134,"def embedding(self, input_seq): seq_embeddings = self.item_embedding_layer(input_seq) seq_embeddings = seq_embeddings * (self.embedding_dim ** 0.5) # FIXME positional_seq = tf.expand_dims(tf.range(tf.shape(input_seq)[1]), 0) positional_seq = tf.tile(positional_seq, [tf.shape(input_seq)[0], 1]) positional_embeddings = self.positional_embedding_layer(positional_seq) return seq_embeddings, positional_embeddings ","Compute the sequence and positional embeddings. 
Args: input_seq (tf.Tensor): Input sequence Returns: tf.Tensor, tf.Tensor: - Sequence embeddings. - Positional embeddings. ",20,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def embedding(self, input_seq): seq_embeddings = self.item_embedding_layer(input_seq) seq_embeddings = seq_embeddings * (self.embedding_dim ** 0.5) # FIXME positional_seq = tf.expand_dims(tf.range(tf.shape(input_seq)[1]), 0) positional_seq = tf.tile(positional_seq, [tf.shape(input_seq)[0], 1]) positional_embeddings = self.positional_embedding_layer(positional_seq) return seq_embeddings, positional_embeddings ``` ###Assistant : Compute the sequence and positional embeddings. Args: input_seq (tf.Tensor): Input sequence Returns: tf.Tensor, tf.Tensor: - Sequence embeddings. - Positional embeddings. " 2135,"def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True): nocolor = msg if not log_only: has_newline = msg.endswith(u'\n') if has_newline: msg2 = msg[:-1] else: msg2 = msg if color: msg2 = stringc(msg2, color) if has_newline or newline: msg2 = msg2 + u'\n' msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr)) # Convert back to text string # We first convert to a byte string so that we get rid of # characters that are invalid in the user's locale msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace') # Note: After Display() class is refactored need to update the log capture # code in 'bin/ansible-connection' (and other relevant places). if not stderr: fileobj = sys.stdout else: fileobj = sys.stderr fileobj.write(msg2) try: fileobj.flush() except IOError as e: # Ignore EPIPE in case fileobj has been prematurely closed, eg. # when piping to ""head -n1"" if e.errno != errno.EPIPE: raise if logger and not screen_only: # We first convert to a byte string so that we get rid of # color and characters that are invalid in the user's locale msg2 = to_bytes(nocolor.lstrip(u'\n')) # Convert back to text string msg2 = to_text(msg2, self._output_encoding(stderr=stderr)) lvl = logging.INFO if color: # set logger level based on color (not great) try: lvl = color_to_log_level[color] except KeyError: # this should not happen, but JIC raise AnsibleAssertionError('Invalid color supplied to display: %s' % color) # actually log logger.log(lvl, msg2) "," Display a message to the user Note: msg *must* be a unicode string to prevent UnicodeError tracebacks. ",17,223,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True): nocolor = msg if not log_only: has_newline = msg.endswith(u'\n') if has_newline: msg2 = msg[:-1] else: msg2 = msg if color: msg2 = stringc(msg2, color) if has_newline or newline: msg2 = msg2 + u'\n' msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr)) # Convert back to text string # We first convert to a byte string so that we get rid of # characters that are invalid in the user's locale msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace') # Note: After Display() class is refactored need to update the log capture # code in 'bin/ansible-connection' (and other relevant places). if not stderr: fileobj = sys.stdout else: fileobj = sys.stderr fileobj.write(msg2) try: fileobj.flush() except IOError as e: # Ignore EPIPE in case fileobj has been prematurely closed, eg. 
# when piping to ""head -n1"" if e.errno != errno.EPIPE: raise if logger and not screen_only: # We first convert to a byte string so that we get rid of # color and characters that are invalid in the user's locale msg2 = to_bytes(nocolor.lstrip(u'\n')) # Convert back to text string msg2 = to_text(msg2, self._output_encoding(stderr=stderr)) lvl = logging.INFO if color: # set logger level based on color (not great) try: lvl = color_to_log_level[color] except KeyError: # this should not happen, but JIC raise AnsibleAssertionError('Invalid color supplied to display: %s' % color) # actually log logger.log(lvl, msg2) ``` ###Assistant : Display a message to the user Note: msg *must* be a unicode string to prevent UnicodeError tracebacks. " 2136,"def get_queryset(self, request): queryset = SavedFilter.objects.all() user = request.user if user.is_superuser: return queryset if user.is_anonymous: return queryset.filter(shared=True) return queryset.filter( Q(shared=True) | Q(user=user) ) "," Return only shared SavedFilters, or those owned by the current user, unless this is a superuser. ",16,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_queryset(self, request): queryset = SavedFilter.objects.all() user = request.user if user.is_superuser: return queryset if user.is_anonymous: return queryset.filter(shared=True) return queryset.filter( Q(shared=True) | Q(user=user) ) ``` ###Assistant : Return only shared SavedFilters, or those owned by the current user, unless this is a superuser. " 2137,"def list(self, request): report_list = [] report_content_type = ContentType.objects.get(app_label='extras', model='report') results = { r.name: r for r in JobResult.objects.filter( obj_type=report_content_type, status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES ).order_by('name', '-created').distinct('name').defer('data') } # Iterate through all available Reports. for module_name, reports in get_reports(): for report in reports: # Attach the relevant JobResult (if any) to each Report. report.result = results.get(report.full_name, None) report_list.append(report) serializer = serializers.ReportSerializer(report_list, many=True, context={ 'request': request, }) return Response(serializer.data) "," Compile all reports and their related results (if any). Result data is deferred in the list view. ",17,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list(self, request): report_list = [] report_content_type = ContentType.objects.get(app_label='extras', model='report') results = { r.name: r for r in JobResult.objects.filter( obj_type=report_content_type, status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES ).order_by('name', '-created').distinct('name').defer('data') } # Iterate through all available Reports. for module_name, reports in get_reports(): for report in reports: # Attach the relevant JobResult (if any) to each Report. report.result = results.get(report.full_name, None) report_list.append(report) serializer = serializers.ReportSerializer(report_list, many=True, context={ 'request': request, }) return Response(serializer.data) ``` ###Assistant : Compile all reports and their related results (if any). Result data is deferred in the list view. 
" 2138,"def _usable(self, queryset, name, value): user = self.request.user if self.request else None if not user or user.is_anonymous: if value: return queryset.filter(enabled=True, shared=True) return queryset.filter(Q(enabled=False) | Q(shared=False)) if value: return queryset.filter(enabled=True).filter(Q(shared=True) | Q(user=user)) return queryset.filter(Q(enabled=False) | Q(Q(shared=False) & ~Q(user=user))) "," Return only SavedFilters that are both enabled and are shared (or belong to the current user). ",16,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _usable(self, queryset, name, value): user = self.request.user if self.request else None if not user or user.is_anonymous: if value: return queryset.filter(enabled=True, shared=True) return queryset.filter(Q(enabled=False) | Q(shared=False)) if value: return queryset.filter(enabled=True).filter(Q(shared=True) | Q(user=user)) return queryset.filter(Q(enabled=False) | Q(Q(shared=False) & ~Q(user=user))) ``` ###Assistant : Return only SavedFilters that are both enabled and are shared (or belong to the current user). " 2139,"def get_current_enrollment(student, academic_year=None): current_academic_year = academic_year or frappe.defaults.get_defaults().academic_year program_enrollment_list = frappe.db.sql( , (student, current_academic_year), as_dict=1, ) if program_enrollment_list: return program_enrollment_list[0] else: return None "," select name as program_enrollment, student_name, program, student_batch_name as student_batch, student_category, academic_term, academic_year from `tabProgram Enrollment` where student = %s and academic_year = %s order by creation",26,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_current_enrollment(student, academic_year=None): current_academic_year = academic_year or frappe.defaults.get_defaults().academic_year program_enrollment_list = frappe.db.sql( , (student, current_academic_year), as_dict=1, ) if program_enrollment_list: return program_enrollment_list[0] else: return None ``` ###Assistant : select name as program_enrollment, student_name, program, student_batch_name as student_batch, student_category, academic_term, academic_year from `tabProgram Enrollment` where student = %s and academic_year = %s order by creation " 2140,"def inception_resnet_block(x, scale, block_type, block_idx, activation=""relu""): if block_type == ""block35"": branch_0 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(branch_1, 32, 3) branch_2 = conv2d_bn(x, 32, 1) branch_2 = conv2d_bn(branch_2, 48, 3) branch_2 = conv2d_bn(branch_2, 64, 3) branches = [branch_0, branch_1, branch_2] elif block_type == ""block17"": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 128, 1) branch_1 = conv2d_bn(branch_1, 160, [1, 7]) branch_1 = conv2d_bn(branch_1, 192, [7, 1]) branches = [branch_0, branch_1] elif block_type == ""block8"": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(branch_1, 224, [1, 3]) branch_1 = conv2d_bn(branch_1, 256, [3, 1]) branches = [branch_0, branch_1] else: raise ValueError( ""Unknown Inception-ResNet block type. 
"" 'Expects ""block35"", ""block17"" or ""block8"", ' ""but got: "" + str(block_type) ) block_name = block_type + ""_"" + str(block_idx) channel_axis = 1 if backend.image_data_format() == ""channels_first"" else 3 mixed = layers.Concatenate(axis=channel_axis, name=block_name + ""_mixed"")( branches ) up = conv2d_bn( mixed, backend.int_shape(x)[channel_axis], 1, activation=None, use_bias=True, name=block_name + ""_conv"", ) x = CustomScaleLayer()(x, up, scale) if activation is not None: x = layers.Activation(activation, name=block_name + ""_ac"")(x) return x @keras_export(""keras.applications.inception_resnet_v2.preprocess_input"")","Adds an Inception-ResNet block. This function builds 3 types of Inception-ResNet blocks mentioned in the paper, controlled by the `block_type` argument (which is the block name used in the official TF-slim implementation): - Inception-ResNet-A: `block_type='block35'` - Inception-ResNet-B: `block_type='block17'` - Inception-ResNet-C: `block_type='block8'` Args: x: input tensor. scale: scaling factor to scale the residuals (i.e., the output of passing `x` through an inception module) before adding them to the shortcut branch. Let `r` be the output from the residual branch, the output of this block will be `x + scale * r`. block_type: `'block35'`, `'block17'` or `'block8'`, determines the network structure in the residual branch. block_idx: an `int` used for generating layer names. The Inception-ResNet blocks are repeated many times in this network. We use `block_idx` to identify each of the repetitions. For example, the first Inception-ResNet-A block will have `block_type='block35', block_idx=0`, and the layer names will have a common prefix `'block35_0'`. activation: activation function to use at the end of the block (see [activations](../activations.md)). When `activation=None`, no activation is applied (i.e., ""linear"" activation: `a(x) = x`). Returns: Output tensor for the block. Raises: ValueError: if `block_type` is not one of `'block35'`, `'block17'` or `'block8'`. ",193,180,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inception_resnet_block(x, scale, block_type, block_idx, activation=""relu""): if block_type == ""block35"": branch_0 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(branch_1, 32, 3) branch_2 = conv2d_bn(x, 32, 1) branch_2 = conv2d_bn(branch_2, 48, 3) branch_2 = conv2d_bn(branch_2, 64, 3) branches = [branch_0, branch_1, branch_2] elif block_type == ""block17"": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 128, 1) branch_1 = conv2d_bn(branch_1, 160, [1, 7]) branch_1 = conv2d_bn(branch_1, 192, [7, 1]) branches = [branch_0, branch_1] elif block_type == ""block8"": branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(branch_1, 224, [1, 3]) branch_1 = conv2d_bn(branch_1, 256, [3, 1]) branches = [branch_0, branch_1] else: raise ValueError( ""Unknown Inception-ResNet block type. 
"" 'Expects ""block35"", ""block17"" or ""block8"", ' ""but got: "" + str(block_type) ) block_name = block_type + ""_"" + str(block_idx) channel_axis = 1 if backend.image_data_format() == ""channels_first"" else 3 mixed = layers.Concatenate(axis=channel_axis, name=block_name + ""_mixed"")( branches ) up = conv2d_bn( mixed, backend.int_shape(x)[channel_axis], 1, activation=None, use_bias=True, name=block_name + ""_conv"", ) x = CustomScaleLayer()(x, up, scale) if activation is not None: x = layers.Activation(activation, name=block_name + ""_ac"")(x) return x @keras_export(""keras.applications.inception_resnet_v2.preprocess_input"") ``` ###Assistant : Adds an Inception-ResNet block. This function builds 3 types of Inception-ResNet blocks mentioned in the paper, controlled by the `block_type` argument (which is the block name used in the official TF-slim implementation): - Inception-ResNet-A: `block_type='block35'` - Inception-ResNet-B: `block_type='block17'` - Inception-ResNet-C: `block_type='block8'` Args: x: input tensor. scale: scaling factor to scale the residuals (i.e., the output of passing `x` through an inception module) before adding them to the shortcut branch. Let `r` be the output from the residual branch, the output of this block will be `x + scale * r`. block_type: `'block35'`, `'block17'` or `'block8'`, determines the network structure in the residual branch. block_idx: an `int` used for generating layer names. The Inception-ResNet blocks are repeated many times in this network. We use `block_idx` to identify each of the repetitions. For example, the first Inception-ResNet-A block will have `block_type='block35', block_idx=0`, and the layer names will have a common prefix `'block35_0'`. activation: activation function to use at the end of the block (see [activations](../activations.md)). When `activation=None`, no activation is applied (i.e., ""linear"" activation: `a(x) = x`). Returns: Output tensor for the block. Raises: ValueError: if `block_type` is not one of `'block35'`, `'block17'` or `'block8'`. " 2141,"def _get_calculations(self): for selection in self._selections: if selection == ""raw"": continue logger.debug(""Calculating: %s"", selection) method = getattr(self, f""_calc_{selection}"") raw_keys = [key for key in self._stats if key.startswith(""raw_"")] for key in raw_keys: selected_key = f""{selection}_{key.replace('raw_', '')}"" self._stats[selected_key] = method(self._stats[key]) ", Perform the required calculations and populate :attr:`stats`. ,7,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_calculations(self): for selection in self._selections: if selection == ""raw"": continue logger.debug(""Calculating: %s"", selection) method = getattr(self, f""_calc_{selection}"") raw_keys = [key for key in self._stats if key.startswith(""raw_"")] for key in raw_keys: selected_key = f""{selection}_{key.replace('raw_', '')}"" self._stats[selected_key] = method(self._stats[key]) ``` ###Assistant : Perform the required calculations and populate :attr:`stats`. 
" 2142,"def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type=""dpm_solver""): if solver_type not in ['dpm_solver', 'taylor']: raise ValueError(""'solver_type' must be either 'dpm_solver' or 'taylor', got {}"".format(solver_type)) ns = self.noise_schedule dims = x.dim() model_prev_1, model_prev_0 = model_prev_list t_prev_1, t_prev_0 = t_prev_list lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( t_prev_0), ns.marginal_lambda(t) log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) alpha_t = torch.exp(log_alpha_t) h_0 = lambda_prev_0 - lambda_prev_1 h = lambda_t - lambda_prev_0 r0 = h_0 / h D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) if self.predict_x0: if solver_type == 'dpm_solver': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 ) else: if solver_type == 'dpm_solver': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 ) return x_t "," Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. Args: x: A pytorch tensor. The initial value at time `s`. model_prev_list: A list of pytorch tensor. The previous computed model values. t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) t: A pytorch tensor. The ending time, with the shape (x.shape[0],). solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend to use 'dpm_solver' type. Returns: x_t: A pytorch tensor. The approximated solution at time `t`. ",91,228,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type=""dpm_solver""): if solver_type not in ['dpm_solver', 'taylor']: raise ValueError(""'solver_type' must be either 'dpm_solver' or 'taylor', got {}"".format(solver_type)) ns = self.noise_schedule dims = x.dim() model_prev_1, model_prev_0 = model_prev_list t_prev_1, t_prev_0 = t_prev_list lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( t_prev_0), ns.marginal_lambda(t) log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) alpha_t = torch.exp(log_alpha_t) h_0 = lambda_prev_0 - lambda_prev_1 h = lambda_t - lambda_prev_0 r0 = h_0 / h D1_0 = expand_dims(1. 
/ r0, dims) * (model_prev_0 - model_prev_1) if self.predict_x0: if solver_type == 'dpm_solver': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 ) else: if solver_type == 'dpm_solver': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 ) return x_t ``` ###Assistant : Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. Args: x: A pytorch tensor. The initial value at time `s`. model_prev_list: A list of pytorch tensor. The previous computed model values. t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) t: A pytorch tensor. The ending time, with the shape (x.shape[0],). solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend to use 'dpm_solver' type. Returns: x_t: A pytorch tensor. The approximated solution at time `t`. " 2143,"def global_enum(cls, update_str=False): if issubclass(cls, Flag): cls.__repr__ = global_flag_repr else: cls.__repr__ = global_enum_repr if not issubclass(cls, ReprEnum) or update_str: cls.__str__ = global_str sys.modules[cls.__module__].__dict__.update(cls.__members__) return cls "," decorator that makes the repr() of an enum member reference its module instead of its class; also exports all members to the enum's module's global namespace ",26,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def global_enum(cls, update_str=False): if issubclass(cls, Flag): cls.__repr__ = global_flag_repr else: cls.__repr__ = global_enum_repr if not issubclass(cls, ReprEnum) or update_str: cls.__str__ = global_str sys.modules[cls.__module__].__dict__.update(cls.__members__) return cls ``` ###Assistant : decorator that makes the repr() of an enum member reference its module instead of its class; also exports all members to the enum's module's global namespace " 2144,"def create_training_target(self, target, run_eagerly=False): if self.has_training_target(): raise ValueError( ""The training_target field for the _TrainingEndpoint "" ""instance has already been populated"" ) if run_eagerly: # When run_eagerly, the target tensor is ignored, and the None placeholder # is created instead. 
self.training_target = _TrainingTarget( None, feedable=True, skip_target_weights=False ) return if self.should_skip_target(): self.training_target = _TrainingTarget(None) else: if target is not None and not backend.is_placeholder(target): feedable = False skip_target_weights = True else: feedable = True skip_target_weights = False if target is None: target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get( self.loss_fn, backend.dtype(self.output) ) target = backend.placeholder( ndim=len(self.shape), name=self.output_name + ""_target"", sparse=backend.is_sparse(self.output), dtype=target_dtype, ) self.training_target = _TrainingTarget( target, feedable=feedable, skip_target_weights=skip_target_weights, ) ","Create training_target instance and update the self.training_target. Note that the input target should just be a tensor or None, and corresponding training target will be created based on the output and loss_fn. Args: target: the target tensor for the current output. Could be None. run_eagerly: boolean, whether the model is in run_eagerly mode. Raises: ValueError if the training_target field for the current instance has already been populated. ",67,101,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_training_target(self, target, run_eagerly=False): if self.has_training_target(): raise ValueError( ""The training_target field for the _TrainingEndpoint "" ""instance has already been populated"" ) if run_eagerly: # When run_eagerly, the target tensor is ignored, and the None placeholder # is created instead. self.training_target = _TrainingTarget( None, feedable=True, skip_target_weights=False ) return if self.should_skip_target(): self.training_target = _TrainingTarget(None) else: if target is not None and not backend.is_placeholder(target): feedable = False skip_target_weights = True else: feedable = True skip_target_weights = False if target is None: target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get( self.loss_fn, backend.dtype(self.output) ) target = backend.placeholder( ndim=len(self.shape), name=self.output_name + ""_target"", sparse=backend.is_sparse(self.output), dtype=target_dtype, ) self.training_target = _TrainingTarget( target, feedable=feedable, skip_target_weights=skip_target_weights, ) ``` ###Assistant : Create training_target instance and update the self.training_target. Note that the input target should just be a tensor or None, and corresponding training target will be created based on the output and loss_fn. Args: target: the target tensor for the current output. Could be None. run_eagerly: boolean, whether the model is in run_eagerly mode. Raises: ValueError if the training_target field for the current instance has already been populated. " 2145,"def test_set_task_instance_state(run_id, execution_date, session, dag_maker): start_date = datetime_tz(2020, 1, 1) with dag_maker(""test_set_task_instance_state"", start_date=start_date, session=session) as dag: task_1 = DummyOperator(task_id=""task_1"") task_2 = DummyOperator(task_id=""task_2"") task_3 = DummyOperator(task_id=""task_3"") task_4 = DummyOperator(task_id=""task_4"") task_5 = DummyOperator(task_id=""task_5"") task_1 >> [task_2, task_3, task_4, task_5] dagrun = dag_maker.create_dagrun( run_id=run_id, execution_date=execution_date, state=State.FAILED, run_type=DagRunType.SCHEDULED, ) ",Test that set_task_instance_state updates the TaskInstance state and clear downstream failed,11,45,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_set_task_instance_state(run_id, execution_date, session, dag_maker): start_date = datetime_tz(2020, 1, 1) with dag_maker(""test_set_task_instance_state"", start_date=start_date, session=session) as dag: task_1 = DummyOperator(task_id=""task_1"") task_2 = DummyOperator(task_id=""task_2"") task_3 = DummyOperator(task_id=""task_3"") task_4 = DummyOperator(task_id=""task_4"") task_5 = DummyOperator(task_id=""task_5"") task_1 >> [task_2, task_3, task_4, task_5] dagrun = dag_maker.create_dagrun( run_id=run_id, execution_date=execution_date, state=State.FAILED, run_type=DagRunType.SCHEDULED, ) ``` ###Assistant : Test that set_task_instance_state updates the TaskInstance state and clear downstream failed " 2146,"async def async_step_link(self, user_input=None): errors = {} if user_input is not None: # Do not authenticate if the host is already configured self._async_abort_entries_match({CONF_HOST: self._host}) try: info = await authenticate( self.hass, self._host, self._port, self._servers ) except InvalidAuth: errors[""base""] = ""invalid_auth"" except Exception: # pylint: disable=broad-except _LOGGER.exception(""Unexpected exception"") errors[""base""] = ""unknown"" else: return self.async_create_entry(title=DEFAULT_NAME, data=info) return self.async_show_form(step_id=""link"", errors=errors) ",Handle linking and authenticting with the roon server.,8,56,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_step_link(self, user_input=None): errors = {} if user_input is not None: # Do not authenticate if the host is already configured self._async_abort_entries_match({CONF_HOST: self._host}) try: info = await authenticate( self.hass, self._host, self._port, self._servers ) except InvalidAuth: errors[""base""] = ""invalid_auth"" except Exception: # pylint: disable=broad-except _LOGGER.exception(""Unexpected exception"") errors[""base""] = ""unknown"" else: return self.async_create_entry(title=DEFAULT_NAME, data=info) return self.async_show_form(step_id=""link"", errors=errors) ``` ###Assistant : Handle linking and authenticting with the roon server. " 2147,"def test_get_sid_string_none(): sid_obj = salt.utils.win_dacl.get_sid(None) assert isinstance(sid_obj, pywintypes.SIDType) assert salt.utils.win_dacl.get_sid_string(sid_obj) == ""S-1-0-0"" "," Validate getting a null sid (S-1-0-0) when a null sid is passed ",12,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_sid_string_none(): sid_obj = salt.utils.win_dacl.get_sid(None) assert isinstance(sid_obj, pywintypes.SIDType) assert salt.utils.win_dacl.get_sid_string(sid_obj) == ""S-1-0-0"" ``` ###Assistant : Validate getting a null sid (S-1-0-0) when a null sid is passed " 2148,"def read(self, filename, split='train'): label_list = self.get_labels() vocab_info = self.get_vocab() "," Returns a dataset containing all the examples that can be read from the file path. If `self.lazy` is False, this eagerly reads all instances from `self._read()` and returns a `MapDataset`. If `self.lazy` is True, this returns an `IterDataset`, which internally relies on the generator created from `self._read()` to lazily produce examples. In this case your implementation of `_read()` must also be lazy (that is, not load all examples into memory at once). Args: filename (str): Path of data file to read, usually provided by `_get_data` function. 
split (str, optional): The split name of selected dataset. This only makes a different when data files of different splits have different structures. Returns: A `MapDataset|IterDataset`. ",112,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read(self, filename, split='train'): label_list = self.get_labels() vocab_info = self.get_vocab() ``` ###Assistant : Returns a dataset containing all the examples that can be read from the file path. If `self.lazy` is False, this eagerly reads all instances from `self._read()` and returns a `MapDataset`. If `self.lazy` is True, this returns an `IterDataset`, which internally relies on the generator created from `self._read()` to lazily produce examples. In this case your implementation of `_read()` must also be lazy (that is, not load all examples into memory at once). Args: filename (str): Path of data file to read, usually provided by `_get_data` function. split (str, optional): The split name of selected dataset. This only makes a different when data files of different splits have different structures. Returns: A `MapDataset|IterDataset`. " 2149,"def test_no_rule_for_attr(self): tag = self.soup.b fn = attribute_rule({""snowman"": ""barbecue""}) fn(tag) self.assertEqual(str(tag), ""baz"") "," Test that attribute_rule() drops attributes for which no rule has been defined. ",12,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_no_rule_for_attr(self): tag = self.soup.b fn = attribute_rule({""snowman"": ""barbecue""}) fn(tag) self.assertEqual(str(tag), ""baz"") ``` ###Assistant : Test that attribute_rule() drops attributes for which no rule has been defined. " 2150,"def _inspect_generic_serialization(base_obj, depth, parent, failure_set): assert not inspect.isfunction(base_obj) functions = inspect.getmembers(base_obj, predicate=inspect.isfunction) found = False with _printer.indent(): for name, obj in functions: serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable if found: break with _printer.indent(): members = inspect.getmembers(base_obj) for name, obj in members: if name.startswith(""__"") and name.endswith(""__"") or inspect.isbuiltin(obj): continue serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable if found: break if not found: _printer.print( f""WARNING: Did not find non-serializable object in {base_obj}. "" ""This may be an oversight."" ) return found @DeveloperAPI",Adds the first-found non-serializable element to the failure_set.,8,103,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _inspect_generic_serialization(base_obj, depth, parent, failure_set): assert not inspect.isfunction(base_obj) functions = inspect.getmembers(base_obj, predicate=inspect.isfunction) found = False with _printer.indent(): for name, obj in functions: serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable if found: break with _printer.indent(): members = inspect.getmembers(base_obj) for name, obj in members: if name.startswith(""__"") and name.endswith(""__"") or inspect.isbuiltin(obj): continue serializable, _ = inspect_serializability( obj, name=name, depth=depth - 1, _parent=parent, _failure_set=failure_set, ) found = found or not serializable if found: break if not found: _printer.print( f""WARNING: Did not find non-serializable object in {base_obj}. "" ""This may be an oversight."" ) return found @DeveloperAPI ``` ###Assistant : Adds the first-found non-serializable element to the failure_set. " 2151,"def is_dihedral(self): r if self._is_dihedral is not None: return self._is_dihedral order = self.order() if order % 2 == 1: self._is_dihedral = False return False if order == 2: self._is_dihedral = True return True if order == 4: # The dihedral group of order 4 is the Klein 4-group. self._is_dihedral = not self.is_cyclic return self._is_dihedral if self.is_abelian: # The only abelian dihedral groups are the ones of orders 2 and 4. self._is_dihedral = False return False # Now we know the group is of even order >= 6, and nonabelian. n = order // 2 # Handle special cases where there are exactly two generators. gens = self.generators if len(gens) == 2: x, y = gens a, b = x.order(), y.order() # Make a >= b if a < b: x, y, a, b = y, x, b, a # Using Theorem 2.1 of [3]: if {a, b} == {2}: self._is_dihedral = True return True # Using Theorem 1.1 of [3]: if (a, b) == (n, 2) and y*x*y == ~x: self._is_dihedral = True return True # Procede with algorithm of [1] # Find elements of orders 2 and n order_2, order_n = [], [] for p in self.elements: k = p.order() if k == 2: order_2.append(p) elif k == n: order_n.append(p) if len(order_2) != n + 1 - (n % 2): self._is_dihedral = False return False if not order_n: self._is_dihedral = False return False x = order_n[0] # Want an element y of order 2 that is not a power of x # (i.e. that is not the 180-deg rotation, when n is even). y = order_2[0] if n % 2 == 0 and y == x**(n//2): y = order_2[1] self._is_dihedral = (y*x*y == ~x) return self._is_dihedral "," Return ``True`` if the group is dihedral. Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup >>> G = PermutationGroup(Permutation(1, 6)(2, 5)(3, 4), Permutation(0, 1, 2, 3, 4, 5, 6)) >>> G.is_dihedral True >>> G = SymmetricGroup(3) >>> G.is_dihedral True >>> G = CyclicGroup(6) >>> G.is_dihedral False References ========== .. [1] https://math.stackexchange.com/a/827273 .. [2] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral.pdf .. [3] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral2.pdf .. [4] https://en.wikipedia.org/wiki/Dihedral_group ",70,287,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def is_dihedral(self): r if self._is_dihedral is not None: return self._is_dihedral order = self.order() if order % 2 == 1: self._is_dihedral = False return False if order == 2: self._is_dihedral = True return True if order == 4: # The dihedral group of order 4 is the Klein 4-group. self._is_dihedral = not self.is_cyclic return self._is_dihedral if self.is_abelian: # The only abelian dihedral groups are the ones of orders 2 and 4. self._is_dihedral = False return False # Now we know the group is of even order >= 6, and nonabelian. n = order // 2 # Handle special cases where there are exactly two generators. gens = self.generators if len(gens) == 2: x, y = gens a, b = x.order(), y.order() # Make a >= b if a < b: x, y, a, b = y, x, b, a # Using Theorem 2.1 of [3]: if {a, b} == {2}: self._is_dihedral = True return True # Using Theorem 1.1 of [3]: if (a, b) == (n, 2) and y*x*y == ~x: self._is_dihedral = True return True # Procede with algorithm of [1] # Find elements of orders 2 and n order_2, order_n = [], [] for p in self.elements: k = p.order() if k == 2: order_2.append(p) elif k == n: order_n.append(p) if len(order_2) != n + 1 - (n % 2): self._is_dihedral = False return False if not order_n: self._is_dihedral = False return False x = order_n[0] # Want an element y of order 2 that is not a power of x # (i.e. that is not the 180-deg rotation, when n is even). y = order_2[0] if n % 2 == 0 and y == x**(n//2): y = order_2[1] self._is_dihedral = (y*x*y == ~x) return self._is_dihedral ``` ###Assistant : Return ``True`` if the group is dihedral. Examples ======== >>> from sympy.combinatorics.perm_groups import PermutationGroup >>> from sympy.combinatorics.permutations import Permutation >>> from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup >>> G = PermutationGroup(Permutation(1, 6)(2, 5)(3, 4), Permutation(0, 1, 2, 3, 4, 5, 6)) >>> G.is_dihedral True >>> G = SymmetricGroup(3) >>> G.is_dihedral True >>> G = CyclicGroup(6) >>> G.is_dihedral False References ========== .. [1] https://math.stackexchange.com/a/827273 .. [2] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral.pdf .. [3] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral2.pdf .. [4] https://en.wikipedia.org/wiki/Dihedral_group " 2152,"def extend_rearport_cable_paths(instance, created, **kwargs): if created: rearport = instance.rear_port for cablepath in CablePath.objects.filter(_nodes__contains=rearport): cablepath.retrace() "," When a new FrontPort is created, add it to any CablePaths which end at its corresponding RearPort. ",17,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extend_rearport_cable_paths(instance, created, **kwargs): if created: rearport = instance.rear_port for cablepath in CablePath.objects.filter(_nodes__contains=rearport): cablepath.retrace() ``` ###Assistant : When a new FrontPort is created, add it to any CablePaths which end at its corresponding RearPort. 
" 2153,"def _load_images_to_cache(self, image_files, frame_dims, thumbnail_size): logger.debug(""Number image_files: %s, frame_dims: %s, thumbnail_size: %s"", len(image_files), frame_dims, thumbnail_size) num_images = (frame_dims[0] // thumbnail_size) * (frame_dims[1] // thumbnail_size) logger.debug(""num_images: %s"", num_images) if num_images == 0: return False samples = [] start_idx = len(image_files) - num_images if len(image_files) > num_images else 0 show_files = sorted(image_files, key=os.path.getctime)[start_idx:] dropped_files = [] for fname in show_files: try: img = Image.open(fname) except PermissionError as err: logger.debug(""Permission error opening preview file: '%s'. Original error: %s"", fname, str(err)) dropped_files.append(fname) continue except Exception as err: # pylint:disable=broad-except # Swallow any issues with opening an image rather than spamming console # Can happen when trying to read partially saved images logger.debug(""Error opening preview file: '%s'. Original error: %s"", fname, str(err)) dropped_files.append(fname) continue width, height = img.size scaling = thumbnail_size / max(width, height) logger.debug(""image width: %s, height: %s, scaling: %s"", width, height, scaling) try: img = img.resize((int(width * scaling), int(height * scaling))) except OSError as err: # Image only gets loaded when we call a method, so may error on partial loads logger.debug(""OS Error resizing preview image: '%s'. Original error: %s"", fname, err) dropped_files.append(fname) continue if img.size[0] != img.size[1]: # Pad to square new_img = Image.new(""RGB"", (thumbnail_size, thumbnail_size)) new_img.paste(img, ((thumbnail_size - img.size[0])//2, (thumbnail_size - img.size[1])//2)) img = new_img draw = ImageDraw.Draw(img) draw.rectangle(((0, 0), (thumbnail_size, thumbnail_size)), outline=""#E5E5E5"", width=1) samples.append(np.array(img)) samples = np.array(samples) if not np.any(samples): logger.debug(""No preview images collected."") return False if dropped_files: logger.debug(""Removing dropped files: %s"", dropped_files) show_files = [fname for fname in show_files if fname not in dropped_files] self._previewcache[""filenames""] = (self._previewcache[""filenames""] + show_files)[-num_images:] cache = self._previewcache[""images""] if cache is None: logger.debug(""Creating new cache"") cache = samples[-num_images:] else: logger.debug(""Appending to existing cache"") cache = np.concatenate((cache, samples))[-num_images:] self._previewcache[""images""] = cache logger.debug(""Cache shape: %s"", self._previewcache[""images""].shape) return True "," Load preview images to the image cache. Load new images and append to cache, filtering the cache the number of thumbnails that will fit inside the display panel. Parameters ---------- image_files: list A list of new image files that have been modified since the last check frame_dims: tuple The (width (`int`), height (`int`)) of the display panel that will display the preview thumbnail_size: int The size of each thumbnail that should be created Returns ------- bool ``True`` if images were successfully loaded to cache otherwise ``False`` ",86,281,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _load_images_to_cache(self, image_files, frame_dims, thumbnail_size): logger.debug(""Number image_files: %s, frame_dims: %s, thumbnail_size: %s"", len(image_files), frame_dims, thumbnail_size) num_images = (frame_dims[0] // thumbnail_size) * (frame_dims[1] // thumbnail_size) logger.debug(""num_images: %s"", num_images) if num_images == 0: return False samples = [] start_idx = len(image_files) - num_images if len(image_files) > num_images else 0 show_files = sorted(image_files, key=os.path.getctime)[start_idx:] dropped_files = [] for fname in show_files: try: img = Image.open(fname) except PermissionError as err: logger.debug(""Permission error opening preview file: '%s'. Original error: %s"", fname, str(err)) dropped_files.append(fname) continue except Exception as err: # pylint:disable=broad-except # Swallow any issues with opening an image rather than spamming console # Can happen when trying to read partially saved images logger.debug(""Error opening preview file: '%s'. Original error: %s"", fname, str(err)) dropped_files.append(fname) continue width, height = img.size scaling = thumbnail_size / max(width, height) logger.debug(""image width: %s, height: %s, scaling: %s"", width, height, scaling) try: img = img.resize((int(width * scaling), int(height * scaling))) except OSError as err: # Image only gets loaded when we call a method, so may error on partial loads logger.debug(""OS Error resizing preview image: '%s'. Original error: %s"", fname, err) dropped_files.append(fname) continue if img.size[0] != img.size[1]: # Pad to square new_img = Image.new(""RGB"", (thumbnail_size, thumbnail_size)) new_img.paste(img, ((thumbnail_size - img.size[0])//2, (thumbnail_size - img.size[1])//2)) img = new_img draw = ImageDraw.Draw(img) draw.rectangle(((0, 0), (thumbnail_size, thumbnail_size)), outline=""#E5E5E5"", width=1) samples.append(np.array(img)) samples = np.array(samples) if not np.any(samples): logger.debug(""No preview images collected."") return False if dropped_files: logger.debug(""Removing dropped files: %s"", dropped_files) show_files = [fname for fname in show_files if fname not in dropped_files] self._previewcache[""filenames""] = (self._previewcache[""filenames""] + show_files)[-num_images:] cache = self._previewcache[""images""] if cache is None: logger.debug(""Creating new cache"") cache = samples[-num_images:] else: logger.debug(""Appending to existing cache"") cache = np.concatenate((cache, samples))[-num_images:] self._previewcache[""images""] = cache logger.debug(""Cache shape: %s"", self._previewcache[""images""].shape) return True ``` ###Assistant : Load preview images to the image cache. Load new images and append to cache, filtering the cache the number of thumbnails that will fit inside the display panel. Parameters ---------- image_files: list A list of new image files that have been modified since the last check frame_dims: tuple The (width (`int`), height (`int`)) of the display panel that will display the preview thumbnail_size: int The size of each thumbnail that should be created Returns ------- bool ``True`` if images were successfully loaded to cache otherwise ``False`` " 2154,"def target_profile(self) -> t.Optional[PosixProfile]: return t.cast(PosixProfile, self.profiles[0]) if self.profiles else None ","The POSIX target profile, if it uses a different Python interpreter than the controller, otherwise None.",16,11,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def target_profile(self) -> t.Optional[PosixProfile]: return t.cast(PosixProfile, self.profiles[0]) if self.profiles else None ``` ###Assistant : The POSIX target profile, if it uses a different Python interpreter than the controller, otherwise None. " 2155,"def preprocess_transactions(self): p_bar = tqdm(range(14), desc=""Preprocessing transactions"") try: # 0. If optional fields not in the transactions add missing optional_fields = [ ""Sector"", ""Industry"", ""Country"", ""Region"", ""Fees"", ""Premium"", ""ISIN"", ] if not set(optional_fields).issubset(set(self.__transactions.columns)): for field in optional_fields: if field not in self.__transactions.columns: self.__transactions[field] = np.nan p_bar.n += 1 p_bar.refresh() # 1. Convert Date to datetime self.__transactions[""Date""] = pd.to_datetime(self.__transactions[""Date""]) p_bar.n += 1 p_bar.refresh() # 2. Sort transactions by date self.__transactions = self.__transactions.sort_values(by=""Date"") p_bar.n += 1 p_bar.refresh() # 3. Capitalize Ticker and Type [of instrument...] self.__transactions[""Ticker""] = self.__transactions[""Ticker""].map( lambda x: x.upper() ) self.__transactions[""Type""] = self.__transactions[""Type""].map( lambda x: x.upper() ) p_bar.n += 1 p_bar.refresh() # 4. Translate side: [""deposit"", ""buy""] -> 1 and [""withdrawal"", ""sell""] -> -1 self.__transactions[""Signal""] = self.__transactions[""Side""].map( lambda x: 1 if x.lower() in [""deposit"", ""buy""] else (-1 if x.lower() in [""withdrawal"", ""sell""] else 0) ) p_bar.n += 1 p_bar.refresh() # 5. Convert quantity to signed integer self.__transactions[""Quantity""] = ( abs(self.__transactions[""Quantity""]) * self.__transactions[""Signal""] ) p_bar.n += 1 p_bar.refresh() # 6. Determining the investment/divestment value self.__transactions[""Investment""] = ( self.__transactions[""Quantity""] * self.__transactions[""Price""] + self.__transactions[""Fees""] ) p_bar.n += 1 p_bar.refresh() # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) crypto_trades = self.__transactions[self.__transactions.Type == ""CRYPTO""] self.__transactions.loc[ (self.__transactions.Type == ""CRYPTO""), ""Ticker"" ] = [ f""{crypto}-{currency}"" for crypto, currency in zip( crypto_trades.Ticker, crypto_trades.Currency ) ] p_bar.n += 1 p_bar.refresh() # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided. 
# If isin not valid ticker is empty self.__transactions[""yf_Ticker""] = self.__transactions[""ISIN""].apply( lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan ) empty_tickers = list( self.__transactions[ (self.__transactions[""yf_Ticker""] == """") | (self.__transactions[""yf_Ticker""].isna()) ][""Ticker""].unique() ) # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported removed_tickers = [] for item in empty_tickers: with contextlib.redirect_stdout(None): # Suppress yfinance failed download message if occurs valid_ticker = not ( yf.download( item, start=datetime.datetime.now() + datetime.timedelta(days=-5), progress=False, ).empty ) if valid_ticker: # Invalid ISIN but valid ticker self.__transactions.loc[ self.__transactions[""Ticker""] == item, ""yf_Ticker"" ] = np.nan else: self.__transactions.loc[ self.__transactions[""Ticker""] == item, ""yf_Ticker"" ] = """" removed_tickers.append(item) # Merge reformated tickers into Ticker self.__transactions[""Ticker""] = self.__transactions[""yf_Ticker""].fillna( self.__transactions[""Ticker""] ) p_bar.n += 1 p_bar.refresh() # 9. Remove unsupported ISINs that came out empty self.__transactions.drop( self.__transactions[self.__transactions[""Ticker""] == """"].index, inplace=True, ) p_bar.n += 1 p_bar.refresh() # 10. Create tickers dictionary with structure {'Type': [Ticker]} for ticker_type in set(self.__transactions[""Type""]): self.tickers[ticker_type] = list( set( self.__transactions[ self.__transactions[""Type""].isin([ticker_type]) ][""Ticker""] ) ) p_bar.n += 1 p_bar.refresh() # 11. Create list with tickers except cash self.tickers_list = list(set(self.__transactions[""Ticker""])) p_bar.n += 1 p_bar.refresh() # 12. Save transactions inception date self.inception_date = self.__transactions[""Date""][0] p_bar.n += 1 p_bar.refresh() # 13. Populate fields Sector, Industry and Country if ( self.__transactions.loc[ self.__transactions[""Type""] == ""STOCK"", optional_fields, ] .isnull() .values.any() ): # If any fields is empty for stocks (overwrites any info there) self.load_company_data() p_bar.n += 1 p_bar.refresh() # Warn user of removed ISINs if removed_tickers: console.print( f""\n\n[red]The following tickers are not supported and were removed: {removed_tickers}."" f""\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN."" f""\nSuffix info on 'Yahoo Finance market coverage':"" "" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html"" f""\nE.g. IWDA -> IWDA.AS[/red]"" ) except Exception: console.print(""\nCould not preprocess transactions."") ","Method to preprocess, format and compute auxiliary fields. Preprocessing steps: 0. If optional fields not in the transactions add missing 1. Convert Date to datetime 2. Sort transactions by date 3. Capitalize Ticker and Type [of instrument...] 4. Translate side: [""deposit"", ""buy""] -> 1 and [""withdrawal"", ""sell""] -> -1 5. Convert quantity to signed integer 6. Determining the investment/divestment value 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided 9. Remove unsupported ISINs that came out empty 10. Create tickers dictionary with structure {'Type': [Ticker]} 11. Create list with tickers except cash 12. Save transactions inception date 13. Populate fields Sector, Industry and Country ",116,512,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def preprocess_transactions(self): p_bar = tqdm(range(14), desc=""Preprocessing transactions"") try: # 0. If optional fields not in the transactions add missing optional_fields = [ ""Sector"", ""Industry"", ""Country"", ""Region"", ""Fees"", ""Premium"", ""ISIN"", ] if not set(optional_fields).issubset(set(self.__transactions.columns)): for field in optional_fields: if field not in self.__transactions.columns: self.__transactions[field] = np.nan p_bar.n += 1 p_bar.refresh() # 1. Convert Date to datetime self.__transactions[""Date""] = pd.to_datetime(self.__transactions[""Date""]) p_bar.n += 1 p_bar.refresh() # 2. Sort transactions by date self.__transactions = self.__transactions.sort_values(by=""Date"") p_bar.n += 1 p_bar.refresh() # 3. Capitalize Ticker and Type [of instrument...] self.__transactions[""Ticker""] = self.__transactions[""Ticker""].map( lambda x: x.upper() ) self.__transactions[""Type""] = self.__transactions[""Type""].map( lambda x: x.upper() ) p_bar.n += 1 p_bar.refresh() # 4. Translate side: [""deposit"", ""buy""] -> 1 and [""withdrawal"", ""sell""] -> -1 self.__transactions[""Signal""] = self.__transactions[""Side""].map( lambda x: 1 if x.lower() in [""deposit"", ""buy""] else (-1 if x.lower() in [""withdrawal"", ""sell""] else 0) ) p_bar.n += 1 p_bar.refresh() # 5. Convert quantity to signed integer self.__transactions[""Quantity""] = ( abs(self.__transactions[""Quantity""]) * self.__transactions[""Signal""] ) p_bar.n += 1 p_bar.refresh() # 6. Determining the investment/divestment value self.__transactions[""Investment""] = ( self.__transactions[""Quantity""] * self.__transactions[""Price""] + self.__transactions[""Fees""] ) p_bar.n += 1 p_bar.refresh() # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) crypto_trades = self.__transactions[self.__transactions.Type == ""CRYPTO""] self.__transactions.loc[ (self.__transactions.Type == ""CRYPTO""), ""Ticker"" ] = [ f""{crypto}-{currency}"" for crypto, currency in zip( crypto_trades.Ticker, crypto_trades.Currency ) ] p_bar.n += 1 p_bar.refresh() # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided. # If isin not valid ticker is empty self.__transactions[""yf_Ticker""] = self.__transactions[""ISIN""].apply( lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan ) empty_tickers = list( self.__transactions[ (self.__transactions[""yf_Ticker""] == """") | (self.__transactions[""yf_Ticker""].isna()) ][""Ticker""].unique() ) # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported removed_tickers = [] for item in empty_tickers: with contextlib.redirect_stdout(None): # Suppress yfinance failed download message if occurs valid_ticker = not ( yf.download( item, start=datetime.datetime.now() + datetime.timedelta(days=-5), progress=False, ).empty ) if valid_ticker: # Invalid ISIN but valid ticker self.__transactions.loc[ self.__transactions[""Ticker""] == item, ""yf_Ticker"" ] = np.nan else: self.__transactions.loc[ self.__transactions[""Ticker""] == item, ""yf_Ticker"" ] = """" removed_tickers.append(item) # Merge reformated tickers into Ticker self.__transactions[""Ticker""] = self.__transactions[""yf_Ticker""].fillna( self.__transactions[""Ticker""] ) p_bar.n += 1 p_bar.refresh() # 9. Remove unsupported ISINs that came out empty self.__transactions.drop( self.__transactions[self.__transactions[""Ticker""] == """"].index, inplace=True, ) p_bar.n += 1 p_bar.refresh() # 10. 
Create tickers dictionary with structure {'Type': [Ticker]} for ticker_type in set(self.__transactions[""Type""]): self.tickers[ticker_type] = list( set( self.__transactions[ self.__transactions[""Type""].isin([ticker_type]) ][""Ticker""] ) ) p_bar.n += 1 p_bar.refresh() # 11. Create list with tickers except cash self.tickers_list = list(set(self.__transactions[""Ticker""])) p_bar.n += 1 p_bar.refresh() # 12. Save transactions inception date self.inception_date = self.__transactions[""Date""][0] p_bar.n += 1 p_bar.refresh() # 13. Populate fields Sector, Industry and Country if ( self.__transactions.loc[ self.__transactions[""Type""] == ""STOCK"", optional_fields, ] .isnull() .values.any() ): # If any fields is empty for stocks (overwrites any info there) self.load_company_data() p_bar.n += 1 p_bar.refresh() # Warn user of removed ISINs if removed_tickers: console.print( f""\n\n[red]The following tickers are not supported and were removed: {removed_tickers}."" f""\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN."" f""\nSuffix info on 'Yahoo Finance market coverage':"" "" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html"" f""\nE.g. IWDA -> IWDA.AS[/red]"" ) except Exception: console.print(""\nCould not preprocess transactions."") ``` ###Assistant : Method to preprocess, format and compute auxiliary fields. Preprocessing steps: 0. If optional fields not in the transactions add missing 1. Convert Date to datetime 2. Sort transactions by date 3. Capitalize Ticker and Type [of instrument...] 4. Translate side: [""deposit"", ""buy""] -> 1 and [""withdrawal"", ""sell""] -> -1 5. Convert quantity to signed integer 6. Determining the investment/divestment value 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided 9. Remove unsupported ISINs that came out empty 10. Create tickers dictionary with structure {'Type': [Ticker]} 11. Create list with tickers except cash 12. Save transactions inception date 13. Populate fields Sector, Industry and Country " 2156,"def _var_key(var): # pylint: disable=protected-access # Get the distributed variable if it exists. if hasattr(var, ""_distributed_container""): var = var._distributed_container() if getattr(var, ""_in_graph_mode"", False): return var._shared_name return var._unique_id ","Key for representing a primary variable, for looking up slots. In graph mode the name is derived from the var shared name. In eager mode the name is derived from the var unique id. If distribution strategy exists, get the primary variable first. Args: var: the variable. Returns: the unique name of the variable. ",54,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _var_key(var): # pylint: disable=protected-access # Get the distributed variable if it exists. if hasattr(var, ""_distributed_container""): var = var._distributed_container() if getattr(var, ""_in_graph_mode"", False): return var._shared_name return var._unique_id ``` ###Assistant : Key for representing a primary variable, for looking up slots. In graph mode the name is derived from the var shared name. In eager mode the name is derived from the var unique id. If distribution strategy exists, get the primary variable first. Args: var: the variable. Returns: the unique name of the variable. 
" 2157,"def test_stacking_classifier_base_regressor(): X_train, X_test, y_train, y_test = train_test_split( scale(X_iris), y_iris, stratify=y_iris, random_state=42 ) clf = StackingClassifier(estimators=[(""ridge"", Ridge())]) clf.fit(X_train, y_train) clf.predict(X_test) clf.predict_proba(X_test) assert clf.score(X_test, y_test) > 0.8 ",Check that a regressor can be used as the first layer in `StackingClassifier`.,13,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stacking_classifier_base_regressor(): X_train, X_test, y_train, y_test = train_test_split( scale(X_iris), y_iris, stratify=y_iris, random_state=42 ) clf = StackingClassifier(estimators=[(""ridge"", Ridge())]) clf.fit(X_train, y_train) clf.predict(X_test) clf.predict_proba(X_test) assert clf.score(X_test, y_test) > 0.8 ``` ###Assistant : Check that a regressor can be used as the first layer in `StackingClassifier`. " 2158,"def _get_style_dict(self, gc, rgbFace): attrib = {} forced_alpha = gc.get_forced_alpha() if gc.get_hatch() is not None: attrib['fill'] = ""url(#%s)"" % self._get_hatch(gc, rgbFace) if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) else: if rgbFace is None: attrib['fill'] = 'none' else: if tuple(rgbFace[:3]) != (0, 0, 0): attrib['fill'] = rgb2hex(rgbFace) if (len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) if forced_alpha and gc.get_alpha() != 1.0: attrib['opacity'] = _short_float_fmt(gc.get_alpha()) offset, seq = gc.get_dashes() if seq is not None: attrib['stroke-dasharray'] = ','.join( _short_float_fmt(val) for val in seq) attrib['stroke-dashoffset'] = _short_float_fmt(float(offset)) linewidth = gc.get_linewidth() if linewidth: rgb = gc.get_rgb() attrib['stroke'] = rgb2hex(rgb) if not forced_alpha and rgb[3] != 1.0: attrib['stroke-opacity'] = _short_float_fmt(rgb[3]) if linewidth != 1.0: attrib['stroke-width'] = _short_float_fmt(linewidth) if gc.get_joinstyle() != 'round': attrib['stroke-linejoin'] = gc.get_joinstyle() if gc.get_capstyle() != 'butt': attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()] return attrib ",Generate a style string from the GraphicsContext and rgbFace.,9,145,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _get_style_dict(self, gc, rgbFace): attrib = {} forced_alpha = gc.get_forced_alpha() if gc.get_hatch() is not None: attrib['fill'] = ""url(#%s)"" % self._get_hatch(gc, rgbFace) if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) else: if rgbFace is None: attrib['fill'] = 'none' else: if tuple(rgbFace[:3]) != (0, 0, 0): attrib['fill'] = rgb2hex(rgbFace) if (len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha): attrib['fill-opacity'] = _short_float_fmt(rgbFace[3]) if forced_alpha and gc.get_alpha() != 1.0: attrib['opacity'] = _short_float_fmt(gc.get_alpha()) offset, seq = gc.get_dashes() if seq is not None: attrib['stroke-dasharray'] = ','.join( _short_float_fmt(val) for val in seq) attrib['stroke-dashoffset'] = _short_float_fmt(float(offset)) linewidth = gc.get_linewidth() if linewidth: rgb = gc.get_rgb() attrib['stroke'] = rgb2hex(rgb) if not forced_alpha and rgb[3] != 1.0: attrib['stroke-opacity'] = _short_float_fmt(rgb[3]) if linewidth != 1.0: attrib['stroke-width'] = _short_float_fmt(linewidth) if gc.get_joinstyle() != 'round': attrib['stroke-linejoin'] = gc.get_joinstyle() if gc.get_capstyle() != 'butt': attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()] return attrib ``` ###Assistant : Generate a style string from the GraphicsContext and rgbFace. " 2159,"def test_union(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = # union all ret = self.command_executor.execute_command( parse_sql(sql.format(union='ALL'), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) == ['a1', 'target'] assert ret_df.shape[0] == 3 + 2 # union ret = self.command_executor.execute_command( parse_sql(sql.format(union=''), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) == ['a1', 'target'] assert ret_df.shape[0] == 3 "," SELECT a as a1, b as target FROM pg.tasks UNION {union} SELECT model.a as a2, model.p as target2 FROM pg.tasks as t JOIN mindsdb.task_model as model WHERE t.a=1 ",28,85,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_union(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = # union all ret = self.command_executor.execute_command( parse_sql(sql.format(union='ALL'), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) == ['a1', 'target'] assert ret_df.shape[0] == 3 + 2 # union ret = self.command_executor.execute_command( parse_sql(sql.format(union=''), dialect='mindsdb')) assert ret.error_code is None ret_df = self.ret_to_df(ret) assert list(ret_df.columns) == ['a1', 'target'] assert ret_df.shape[0] == 3 ``` ###Assistant : SELECT a as a1, b as target FROM pg.tasks UNION {union} SELECT model.a as a2, model.p as target2 FROM pg.tasks as t JOIN mindsdb.task_model as model WHERE t.a=1 " 2160,"def test_run_cleanup_tables(self, clean_table_mock, table_names): base_kwargs = dict( clean_before_timestamp=None, dry_run=None, verbose=None, ) run_cleanup(**base_kwargs, table_names=table_names) assert clean_table_mock.call_count == len(table_names) if table_names else len(config_dict) "," ``_cleanup_table`` should be called for each table in subset if one is provided else should be called for all tables. ",20,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_run_cleanup_tables(self, clean_table_mock, table_names): base_kwargs = dict( clean_before_timestamp=None, dry_run=None, verbose=None, ) run_cleanup(**base_kwargs, table_names=table_names) assert clean_table_mock.call_count == len(table_names) if table_names else len(config_dict) ``` ###Assistant : ``_cleanup_table`` should be called for each table in subset if one is provided else should be called for all tables. " 2161,"def test_set_serialize_call_old_signature(self, get_import, session): serialize_watcher = MagicMock() "," When XCom.serialize_value takes only param ``value``, other kwargs should be ignored. ",11,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_set_serialize_call_old_signature(self, get_import, session): serialize_watcher = MagicMock() ``` ###Assistant : When XCom.serialize_value takes only param ``value``, other kwargs should be ignored. " 2162,"def gmean(a, axis=0, dtype=None, weights=None): if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it log_a = np.log(np.array(a, dtype=dtype)) elif dtype: # Must change the default dtype allowing array type if isinstance(a, np.ma.MaskedArray): log_a = np.log(np.ma.asarray(a, dtype=dtype)) else: log_a = np.log(np.asarray(a, dtype=dtype)) else: log_a = np.log(a) if weights is not None: weights = np.asanyarray(weights, dtype=dtype) return np.exp(np.average(log_a, axis=axis, weights=weights)) ","Compute the geometric mean along the specified axis. Return the geometric average of the array elements. That is: n-th root of (x1 * x2 * ... * xn) Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : int or None, optional Axis along which the geometric mean is computed. Default is 0. If None, compute over the whole array `a`. 
dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of a, unless a has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. weights : array_like, optional The `weights` array must be broadcastable to the same shape as `a`. Default is None, which gives each value a weight of 1.0. Returns ------- gmean : ndarray See `dtype` parameter above. See Also -------- numpy.mean : Arithmetic average numpy.average : Weighted average hmean : Harmonic mean Notes ----- The geometric average is computed over a single dimension of the input array, axis=0 by default, or all values in the array if axis=None. float64 intermediate and return values are used for integer inputs. Beginning in SciPy 1.9, ``np.matrix`` inputs are converted to ``np.ndarray``s before the calculation is performed. In this case, the output will be a scalar or ``np.ndarray`` of appropriate shape rather than a 2D ``np.matrix``. Similarly, while masked elements of masked arrays are still ignored, the output will be a scalar or ``np.ndarray`` rather than a masked array with ``mask=False``. References ---------- .. [1] ""Weighted Geometric Mean"", *Wikipedia*, https://en.wikipedia.org/wiki/Weighted_geometric_mean. Examples -------- >>> from scipy.stats import gmean >>> gmean([1, 4]) 2.0 >>> gmean([1, 2, 3, 4, 5, 6, 7]) 3.3800151591412964 ",301,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def gmean(a, axis=0, dtype=None, weights=None): if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it log_a = np.log(np.array(a, dtype=dtype)) elif dtype: # Must change the default dtype allowing array type if isinstance(a, np.ma.MaskedArray): log_a = np.log(np.ma.asarray(a, dtype=dtype)) else: log_a = np.log(np.asarray(a, dtype=dtype)) else: log_a = np.log(a) if weights is not None: weights = np.asanyarray(weights, dtype=dtype) return np.exp(np.average(log_a, axis=axis, weights=weights)) ``` ###Assistant : Compute the geometric mean along the specified axis. Return the geometric average of the array elements. That is: n-th root of (x1 * x2 * ... * xn) Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : int or None, optional Axis along which the geometric mean is computed. Default is 0. If None, compute over the whole array `a`. dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of a, unless a has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. weights : array_like, optional The `weights` array must be broadcastable to the same shape as `a`. Default is None, which gives each value a weight of 1.0. Returns ------- gmean : ndarray See `dtype` parameter above. See Also -------- numpy.mean : Arithmetic average numpy.average : Weighted average hmean : Harmonic mean Notes ----- The geometric average is computed over a single dimension of the input array, axis=0 by default, or all values in the array if axis=None. float64 intermediate and return values are used for integer inputs. Beginning in SciPy 1.9, ``np.matrix`` inputs are converted to ``np.ndarray``s before the calculation is performed. 
In this case, the output will be a scalar or ``np.ndarray`` of appropriate shape rather than a 2D ``np.matrix``. Similarly, while masked elements of masked arrays are still ignored, the output will be a scalar or ``np.ndarray`` rather than a masked array with ``mask=False``. References ---------- .. [1] ""Weighted Geometric Mean"", *Wikipedia*, https://en.wikipedia.org/wiki/Weighted_geometric_mean. Examples -------- >>> from scipy.stats import gmean >>> gmean([1, 4]) 2.0 >>> gmean([1, 2, 3, 4, 5, 6, 7]) 3.3800151591412964 " 2163,"def handle_m2m_field(self, obj, field): raise NotImplementedError( ""subclasses of Serializer must provide a handle_m2m_field() method"" ) "," Called to handle a ManyToManyField. ",5,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def handle_m2m_field(self, obj, field): raise NotImplementedError( ""subclasses of Serializer must provide a handle_m2m_field() method"" ) ``` ###Assistant : Called to handle a ManyToManyField. " 2164,"def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT * FROM SYS.M_DATABASE') response.success = True except dbapi.Error as e: log.error(f'Error connecting to SAP HANA {self.address}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response "," Check the connection of the SAP HANA database :return: success status and error message if error occurs ",17,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT * FROM SYS.M_DATABASE') response.success = True except dbapi.Error as e: log.error(f'Error connecting to SAP HANA {self.address}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response ``` ###Assistant : Check the connection of the SAP HANA database :return: success status and error message if error occurs " 2165,"def extract(self, rowsList, colsList): r if not is_sequence(rowsList) or not is_sequence(colsList): raise TypeError(""rowsList and colsList must be iterable"") # ensure rowsList and colsList are lists of integers if rowsList and all(isinstance(i, bool) for i in rowsList): rowsList = [index for index, item in enumerate(rowsList) if item] if colsList and all(isinstance(i, bool) for i in colsList): colsList = [index for index, item in enumerate(colsList) if item] # ensure everything is in range rowsList = [a2idx(k, self.rows) for k in rowsList] colsList = [a2idx(k, self.cols) for k in colsList] return self._eval_extract(rowsList, colsList) ","Return a submatrix by specifying a list of rows and columns. Negative indices can be given. All indices must be in the range $-n \le i < n$ where $n$ is the number of rows or columns. 
Examples ======== >>> from sympy import Matrix >>> m = Matrix(4, 3, range(12)) >>> m Matrix([ [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]) >>> m.extract([0, 1, 3], [0, 1]) Matrix([ [0, 1], [3, 4], [9, 10]]) Rows or columns can be repeated: >>> m.extract([0, 0, 1], [-1]) Matrix([ [2], [2], [5]]) Every other row can be taken by using range to provide the indices: >>> m.extract(range(0, m.rows, 2), [-1]) Matrix([ [2], [8]]) RowsList or colsList can also be a list of booleans, in which case the rows or columns corresponding to the True values will be selected: >>> m.extract([0, 1, 2, 3], [True, False, True]) Matrix([ [0, 2], [3, 5], [6, 8], [9, 11]]) ",156,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extract(self, rowsList, colsList): r if not is_sequence(rowsList) or not is_sequence(colsList): raise TypeError(""rowsList and colsList must be iterable"") # ensure rowsList and colsList are lists of integers if rowsList and all(isinstance(i, bool) for i in rowsList): rowsList = [index for index, item in enumerate(rowsList) if item] if colsList and all(isinstance(i, bool) for i in colsList): colsList = [index for index, item in enumerate(colsList) if item] # ensure everything is in range rowsList = [a2idx(k, self.rows) for k in rowsList] colsList = [a2idx(k, self.cols) for k in colsList] return self._eval_extract(rowsList, colsList) ``` ###Assistant : Return a submatrix by specifying a list of rows and columns. Negative indices can be given. All indices must be in the range $-n \le i < n$ where $n$ is the number of rows or columns. Examples ======== >>> from sympy import Matrix >>> m = Matrix(4, 3, range(12)) >>> m Matrix([ [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]) >>> m.extract([0, 1, 3], [0, 1]) Matrix([ [0, 1], [3, 4], [9, 10]]) Rows or columns can be repeated: >>> m.extract([0, 0, 1], [-1]) Matrix([ [2], [2], [5]]) Every other row can be taken by using range to provide the indices: >>> m.extract(range(0, m.rows, 2), [-1]) Matrix([ [2], [8]]) RowsList or colsList can also be a list of booleans, in which case the rows or columns corresponding to the True values will be selected: >>> m.extract([0, 1, 2, 3], [True, False, True]) Matrix([ [0, 2], [3, 5], [6, 8], [9, 11]]) " 2166,"def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs): doit_flags = { 'deep': False, 'meijerg': meijerg, 'conds': conds, 'risch': risch, 'heurisch': heurisch, 'manual': manual } integral = Integral(*args, **kwargs) if isinstance(integral, Integral): return integral.doit(**doit_flags) else: new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a for a in integral.args] return integral.func(*new_args) ","integrate(f, var, ...) Explanation =========== Compute definite or indefinite integral of one or more variables using Risch-Norman algorithm and table lookup. This procedure is able to handle elementary algebraic and transcendental functions and also a huge class of special functions, including Airy, Bessel, Whittaker and Lambert. var can be: - a symbol -- indefinite integration - a tuple (symbol, a) -- indefinite integration with result given with ``a`` replacing ``symbol`` - a tuple (symbol, a, b) -- definite integration Several variables can be specified, in which case the result is multiple integration. (If var is omitted and the integrand is univariate, the indefinite integral in that variable will be performed.) 
Indefinite integrals are returned without terms that are independent of the integration variables. (see examples) Definite improper integrals often entail delicate convergence conditions. Pass conds='piecewise', 'separate' or 'none' to have these returned, respectively, as a Piecewise function, as a separate result (i.e. result will be a tuple), or not at all (default is 'piecewise'). **Strategy** SymPy uses various approaches to definite integration. One method is to find an antiderivative for the integrand, and then use the fundamental theorem of calculus. Various functions are implemented to integrate polynomial, rational and trigonometric functions, and integrands containing DiracDelta terms. SymPy also implements the part of the Risch algorithm, which is a decision procedure for integrating elementary functions, i.e., the algorithm can either find an elementary antiderivative, or prove that one does not exist. There is also a (very successful, albeit somewhat slow) general implementation of the heuristic Risch algorithm. This algorithm will eventually be phased out as more of the full Risch algorithm is implemented. See the docstring of Integral._eval_integral() for more details on computing the antiderivative using algebraic methods. The option risch=True can be used to use only the (full) Risch algorithm. This is useful if you want to know if an elementary function has an elementary antiderivative. If the indefinite Integral returned by this function is an instance of NonElementaryIntegral, that means that the Risch algorithm has proven that integral to be non-elementary. Note that by default, additional methods (such as the Meijer G method outlined below) are tried on these integrals, as they may be expressible in terms of special functions, so if you only care about elementary answers, use risch=True. Also note that an unevaluated Integral returned by this function is not necessarily a NonElementaryIntegral, even with risch=True, as it may just be an indication that the particular part of the Risch algorithm needed to integrate that function is not yet implemented. Another family of strategies comes from re-writing the integrand in terms of so-called Meijer G-functions. Indefinite integrals of a single G-function can always be computed, and the definite integral of a product of two G-functions can be computed from zero to infinity. Various strategies are implemented to rewrite integrands as G-functions, and use this information to compute integrals (see the ``meijerint`` module). The option manual=True can be used to use only an algorithm that tries to mimic integration by hand. This algorithm does not handle as many integrands as the other algorithms implemented but may return results in a more familiar form. The ``manualintegrate`` module has functions that return the steps used (see the module docstring for more information). In general, the algebraic methods work best for computing antiderivatives of (possibly complicated) combinations of elementary functions. The G-function methods work best for computing definite integrals from zero to infinity of moderately complicated combinations of special functions, or indefinite integrals of very simple combinations of special functions. The strategy employed by the integration code is as follows: - If computing a definite integral, and both limits are real, and at least one limit is +- oo, try the G-function method of definite integration first. 
- Try to find an antiderivative, using all available methods, ordered by performance (that is try fastest method first, slowest last; in particular polynomial integration is tried first, Meijer G-functions second to last, and heuristic Risch last). - If still not successful, try G-functions irrespective of the limits. The option meijerg=True, False, None can be used to, respectively: always use G-function methods and no others, never use G-function methods, or use all available methods (in order as described above). It defaults to None. Examples ======== >>> from sympy import integrate, log, exp, oo >>> from sympy.abc import a, x, y >>> integrate(x*y, x) x**2*y/2 >>> integrate(log(x), x) x*log(x) - x >>> integrate(log(x), (x, 1, a)) a*log(a) - a + 1 >>> integrate(x) x**2/2 Terms that are independent of x are dropped by indefinite integration: >>> from sympy import sqrt >>> integrate(sqrt(1 + x), (x, 0, x)) 2*(x + 1)**(3/2)/3 - 2/3 >>> integrate(sqrt(1 + x), x) 2*(x + 1)**(3/2)/3 >>> integrate(x*y) Traceback (most recent call last): ... ValueError: specify integration variables to integrate x*y Note that ``integrate(x)`` syntax is meant only for convenience in interactive sessions and should be avoided in library code. >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise' Piecewise((gamma(a + 1), re(a) > -1), (Integral(x**a*exp(-x), (x, 0, oo)), True)) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none') gamma(a + 1) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate') (gamma(a + 1), re(a) > -1) See Also ======== Integral, Integral.doit ",865,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs): doit_flags = { 'deep': False, 'meijerg': meijerg, 'conds': conds, 'risch': risch, 'heurisch': heurisch, 'manual': manual } integral = Integral(*args, **kwargs) if isinstance(integral, Integral): return integral.doit(**doit_flags) else: new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a for a in integral.args] return integral.func(*new_args) ``` ###Assistant : integrate(f, var, ...) Explanation =========== Compute definite or indefinite integral of one or more variables using Risch-Norman algorithm and table lookup. This procedure is able to handle elementary algebraic and transcendental functions and also a huge class of special functions, including Airy, Bessel, Whittaker and Lambert. var can be: - a symbol -- indefinite integration - a tuple (symbol, a) -- indefinite integration with result given with ``a`` replacing ``symbol`` - a tuple (symbol, a, b) -- definite integration Several variables can be specified, in which case the result is multiple integration. (If var is omitted and the integrand is univariate, the indefinite integral in that variable will be performed.) Indefinite integrals are returned without terms that are independent of the integration variables. (see examples) Definite improper integrals often entail delicate convergence conditions. Pass conds='piecewise', 'separate' or 'none' to have these returned, respectively, as a Piecewise function, as a separate result (i.e. result will be a tuple), or not at all (default is 'piecewise'). **Strategy** SymPy uses various approaches to definite integration. One method is to find an antiderivative for the integrand, and then use the fundamental theorem of calculus. 
Various functions are implemented to integrate polynomial, rational and trigonometric functions, and integrands containing DiracDelta terms. SymPy also implements the part of the Risch algorithm, which is a decision procedure for integrating elementary functions, i.e., the algorithm can either find an elementary antiderivative, or prove that one does not exist. There is also a (very successful, albeit somewhat slow) general implementation of the heuristic Risch algorithm. This algorithm will eventually be phased out as more of the full Risch algorithm is implemented. See the docstring of Integral._eval_integral() for more details on computing the antiderivative using algebraic methods. The option risch=True can be used to use only the (full) Risch algorithm. This is useful if you want to know if an elementary function has an elementary antiderivative. If the indefinite Integral returned by this function is an instance of NonElementaryIntegral, that means that the Risch algorithm has proven that integral to be non-elementary. Note that by default, additional methods (such as the Meijer G method outlined below) are tried on these integrals, as they may be expressible in terms of special functions, so if you only care about elementary answers, use risch=True. Also note that an unevaluated Integral returned by this function is not necessarily a NonElementaryIntegral, even with risch=True, as it may just be an indication that the particular part of the Risch algorithm needed to integrate that function is not yet implemented. Another family of strategies comes from re-writing the integrand in terms of so-called Meijer G-functions. Indefinite integrals of a single G-function can always be computed, and the definite integral of a product of two G-functions can be computed from zero to infinity. Various strategies are implemented to rewrite integrands as G-functions, and use this information to compute integrals (see the ``meijerint`` module). The option manual=True can be used to use only an algorithm that tries to mimic integration by hand. This algorithm does not handle as many integrands as the other algorithms implemented but may return results in a more familiar form. The ``manualintegrate`` module has functions that return the steps used (see the module docstring for more information). In general, the algebraic methods work best for computing antiderivatives of (possibly complicated) combinations of elementary functions. The G-function methods work best for computing definite integrals from zero to infinity of moderately complicated combinations of special functions, or indefinite integrals of very simple combinations of special functions. The strategy employed by the integration code is as follows: - If computing a definite integral, and both limits are real, and at least one limit is +- oo, try the G-function method of definite integration first. - Try to find an antiderivative, using all available methods, ordered by performance (that is try fastest method first, slowest last; in particular polynomial integration is tried first, Meijer G-functions second to last, and heuristic Risch last). - If still not successful, try G-functions irrespective of the limits. The option meijerg=True, False, None can be used to, respectively: always use G-function methods and no others, never use G-function methods, or use all available methods (in order as described above). It defaults to None. 
Examples ======== >>> from sympy import integrate, log, exp, oo >>> from sympy.abc import a, x, y >>> integrate(x*y, x) x**2*y/2 >>> integrate(log(x), x) x*log(x) - x >>> integrate(log(x), (x, 1, a)) a*log(a) - a + 1 >>> integrate(x) x**2/2 Terms that are independent of x are dropped by indefinite integration: >>> from sympy import sqrt >>> integrate(sqrt(1 + x), (x, 0, x)) 2*(x + 1)**(3/2)/3 - 2/3 >>> integrate(sqrt(1 + x), x) 2*(x + 1)**(3/2)/3 >>> integrate(x*y) Traceback (most recent call last): ... ValueError: specify integration variables to integrate x*y Note that ``integrate(x)`` syntax is meant only for convenience in interactive sessions and should be avoided in library code. >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise' Piecewise((gamma(a + 1), re(a) > -1), (Integral(x**a*exp(-x), (x, 0, oo)), True)) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none') gamma(a + 1) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate') (gamma(a + 1), re(a) > -1) See Also ======== Integral, Integral.doit " 2167,"def parsed_paths(self) -> List[str]: res_paths: List[str] = [] paths = self.parser.existing_paths for directory in paths: for filename in paths[directory]: res_paths.append(os.path.join(directory, filename)) return res_paths "," Returns a list of file paths that have currently been parsed into the parser tree. The returned list may include paths with wildcard characters, for example: ['/etc/apache2/conf.d/*.load'] This is typically called on the root node of the ParserNode tree. :returns: list of file paths of files that have been parsed ",50,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parsed_paths(self) -> List[str]: res_paths: List[str] = [] paths = self.parser.existing_paths for directory in paths: for filename in paths[directory]: res_paths.append(os.path.join(directory, filename)) return res_paths ``` ###Assistant : Returns a list of file paths that have currently been parsed into the parser tree. The returned list may include paths with wildcard characters, for example: ['/etc/apache2/conf.d/*.load'] This is typically called on the root node of the ParserNode tree. :returns: list of file paths of files that have been parsed " 2168,"def _check_guts_toc_mtime(attr_name, old_toc, new_toc, last_build, pyc=False): for dest_name, src_name, typecode in old_toc: if misc.mtime(src_name) > last_build: logger.info(""Building because %s changed"", src_name) return True elif pyc and typecode == 'PYMODULE': py_filename = src_name[:-1] if misc.mtime(py_filename) > last_build: logger.info(""Building because %s changed"", py_filename) return True return False "," Rebuild is required if mtimes of files listed in old TOC are newer than last_build. If pyc=True, check for .py files as well. Use this for calculated/analysed values read from cache. ",31,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_guts_toc_mtime(attr_name, old_toc, new_toc, last_build, pyc=False): for dest_name, src_name, typecode in old_toc: if misc.mtime(src_name) > last_build: logger.info(""Building because %s changed"", src_name) return True elif pyc and typecode == 'PYMODULE': py_filename = src_name[:-1] if misc.mtime(py_filename) > last_build: logger.info(""Building because %s changed"", py_filename) return True return False ``` ###Assistant : Rebuild is required if mtimes of files listed in old TOC are newer than last_build. 
If pyc=True, check for .py files as well. Use this for calculated/analysed values read from cache. " 2169,"def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT 1 FROM (SELECT 1 AS ""dual"") AS ""dual""') response.success = True except teradatasql.Error as e: log.error(f'Error connecting to Teradata {self.host}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response "," Check the connection of the Teradata database :return: success status and error message if error occurs ",16,65,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_connection(self) -> StatusResponse: response = StatusResponse(False) need_to_close = self.is_connected is False try: connection = self.connect() with connection.cursor() as cur: cur.execute('SELECT 1 FROM (SELECT 1 AS ""dual"") AS ""dual""') response.success = True except teradatasql.Error as e: log.error(f'Error connecting to Teradata {self.host}, {e}!') response.error_message = e if response.success is True and need_to_close: self.disconnect() if response.success is False and self.is_connected is True: self.is_connected = False return response ``` ###Assistant : Check the connection of the Teradata database :return: success status and error message if error occurs " 2170,"def process_doc_file(code_file, add_new_line=True): with open(code_file, ""r"", encoding=""utf-8"", newline=""\n"") as f: code = f.read() # fmt: off splits = code.split(""```"") if len(splits) % 2 != 1: raise ValueError(""The number of occurrences of ``` should be an even number."") splits = [s if i % 2 == 0 else process_code_block(s, add_new_line=add_new_line) for i, s in enumerate(splits)] clean_code = ""```"".join(splits) # fmt: on diff = clean_code != code if diff: print(f""Overwriting content of {code_file}."") with open(code_file, ""w"", encoding=""utf-8"", newline=""\n"") as f: f.write(clean_code) "," Process given file. Args: code_file (`str` or `os.PathLike`): The file in which we want to style the docstring. ",18,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def process_doc_file(code_file, add_new_line=True): with open(code_file, ""r"", encoding=""utf-8"", newline=""\n"") as f: code = f.read() # fmt: off splits = code.split(""```"") if len(splits) % 2 != 1: raise ValueError(""The number of occurrences of ``` should be an even number."") splits = [s if i % 2 == 0 else process_code_block(s, add_new_line=add_new_line) for i, s in enumerate(splits)] clean_code = ""```"".join(splits) # fmt: on diff = clean_code != code if diff: print(f""Overwriting content of {code_file}."") with open(code_file, ""w"", encoding=""utf-8"", newline=""\n"") as f: f.write(clean_code) ``` ###Assistant : Process given file. Args: code_file (`str` or `os.PathLike`): The file in which we want to style the docstring. 
" 2171,"def finalize(self, batch): for face, landmarks in zip(batch[""detected_faces""], batch[""landmarks""]): if not isinstance(landmarks, np.ndarray): landmarks = np.array(landmarks) face._landmarks_xy = landmarks logger.trace(""Item out: %s"", {key: val.shape if isinstance(val, np.ndarray) else val for key, val in batch.items()}) for filename, face in zip(batch[""filename""], batch[""detected_faces""]): self._output_faces.append(face) if len(self._output_faces) != self._faces_per_filename[filename]: continue output = self._extract_media.pop(filename) output.add_detected_faces(self._output_faces) self._output_faces = [] logger.trace(""Final Output: (filename: '%s', image shape: %s, detected_faces: %s, "" ""item: %s)"", output.filename, output.image_shape, output.detected_faces, output) yield output # <<< PROTECTED METHODS >>> # # << PROCESS_INPUT WRAPPER >>"," Finalize the output from Aligner This should be called as the final task of each `plugin`. Pairs the detected faces back up with their original frame before yielding each frame. Parameters ---------- batch : dict The final ``dict`` from the `plugin` process. It must contain the `keys`: ``detected_faces``, ``landmarks``, ``filename`` Yields ------ :class:`~plugins.extract.pipeline.ExtractMedia` The :attr:`DetectedFaces` list will be populated for this class with the bounding boxes and landmarks for the detected faces found in the frame. ",76,82,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def finalize(self, batch): for face, landmarks in zip(batch[""detected_faces""], batch[""landmarks""]): if not isinstance(landmarks, np.ndarray): landmarks = np.array(landmarks) face._landmarks_xy = landmarks logger.trace(""Item out: %s"", {key: val.shape if isinstance(val, np.ndarray) else val for key, val in batch.items()}) for filename, face in zip(batch[""filename""], batch[""detected_faces""]): self._output_faces.append(face) if len(self._output_faces) != self._faces_per_filename[filename]: continue output = self._extract_media.pop(filename) output.add_detected_faces(self._output_faces) self._output_faces = [] logger.trace(""Final Output: (filename: '%s', image shape: %s, detected_faces: %s, "" ""item: %s)"", output.filename, output.image_shape, output.detected_faces, output) yield output # <<< PROTECTED METHODS >>> # # << PROCESS_INPUT WRAPPER >> ``` ###Assistant : Finalize the output from Aligner This should be called as the final task of each `plugin`. Pairs the detected faces back up with their original frame before yielding each frame. Parameters ---------- batch : dict The final ``dict`` from the `plugin` process. It must contain the `keys`: ``detected_faces``, ``landmarks``, ``filename`` Yields ------ :class:`~plugins.extract.pipeline.ExtractMedia` The :attr:`DetectedFaces` list will be populated for this class with the bounding boxes and landmarks for the detected faces found in the frame. " 2172,"def find_dcerpc_interface(name): try: return next(x for x in DCE_RPC_INTERFACES.values() if x.name == name) except StopIteration: raise AttributeError(""Unknown interface !"") # --- NDR fields - [C706] chap 14 "," Find an interface object through the name in the IDL ",10,27,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def find_dcerpc_interface(name): try: return next(x for x in DCE_RPC_INTERFACES.values() if x.name == name) except StopIteration: raise AttributeError(""Unknown interface !"") # --- NDR fields - [C706] chap 14 ``` ###Assistant : Find an interface object through the name in the IDL " 2173,"def test_animatable(): animatable = AnimateTest() # Fake wall-clock time time = 100.0 # Object that does the animation animation = SimpleAnimation( animatable, ""bar"", time, 3.0, start_value=Animatable(20.0), end_value=Animatable(50.0), final_value=Animatable(50.0), easing=lambda x: x, ) assert animation(time) is False assert animatable.bar.value == 20.0 assert animation(time + 1.0) is False assert animatable.bar.value == 30.0 assert animation(time + 2.0) is False assert animatable.bar.value == 40.0 assert animation(time + 2.9) is False assert pytest.approx(animatable.bar.value, 49.0) assert animation(time + 3.0) is True # True to indicate animation is complete assert animatable.bar.value == 50.0 ",Test SimpleAnimation works with the Animatable protocol,7,86,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_animatable(): animatable = AnimateTest() # Fake wall-clock time time = 100.0 # Object that does the animation animation = SimpleAnimation( animatable, ""bar"", time, 3.0, start_value=Animatable(20.0), end_value=Animatable(50.0), final_value=Animatable(50.0), easing=lambda x: x, ) assert animation(time) is False assert animatable.bar.value == 20.0 assert animation(time + 1.0) is False assert animatable.bar.value == 30.0 assert animation(time + 2.0) is False assert animatable.bar.value == 40.0 assert animation(time + 2.9) is False assert pytest.approx(animatable.bar.value, 49.0) assert animation(time + 3.0) is True # True to indicate animation is complete assert animatable.bar.value == 50.0 ``` ###Assistant : Test SimpleAnimation works with the Animatable protocol " 2174,"def update(self, bbox, score, label, gt_box, gt_label, difficult=None): if difficult is None: difficult = np.zeros_like(gt_label) # record class gt count for gtl, diff in zip(gt_label, difficult): if self.evaluate_difficult or int(diff) == 0: self.class_gt_counts[int(np.array(gtl))] += 1 # record class score positive visited = [False] * len(gt_label) for b, s, l in zip(bbox, score, label): pred = b.tolist() if isinstance(b, np.ndarray) else b max_idx = -1 max_overlap = -1.0 for i, gl in enumerate(gt_label): if int(gl) == int(l): if len(gt_box[i]) == 8: overlap = calc_rbox_iou(pred, gt_box[i]) else: overlap = jaccard_overlap(pred, gt_box[i], self.is_bbox_normalized) if overlap > max_overlap: max_overlap = overlap max_idx = i if max_overlap > self.overlap_thresh: if self.evaluate_difficult or \ int(np.array(difficult[max_idx])) == 0: if not visited[max_idx]: self.class_score_poss[int(l)].append([s, 1.0]) visited[max_idx] = True else: self.class_score_poss[int(l)].append([s, 0.0]) else: self.class_score_poss[int(l)].append([s, 0.0]) "," Update metric statics from given prediction and ground truth infomations. ",10,125,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def update(self, bbox, score, label, gt_box, gt_label, difficult=None): if difficult is None: difficult = np.zeros_like(gt_label) # record class gt count for gtl, diff in zip(gt_label, difficult): if self.evaluate_difficult or int(diff) == 0: self.class_gt_counts[int(np.array(gtl))] += 1 # record class score positive visited = [False] * len(gt_label) for b, s, l in zip(bbox, score, label): pred = b.tolist() if isinstance(b, np.ndarray) else b max_idx = -1 max_overlap = -1.0 for i, gl in enumerate(gt_label): if int(gl) == int(l): if len(gt_box[i]) == 8: overlap = calc_rbox_iou(pred, gt_box[i]) else: overlap = jaccard_overlap(pred, gt_box[i], self.is_bbox_normalized) if overlap > max_overlap: max_overlap = overlap max_idx = i if max_overlap > self.overlap_thresh: if self.evaluate_difficult or \ int(np.array(difficult[max_idx])) == 0: if not visited[max_idx]: self.class_score_poss[int(l)].append([s, 1.0]) visited[max_idx] = True else: self.class_score_poss[int(l)].append([s, 0.0]) else: self.class_score_poss[int(l)].append([s, 0.0]) ``` ###Assistant : Update metric statics from given prediction and ground truth infomations. " 2175,"def _get_permission_objects_for_actions(self, actions): permission_codenames = [ ""%s_%s"" % (action, self.model_name) for action in actions ] return Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames ) "," Get a queryset of the Permission objects for the given actions ",11,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_permission_objects_for_actions(self, actions): permission_codenames = [ ""%s_%s"" % (action, self.model_name) for action in actions ] return Permission.objects.filter( content_type=self._content_type, codename__in=permission_codenames ) ``` ###Assistant : Get a queryset of the Permission objects for the given actions " 2176,"def createWindow(self, wintype): debug_type = debug.qenum_key(QWebEnginePage, wintype) background = config.val.tabs.background log.webview.debug(""createWindow with type {}, background {}"".format( debug_type, background)) if wintype == QWebEnginePage.WebWindowType.WebBrowserWindow: # Shift-Alt-Click target = usertypes.ClickTarget.window elif wintype == QWebEnginePage.WebWindowType.WebDialog: log.webview.warning(""{} requested, but we don't support "" ""that!"".format(debug_type)) target = usertypes.ClickTarget.tab elif wintype == QWebEnginePage.WebWindowType.WebBrowserTab: # Middle-click / Ctrl-Click with Shift # FIXME:qtwebengine this also affects target=_blank links... if background: target = usertypes.ClickTarget.tab else: target = usertypes.ClickTarget.tab_bg elif wintype == QWebEnginePage.WebWindowType.WebBrowserBackgroundTab: # Middle-click / Ctrl-Click if background: target = usertypes.ClickTarget.tab_bg else: target = usertypes.ClickTarget.tab else: raise ValueError(""Invalid wintype {}"".format(debug_type)) tab = shared.get_tab(self._win_id, target) return tab._widget # pylint: disable=protected-access ","Called by Qt when a page wants to create a new window. This function is called from the createWindow() method of the associated QWebEnginePage, each time the page wants to create a new window of the given type. This might be the result, for example, of a JavaScript request to open a document in a new window. Args: wintype: This enum describes the types of window that can be created by the createWindow() function. 
QWebEnginePage::WebBrowserWindow: A complete web browser window. QWebEnginePage::WebBrowserTab: A web browser tab. QWebEnginePage::WebDialog: A window without decoration. QWebEnginePage::WebBrowserBackgroundTab: A web browser tab without hiding the current visible WebEngineView. Return: The new QWebEngineView object. ",106,99,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def createWindow(self, wintype): debug_type = debug.qenum_key(QWebEnginePage, wintype) background = config.val.tabs.background log.webview.debug(""createWindow with type {}, background {}"".format( debug_type, background)) if wintype == QWebEnginePage.WebWindowType.WebBrowserWindow: # Shift-Alt-Click target = usertypes.ClickTarget.window elif wintype == QWebEnginePage.WebWindowType.WebDialog: log.webview.warning(""{} requested, but we don't support "" ""that!"".format(debug_type)) target = usertypes.ClickTarget.tab elif wintype == QWebEnginePage.WebWindowType.WebBrowserTab: # Middle-click / Ctrl-Click with Shift # FIXME:qtwebengine this also affects target=_blank links... if background: target = usertypes.ClickTarget.tab else: target = usertypes.ClickTarget.tab_bg elif wintype == QWebEnginePage.WebWindowType.WebBrowserBackgroundTab: # Middle-click / Ctrl-Click if background: target = usertypes.ClickTarget.tab_bg else: target = usertypes.ClickTarget.tab else: raise ValueError(""Invalid wintype {}"".format(debug_type)) tab = shared.get_tab(self._win_id, target) return tab._widget # pylint: disable=protected-access ``` ###Assistant : Called by Qt when a page wants to create a new window. This function is called from the createWindow() method of the associated QWebEnginePage, each time the page wants to create a new window of the given type. This might be the result, for example, of a JavaScript request to open a document in a new window. Args: wintype: This enum describes the types of window that can be created by the createWindow() function. QWebEnginePage::WebBrowserWindow: A complete web browser window. QWebEnginePage::WebBrowserTab: A web browser tab. QWebEnginePage::WebDialog: A window without decoration. QWebEnginePage::WebBrowserBackgroundTab: A web browser tab without hiding the current visible WebEngineView. Return: The new QWebEngineView object. " 2177,"def predict(self, X): raw_predictions = self.decision_function(X) encoded_labels = self._loss._raw_prediction_to_decision(raw_predictions) return self.classes_.take(encoded_labels, axis=0) ","Predict class for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : ndarray of shape (n_samples,) The predicted values. ",47,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def predict(self, X): raw_predictions = self.decision_function(X) encoded_labels = self._loss._raw_prediction_to_decision(raw_predictions) return self.classes_.take(encoded_labels, axis=0) ``` ###Assistant : Predict class for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : ndarray of shape (n_samples,) The predicted values. 
" 2178,"def union_all(graphs, rename=()): R = None seen_nodes = set() # rename graph to obtain disjoint node labels","Returns the union of all graphs. The graphs must be disjoint, otherwise an exception is raised. Parameters ---------- graphs : iterable Iterable of NetworkX graphs rename : iterable , optional Node names of graphs can be changed by specifying the tuple rename=('G-','H-') (for example). Node ""u"" in G is then renamed ""G-u"" and ""v"" in H is renamed ""H-v"". Infinite generators (like itertools.count) are also supported. Returns ------- U : a graph with the same type as the first graph in list Raises ------ ValueError If `graphs` is an empty list. Notes ----- To force a disjoint union with node relabeling, use disjoint_union_all(G,H) or convert_node_labels_to integers(). Graph, edge, and node attributes are propagated to the union graph. If a graph attribute is present in multiple graphs, then the value from the last graph in the list with that attribute is used. See Also -------- union disjoint_union_all ",146,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def union_all(graphs, rename=()): R = None seen_nodes = set() # rename graph to obtain disjoint node labels ``` ###Assistant : Returns the union of all graphs. The graphs must be disjoint, otherwise an exception is raised. Parameters ---------- graphs : iterable Iterable of NetworkX graphs rename : iterable , optional Node names of graphs can be changed by specifying the tuple rename=('G-','H-') (for example). Node ""u"" in G is then renamed ""G-u"" and ""v"" in H is renamed ""H-v"". Infinite generators (like itertools.count) are also supported. Returns ------- U : a graph with the same type as the first graph in list Raises ------ ValueError If `graphs` is an empty list. Notes ----- To force a disjoint union with node relabeling, use disjoint_union_all(G,H) or convert_node_labels_to integers(). Graph, edge, and node attributes are propagated to the union graph. If a graph attribute is present in multiple graphs, then the value from the last graph in the list with that attribute is used. See Also -------- union disjoint_union_all " 2179,"def test_unknown_category_that_are_negative(): rng = np.random.RandomState(42) n_samples = 1000 X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)] y = np.zeros(shape=n_samples) y[X[:, 1] % 2 == 0] = 1 hist = HistGradientBoostingRegressor( random_state=0, categorical_features=[False, True], max_iter=10, ).fit(X, y) # Check that negative values from the second column are treated like a # missing category X_test_neg = np.asarray([[1, -2], [3, -4]]) X_test_nan = np.asarray([[1, np.nan], [3, np.nan]]) assert_allclose(hist.predict(X_test_neg), hist.predict(X_test_nan)) ","Check that unknown categories that are negative does not error. Non-regression test for #24274. ",14,63,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_unknown_category_that_are_negative(): rng = np.random.RandomState(42) n_samples = 1000 X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)] y = np.zeros(shape=n_samples) y[X[:, 1] % 2 == 0] = 1 hist = HistGradientBoostingRegressor( random_state=0, categorical_features=[False, True], max_iter=10, ).fit(X, y) # Check that negative values from the second column are treated like a # missing category X_test_neg = np.asarray([[1, -2], [3, -4]]) X_test_nan = np.asarray([[1, np.nan], [3, np.nan]]) assert_allclose(hist.predict(X_test_neg), hist.predict(X_test_nan)) ``` ###Assistant : Check that unknown categories that are negative does not error. Non-regression test for #24274. " 2180,"def forward(self, x_input, mask, cache=None): if isinstance(x_input, tuple): x, pos_emb = x_input[0], x_input[1] else: x, pos_emb = x_input, None # whether to use macaron style if self.feed_forward_macaron is not None: residual = x if self.normalize_before: x = self.norm_ff_macaron(x) x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x)) if not self.normalize_before: x = self.norm_ff_macaron(x) # multi-headed self-attention module residual = x if self.normalize_before: x = self.norm_mha(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + self.concat_linear(x_concat) else: x = residual + self.dropout(x_att) if not self.normalize_before: x = self.norm_mha(x) # convolution module if self.conv_module is not None: residual = x if self.normalize_before: x = self.norm_conv(x) x = residual + self.dropout(self.conv_module(x)) if not self.normalize_before: x = self.norm_conv(x) # feed forward module residual = x if self.normalize_before: x = self.norm_ff(x) x = residual + self.ff_scale * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm_ff(x) if self.conv_module is not None: x = self.norm_final(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask ","Compute encoded features. :param torch.Tensor x_input: encoded source features, w/o pos_emb tuple((batch, max_time_in, size), (1, max_time_in, size)) or (batch, max_time_in, size) :param torch.Tensor mask: mask for x (batch, max_time_in) :param torch.Tensor cache: cache for x (batch, max_time_in - 1, size) :rtype: Tuple[torch.Tensor, torch.Tensor] ",43,225,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def forward(self, x_input, mask, cache=None): if isinstance(x_input, tuple): x, pos_emb = x_input[0], x_input[1] else: x, pos_emb = x_input, None # whether to use macaron style if self.feed_forward_macaron is not None: residual = x if self.normalize_before: x = self.norm_ff_macaron(x) x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x)) if not self.normalize_before: x = self.norm_ff_macaron(x) # multi-headed self-attention module residual = x if self.normalize_before: x = self.norm_mha(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + self.concat_linear(x_concat) else: x = residual + self.dropout(x_att) if not self.normalize_before: x = self.norm_mha(x) # convolution module if self.conv_module is not None: residual = x if self.normalize_before: x = self.norm_conv(x) x = residual + self.dropout(self.conv_module(x)) if not self.normalize_before: x = self.norm_conv(x) # feed forward module residual = x if self.normalize_before: x = self.norm_ff(x) x = residual + self.ff_scale * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm_ff(x) if self.conv_module is not None: x = self.norm_final(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask ``` ###Assistant : Compute encoded features. :param torch.Tensor x_input: encoded source features, w/o pos_emb tuple((batch, max_time_in, size), (1, max_time_in, size)) or (batch, max_time_in, size) :param torch.Tensor mask: mask for x (batch, max_time_in) :param torch.Tensor cache: cache for x (batch, max_time_in - 1, size) :rtype: Tuple[torch.Tensor, torch.Tensor] " 2181,"def test_prompt_invalid_template_format() -> None: template = ""This is a {foo} test."" input_variables = [""foo""] with pytest.raises(ValueError): Prompt( input_variables=input_variables, template=template, template_format=""bar"" ) ",Test initializing a prompt with invalid template format.,8,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_prompt_invalid_template_format() -> None: template = ""This is a {foo} test."" input_variables = [""foo""] with pytest.raises(ValueError): Prompt( input_variables=input_variables, template=template, template_format=""bar"" ) ``` ###Assistant : Test initializing a prompt with invalid template format. 
" 2182,"def test_medium_does_not_exist(self) -> None: # test for unknown medium url = ""/_synapse/admin/v1/threepid/publickey/users/unknown-key"" channel = self.make_request( ""GET"", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body[""errcode""]) # test for unknown user with a known medium url = ""/_synapse/admin/v1/threepid/email/users/unknown"" channel = self.make_request( ""GET"", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body[""errcode""]) ","Tests that both a lookup for a medium that does not exist and a user that doesn't exist with that third party ID returns a 404",26,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_medium_does_not_exist(self) -> None: # test for unknown medium url = ""/_synapse/admin/v1/threepid/publickey/users/unknown-key"" channel = self.make_request( ""GET"", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body[""errcode""]) # test for unknown user with a known medium url = ""/_synapse/admin/v1/threepid/email/users/unknown"" channel = self.make_request( ""GET"", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body[""errcode""]) ``` ###Assistant : Tests that both a lookup for a medium that does not exist and a user that doesn't exist with that third party ID returns a 404 " 2183,"def deepspeed_config_process(self, prefix="""", mismatches=None, config=None, must_match=True, **kwargs): mismatches = [] if mismatches is None else mismatches if config is None: config = self.deepspeed_config for key, value in config.items(): if isinstance(value, dict): self.deepspeed_config_process( prefix=prefix + key + ""."", mismatches=mismatches, config=value, must_match=must_match, **kwargs ) else: self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs) if len(mismatches) > 0 and prefix == """": mismatches_msg = ""\n"".join(mismatches) raise ValueError( ""Please correct the following DeepSpeed config values that mismatch kwargs "" f"" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."" ) @dataclass",Process the DeepSpeed config with the values from the kwargs.,10,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deepspeed_config_process(self, prefix="""", mismatches=None, config=None, must_match=True, **kwargs): mismatches = [] if mismatches is None else mismatches if config is None: config = self.deepspeed_config for key, value in config.items(): if isinstance(value, dict): self.deepspeed_config_process( prefix=prefix + key + ""."", mismatches=mismatches, config=value, must_match=must_match, **kwargs ) else: self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs) if len(mismatches) > 0 and prefix == """": mismatches_msg = ""\n"".join(mismatches) raise ValueError( ""Please correct the following DeepSpeed config values that mismatch kwargs "" f"" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."" ) @dataclass ``` ###Assistant : Process the DeepSpeed config with the values from the kwargs. 
" 2184,"def get_metrics_result(self): # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics ","Returns the model's metrics values as a dict. If any of the metric result is a dict (containing multiple metrics), each of them gets added to the top level returned dict of this method. Returns: A `dict` containing values of the metrics listed in `self.metrics`. Example: `{'loss': 0.2, 'accuracy': 0.7}`. ",50,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_metrics_result(self): # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics ``` ###Assistant : Returns the model's metrics values as a dict. If any of the metric result is a dict (containing multiple metrics), each of them gets added to the top level returned dict of this method. Returns: A `dict` containing values of the metrics listed in `self.metrics`. Example: `{'loss': 0.2, 'accuracy': 0.7}`. " 2185,"def refactor_docstring(self, input, filename): result = [] block = None block_lineno = None indent = None lineno = 0 for line in input.splitlines(keepends=True): lineno += 1 if line.lstrip().startswith(self.PS1): if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block_lineno = lineno block = [line] i = line.find(self.PS1) indent = line[:i] elif (indent is not None and (line.startswith(indent + self.PS2) or line == indent + self.PS2.rstrip() + ""\n"")): block.append(line) else: if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block = None indent = None result.append(line) if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) return """".join(result) ","Refactors a docstring, looking for doctests. This returns a modified version of the input string. It looks for doctests, which start with a "">>>"" prompt, and may be continued with ""..."" prompts, as long as the ""..."" is indented the same as the "">>>"". (Unfortunately we can't use the doctest module's parser, since, like most parsers, it is not geared towards preserving the original source.) ",65,95,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def refactor_docstring(self, input, filename): result = [] block = None block_lineno = None indent = None lineno = 0 for line in input.splitlines(keepends=True): lineno += 1 if line.lstrip().startswith(self.PS1): if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block_lineno = lineno block = [line] i = line.find(self.PS1) indent = line[:i] elif (indent is not None and (line.startswith(indent + self.PS2) or line == indent + self.PS2.rstrip() + ""\n"")): block.append(line) else: if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block = None indent = None result.append(line) if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) return """".join(result) ``` ###Assistant : Refactors a docstring, looking for doctests. This returns a modified version of the input string. 
It looks for doctests, which start with a "">>>"" prompt, and may be continued with ""..."" prompts, as long as the ""..."" is indented the same as the "">>>"". (Unfortunately we can't use the doctest module's parser, since, like most parsers, it is not geared towards preserving the original source.) " 2186,"def test_complex_reversed_dag(self, test_complex_taskgroup_dag, complex_dag_expected_edges): ( dag, group, ( group_dm1, group_dm2, group_dm3, dm_in1, dm_in2, dm_in3, dm_in4, dm_out1, dm_out2, dm_out3, dm_out4, op_in1, op_out1, ), ) = test_complex_taskgroup_dag group_dm1 << [group_dm2, group_dm3] group << dm_in1 group << Label('label dm_in2 <=> group') << dm_in2 group << Label('label dm_in3/dm_in4 <=> group') << [dm_in3, dm_in4] group << Label('label op_in1 <=> group') << XComArg(op_in1, 'test_key') dm_out1 << group dm_out2 << Label('label group <=> dm_out2') << group [dm_out3, dm_out4] << Label('label group <=> dm_out3/dm_out4') << group XComArg(op_out1, 'test_key') << Label('label group <=> op_out1') << group compare_dag_edges(dag_edges(dag), complex_dag_expected_edges) ",Tests the complex reversed dag with a TaskGroup and a Label,11,89,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_complex_reversed_dag(self, test_complex_taskgroup_dag, complex_dag_expected_edges): ( dag, group, ( group_dm1, group_dm2, group_dm3, dm_in1, dm_in2, dm_in3, dm_in4, dm_out1, dm_out2, dm_out3, dm_out4, op_in1, op_out1, ), ) = test_complex_taskgroup_dag group_dm1 << [group_dm2, group_dm3] group << dm_in1 group << Label('label dm_in2 <=> group') << dm_in2 group << Label('label dm_in3/dm_in4 <=> group') << [dm_in3, dm_in4] group << Label('label op_in1 <=> group') << XComArg(op_in1, 'test_key') dm_out1 << group dm_out2 << Label('label group <=> dm_out2') << group [dm_out3, dm_out4] << Label('label group <=> dm_out3/dm_out4') << group XComArg(op_out1, 'test_key') << Label('label group <=> op_out1') << group compare_dag_edges(dag_edges(dag), complex_dag_expected_edges) ``` ###Assistant : Tests the complex reversed dag with a TaskGroup and a Label " 2187,"def postprocess_response(token_ids, tokenizer): eos_pos = len(token_ids) for i, tok_id in enumerate(token_ids): if tok_id == tokenizer.sep_token_id: eos_pos = i break token_ids = token_ids[:eos_pos] tokens = tokenizer.convert_ids_to_tokens(token_ids) tokens = tokenizer.merge_subword(tokens) return tokens ",Post-process the decoded sequence. Truncate from the first .,9,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def postprocess_response(token_ids, tokenizer): eos_pos = len(token_ids) for i, tok_id in enumerate(token_ids): if tok_id == tokenizer.sep_token_id: eos_pos = i break token_ids = token_ids[:eos_pos] tokens = tokenizer.convert_ids_to_tokens(token_ids) tokens = tokenizer.merge_subword(tokens) return tokens ``` ###Assistant : Post-process the decoded sequence. Truncate from the first . " 2188,"def clip_grad_norm_(self, parameters, max_norm, norm_type=2): if self.distributed_type == DistributedType.FSDP: self.unscale_gradients() parameters = [p for p in parameters] for model in self._models: if parameters == [p for p in model.parameters()]: return model.clip_grad_norm_(max_norm, norm_type) elif self.distributed_type == DistributedType.DEEPSPEED: # `accelerator.backward(loss)` is doing that automatically. 
Therefore, its implementation is not needed # We cannot return the gradient norm because DeepSpeed does it. return None self.unscale_gradients() return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type) "," Should be used in place of `torch.nn.utils.clip_grad_norm_`. Returns: `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector). Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(gradient_accumulation_steps=2) >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> for (input, target) in dataloader: ... optimizer.zero_grad() ... output = model(input) ... loss = loss_func(output, target) ... accelerator.backward(loss) ... if accelerator.sync_gradients: ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) ... optimizer.step() ``` ",69,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clip_grad_norm_(self, parameters, max_norm, norm_type=2): if self.distributed_type == DistributedType.FSDP: self.unscale_gradients() parameters = [p for p in parameters] for model in self._models: if parameters == [p for p in model.parameters()]: return model.clip_grad_norm_(max_norm, norm_type) elif self.distributed_type == DistributedType.DEEPSPEED: # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed # We cannot return the gradient norm because DeepSpeed does it. return None self.unscale_gradients() return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type) ``` ###Assistant : Should be used in place of `torch.nn.utils.clip_grad_norm_`. Returns: `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector). Example: ```python >>> from accelerate import Accelerator >>> accelerator = Accelerator(gradient_accumulation_steps=2) >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) >>> for (input, target) in dataloader: ... optimizer.zero_grad() ... output = model(input) ... loss = loss_func(output, target) ... accelerator.backward(loss) ... if accelerator.sync_gradients: ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) ... optimizer.step() ``` " 2189,"def check_library_list(self, libraries): if not isinstance(libraries, list): raise DistutilsSetupError( ""'libraries' option must be a list of tuples"") for lib in libraries: if not isinstance(lib, tuple) and len(lib) != 2: raise DistutilsSetupError( ""each element of 'libraries' must a 2-tuple"") name, build_info = lib if not isinstance(name, str): raise DistutilsSetupError( ""first element of each tuple in 'libraries' "" ""must be a string (the library name)"") if '/' in name or (os.sep != '/' and os.sep in name): raise DistutilsSetupError(""bad library name '%s': "" ""may not contain directory separators"" % lib[0]) if not isinstance(build_info, dict): raise DistutilsSetupError( ""second element of each tuple in 'libraries' "" ""must be a dictionary (build info)"") ","Ensure that the list of libraries is valid. `library` is presumably provided as a command option 'libraries'. This method checks that it is a list of 2-tuples, where the tuples are (library_name, build_info_dict). Raise DistutilsSetupError if the structure is invalid anywhere; just returns otherwise. ",44,108,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def check_library_list(self, libraries): if not isinstance(libraries, list): raise DistutilsSetupError( ""'libraries' option must be a list of tuples"") for lib in libraries: if not isinstance(lib, tuple) and len(lib) != 2: raise DistutilsSetupError( ""each element of 'libraries' must a 2-tuple"") name, build_info = lib if not isinstance(name, str): raise DistutilsSetupError( ""first element of each tuple in 'libraries' "" ""must be a string (the library name)"") if '/' in name or (os.sep != '/' and os.sep in name): raise DistutilsSetupError(""bad library name '%s': "" ""may not contain directory separators"" % lib[0]) if not isinstance(build_info, dict): raise DistutilsSetupError( ""second element of each tuple in 'libraries' "" ""must be a dictionary (build info)"") ``` ###Assistant : Ensure that the list of libraries is valid. `library` is presumably provided as a command option 'libraries'. This method checks that it is a list of 2-tuples, where the tuples are (library_name, build_info_dict). Raise DistutilsSetupError if the structure is invalid anywhere; just returns otherwise. " 2190,"def _process_new_state(cls, new_state, unprocessed, processed): if isinstance(new_state, str): # an existing state if new_state == '#pop': return -1 elif new_state in unprocessed: return (new_state,) elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state elif isinstance(new_state, combined): # combine a new state from existing ones tmp_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in new_state: assert istate != new_state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[tmp_state] = itokens return (tmp_state,) elif isinstance(new_state, tuple): # push more than one state for istate in new_state: assert (istate in unprocessed or istate in ('#pop', '#push')), \ 'unknown new state ' + istate return new_state else: assert False, 'unknown new state def %r' % new_state ",Preprocess the state transition action of a token definition.,9,130,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _process_new_state(cls, new_state, unprocessed, processed): if isinstance(new_state, str): # an existing state if new_state == '#pop': return -1 elif new_state in unprocessed: return (new_state,) elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state elif isinstance(new_state, combined): # combine a new state from existing ones tmp_state = '_tmp_%d' % cls._tmpname cls._tmpname += 1 itokens = [] for istate in new_state: assert istate != new_state, 'circular state ref %r' % istate itokens.extend(cls._process_state(unprocessed, processed, istate)) processed[tmp_state] = itokens return (tmp_state,) elif isinstance(new_state, tuple): # push more than one state for istate in new_state: assert (istate in unprocessed or istate in ('#pop', '#push')), \ 'unknown new state ' + istate return new_state else: assert False, 'unknown new state def %r' % new_state ``` ###Assistant : Preprocess the state transition action of a token definition. 
" 2191,"def test_ohe_infrequent_two_levels_user_cats(): X_train = np.array( [[""a""] * 5 + [""b""] * 20 + [""c""] * 10 + [""d""] * 3], dtype=object ).T ohe = OneHotEncoder( categories=[[""c"", ""d"", ""a"", ""b""]], sparse=False, handle_unknown=""infrequent_if_exist"", max_categories=2, ).fit(X_train) assert_array_equal(ohe.infrequent_categories_, [[""c"", ""d"", ""a""]]) X_test = [[""b""], [""a""], [""c""], [""d""], [""e""]] expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) X_trans = ohe.transform(X_test) assert_allclose(expected, X_trans) # 'infrequent' is used to denote the infrequent categories for # `inverse_transform` expected_inv = [[col] for col in [""b""] + [""infrequent_sklearn""] * 4] X_inv = ohe.inverse_transform(X_trans) assert_array_equal(expected_inv, X_inv) ",Test that the order of the categories provided by a user is respected.,13,89,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_ohe_infrequent_two_levels_user_cats(): X_train = np.array( [[""a""] * 5 + [""b""] * 20 + [""c""] * 10 + [""d""] * 3], dtype=object ).T ohe = OneHotEncoder( categories=[[""c"", ""d"", ""a"", ""b""]], sparse=False, handle_unknown=""infrequent_if_exist"", max_categories=2, ).fit(X_train) assert_array_equal(ohe.infrequent_categories_, [[""c"", ""d"", ""a""]]) X_test = [[""b""], [""a""], [""c""], [""d""], [""e""]] expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) X_trans = ohe.transform(X_test) assert_allclose(expected, X_trans) # 'infrequent' is used to denote the infrequent categories for # `inverse_transform` expected_inv = [[col] for col in [""b""] + [""infrequent_sklearn""] * 4] X_inv = ohe.inverse_transform(X_trans) assert_array_equal(expected_inv, X_inv) ``` ###Assistant : Test that the order of the categories provided by a user is respected. " 2192,"def fit(self, X, y=None): self._validate_params() random_state = check_random_state(self.random_state) X = self._validate_data(X) self.mean_ = X.mean(axis=0) X = X - self.mean_ if self.n_components is None: n_components = X.shape[1] else: n_components = self.n_components return self._fit(X, n_components, random_state) ","Fit the model from data in X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Returns the instance itself. ",53,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit(self, X, y=None): self._validate_params() random_state = check_random_state(self.random_state) X = self._validate_data(X) self.mean_ = X.mean(axis=0) X = X - self.mean_ if self.n_components is None: n_components = X.shape[1] else: n_components = self.n_components return self._fit(X, n_components, random_state) ``` ###Assistant : Fit the model from data in X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Returns the instance itself. 
" 2193,"def test_create_profile(): invoke_and_assert( [""profile"", ""create"", ""foo""], expected_output=( f ), ) profiles = load_profiles() assert profiles[""foo""] == Profile( name=""foo"", settings={}, source=PREFECT_PROFILES_PATH.value() ) "," Created profile 'foo'. Switch to your new profile with: prefect profile use 'foo' Or, to use it for a single command, include the `-p` option: prefect -p 'foo' config view ",30,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_create_profile(): invoke_and_assert( [""profile"", ""create"", ""foo""], expected_output=( f ), ) profiles = load_profiles() assert profiles[""foo""] == Profile( name=""foo"", settings={}, source=PREFECT_PROFILES_PATH.value() ) ``` ###Assistant : Created profile 'foo'. Switch to your new profile with: prefect profile use 'foo' Or, to use it for a single command, include the `-p` option: prefect -p 'foo' config view " 2194,"def test_mixed_errorbar_polar_caps(): fig = plt.figure() ax = plt.subplot(111, projection='polar') # symmetric errorbars th_sym = [1, 2, 3] r_sym = [0.9]*3 ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt=""o"") # long errorbars th_long = [np.pi/2 + .1, np.pi + .1] r_long = [1.8, 2.2] ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt=""o"") # asymmetric errorbars th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1] r_asym = [1.1]*3 xerr = [[.3, .3, .2], [.2, .3, .3]] yerr = [[.35, .5, .5], [.5, .35, .5]] ax.errorbar(th_asym, r_asym, xerr=xerr, yerr=yerr, fmt=""o"") # overlapping errorbar th_over = [2.1] r_over = [3.1] ax.errorbar(th_over, r_over, xerr=10, yerr=.2, fmt=""o"") "," Mix several polar errorbar use cases in a single test figure. It is advisable to position individual points off the grid. If there are problems with reproducibility of this test, consider removing grid. ",33,97,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_mixed_errorbar_polar_caps(): fig = plt.figure() ax = plt.subplot(111, projection='polar') # symmetric errorbars th_sym = [1, 2, 3] r_sym = [0.9]*3 ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt=""o"") # long errorbars th_long = [np.pi/2 + .1, np.pi + .1] r_long = [1.8, 2.2] ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt=""o"") # asymmetric errorbars th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1] r_asym = [1.1]*3 xerr = [[.3, .3, .2], [.2, .3, .3]] yerr = [[.35, .5, .5], [.5, .35, .5]] ax.errorbar(th_asym, r_asym, xerr=xerr, yerr=yerr, fmt=""o"") # overlapping errorbar th_over = [2.1] r_over = [3.1] ax.errorbar(th_over, r_over, xerr=10, yerr=.2, fmt=""o"") ``` ###Assistant : Mix several polar errorbar use cases in a single test figure. It is advisable to position individual points off the grid. If there are problems with reproducibility of this test, consider removing grid. " 2195,"def test_edgeql_for_01(self): self.assert_test_query( r, {(1, 1), (2, 2), (3, 3)}, ) "," FOR X IN {1,2,3} UNION ((SELECT X), (SELECT X)); ",9,11,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_edgeql_for_01(self): self.assert_test_query( r, {(1, 1), (2, 2), (3, 3)}, ) ``` ###Assistant : FOR X IN {1,2,3} UNION ((SELECT X), (SELECT X)); " 2196,"def on_clicked(self, index): if not index.isValid(): return item = self._model().data(index, downloads.ModelRole.item) if item.done and item.successful: item.open_file() item.remove() ","Handle clicking of an item. Args: index: The QModelIndex of the clicked item. ",13,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def on_clicked(self, index): if not index.isValid(): return item = self._model().data(index, downloads.ModelRole.item) if item.done and item.successful: item.open_file() item.remove() ``` ###Assistant : Handle clicking of an item. Args: index: The QModelIndex of the clicked item. " 2197,"def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias @add_start_docstrings( , XLM_ROBERTA_XL_START_DOCSTRING, )"," XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. ",23,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias @add_start_docstrings( , XLM_ROBERTA_XL_START_DOCSTRING, ) ``` ###Assistant : XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. " 2198,"def matches_minor(self, other): return (self.major, self.minor) == (other.major, other.minor) ","Check whether this version matches the other in (major, minor).",10,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def matches_minor(self, other): return (self.major, self.minor) == (other.major, other.minor) ``` ###Assistant : Check whether this version matches the other in (major, minor). " 2199,"def test_04_query_predictor_single_where_condition(self): time.sleep(120) # TODO query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.TABLE) self.assertTrue(len(response.data_frame) == 1) self.assertTrue(response.data_frame['sqft'][0] == 100) self.assertTrue(response.data_frame['rental_price'][0] is not None) "," SELECT target from {self.test_model_1} WHERE sqft=100 ",6,24,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_04_query_predictor_single_where_condition(self): time.sleep(120) # TODO query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.TABLE) self.assertTrue(len(response.data_frame) == 1) self.assertTrue(response.data_frame['sqft'][0] == 100) self.assertTrue(response.data_frame['rental_price'][0] is not None) ``` ###Assistant : SELECT target from {self.test_model_1} WHERE sqft=100 " 2200,"def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo: room_id = ""!backfill-room-test:some-host"" depth_map: Dict[str, int] = { ""1"": 1, ""2"": 2, ""insertion_eventA"": 3, ""3"": 4, ""insertion_eventB"": 5, ""4"": 6, ""5"": 7, } "," Sets up a room with various insertion event backward extremities to test backfill functions against. Returns: _BackfillSetupInfo including the `room_id` to test against and `depth_map` of events in the room ",30,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo: room_id = ""!backfill-room-test:some-host"" depth_map: Dict[str, int] = { ""1"": 1, ""2"": 2, ""insertion_eventA"": 3, ""3"": 4, ""insertion_eventB"": 5, ""4"": 6, ""5"": 7, } ``` ###Assistant : Sets up a room with various insertion event backward extremities to test backfill functions against. Returns: _BackfillSetupInfo including the `room_id` to test against and `depth_map` of events in the room " 2201,"def postprocessing(data): if type_to_string(type(data)) == ""torch.Tensor"": try: import torch from torchvision import transforms # By default Torch tensors are displayed as images. To display them as JSON, # the user can simply convert them to numpy arrays. transformer = transforms.ToPILImage() return transformer(torch.squeeze(data)) except ModuleNotFoundError: logger.warning( ""Module `torchvision` isn't installed, unable to process torch tensor."" ) return data return data ","Add support for types that are not supported by Gradio. Some data types like PyTorch tensors, cannot be processed and displayed through Gradio. Thus we extend support to these data types by transforming them into a form that Gradio can process and display. ",43,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def postprocessing(data): if type_to_string(type(data)) == ""torch.Tensor"": try: import torch from torchvision import transforms # By default Torch tensors are displayed as images. To display them as JSON, # the user can simply convert them to numpy arrays. transformer = transforms.ToPILImage() return transformer(torch.squeeze(data)) except ModuleNotFoundError: logger.warning( ""Module `torchvision` isn't installed, unable to process torch tensor."" ) return data return data ``` ###Assistant : Add support for types that are not supported by Gradio. Some data types like PyTorch tensors, cannot be processed and displayed through Gradio. Thus we extend support to these data types by transforming them into a form that Gradio can process and display. " 2202,"def get_scrap_item_details(bom_no): scrap_items = {} for item in frappe.db.sql( , bom_no, as_dict=1, ): scrap_items[item.item_code] = item.stock_qty return scrap_items ","select item_code, stock_qty from `tabBOM Scrap Item` where parent = %s",11,18,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_scrap_item_details(bom_no): scrap_items = {} for item in frappe.db.sql( , bom_no, as_dict=1, ): scrap_items[item.item_code] = item.stock_qty return scrap_items ``` ###Assistant : select item_code, stock_qty from `tabBOM Scrap Item` where parent = %s " 2203,"def get_is_active(self, session=NEW_SESSION) -> Optional[None]: return session.query(DagModel.is_active).filter(DagModel.dag_id == self.dag_id).scalar() ",Returns a boolean indicating whether this DAG is active,9,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_is_active(self, session=NEW_SESSION) -> Optional[None]: return session.query(DagModel.is_active).filter(DagModel.dag_id == self.dag_id).scalar() ``` ###Assistant : Returns a boolean indicating whether this DAG is active " 2204,"def is_homepage(self) -> bool: return self.is_top_level and self.is_index and self.file.url in ('.', './', 'index.html') previous_page: Optional[Page] next_page: Optional[Page] parent: Optional[Section] children: None = None is_section: bool = False is_page: bool = True is_link: bool = False ","Evaluates to `True` for the homepage of the site and `False` for all other pages.The [page][mkdocs.structure.pages.Page] object for the previous page or `None`. The value will be `None` if the current page is the first item in the site navigation or if the current page is not included in the navigation at all.The [page][mkdocs.structure.pages.Page] object for the next page or `None`. The value will be `None` if the current page is the last item in the site navigation or if the current page is not included in the navigation at all.The immediate parent of the page in the site navigation. `None` if the page is at the top level.Pages do not contain children and the attribute is always `None`.Indicates that the navigation object is a ""section"" object. Always `False` for page objects.Indicates that the navigation object is a ""page"" object. Always `True` for page objects.Indicates that the navigation object is a ""link"" object. Always `False` for page objects.",158,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_homepage(self) -> bool: return self.is_top_level and self.is_index and self.file.url in ('.', './', 'index.html') previous_page: Optional[Page] next_page: Optional[Page] parent: Optional[Section] children: None = None is_section: bool = False is_page: bool = True is_link: bool = False ``` ###Assistant : Evaluates to `True` for the homepage of the site and `False` for all other pages.The [page][mkdocs.structure.pages.Page] object for the previous page or `None`. The value will be `None` if the current page is the first item in the site navigation or if the current page is not included in the navigation at all.The [page][mkdocs.structure.pages.Page] object for the next page or `None`. The value will be `None` if the current page is the last item in the site navigation or if the current page is not included in the navigation at all.The immediate parent of the page in the site navigation. `None` if the page is at the top level.Pages do not contain children and the attribute is always `None`.Indicates that the navigation object is a ""section"" object. Always `False` for page objects.Indicates that the navigation object is a ""page"" object. Always `True` for page objects.Indicates that the navigation object is a ""link"" object. 
Always `False` for page objects. " 2205,"def _filetypes(self): all_files = (""All files"", ""*.*"") filetypes = dict( default=(all_files,), alignments=[(""Faceswap Alignments"", ""*.fsa""), all_files], config_project=[(""Faceswap Project files"", ""*.fsw""), all_files], config_task=[(""Faceswap Task files"", ""*.fst""), all_files], config_all=[(""Faceswap Project and Task files"", ""*.fst *.fsw""), all_files], csv=[(""Comma separated values"", ""*.csv""), all_files], image=[(""Bitmap"", ""*.bmp""), (""JPG"", ""*.jpeg *.jpg""), (""PNG"", ""*.png""), (""TIFF"", ""*.tif *.tiff""), all_files], ini=[(""Faceswap config files"", ""*.ini""), all_files], json=[(""JSON file"", ""*.json""), all_files], model=[(""Keras model files"", ""*.h5""), all_files], state=[(""State files"", ""*.json""), all_files], log=[(""Log files"", ""*.log""), all_files], video=[(""Audio Video Interleave"", ""*.avi""), (""Flash Video"", ""*.flv""), (""Matroska"", ""*.mkv""), (""MOV"", ""*.mov""), (""MP4"", ""*.mp4""), (""MPEG"", ""*.mpeg *.mpg *.ts *.vob""), (""WebM"", ""*.webm""), (""Windows Media Video"", ""*.wmv""), all_files]) # Add in multi-select options and upper case extensions for Linux for key in filetypes: if platform.system() == ""Linux"": filetypes[key] = [item if item[0] == ""All files"" else (item[0], f""{item[1]} {item[1].upper()}"") for item in filetypes[key]] if len(filetypes[key]) > 2: multi = [f""{key.title()} Files""] multi.append("" "".join([ftype[1] for ftype in filetypes[key] if ftype[0] != ""All files""])) filetypes[key].insert(0, tuple(multi)) return filetypes ", dict: The accepted extensions for each file type for opening/saving ,10,154,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _filetypes(self): all_files = (""All files"", ""*.*"") filetypes = dict( default=(all_files,), alignments=[(""Faceswap Alignments"", ""*.fsa""), all_files], config_project=[(""Faceswap Project files"", ""*.fsw""), all_files], config_task=[(""Faceswap Task files"", ""*.fst""), all_files], config_all=[(""Faceswap Project and Task files"", ""*.fst *.fsw""), all_files], csv=[(""Comma separated values"", ""*.csv""), all_files], image=[(""Bitmap"", ""*.bmp""), (""JPG"", ""*.jpeg *.jpg""), (""PNG"", ""*.png""), (""TIFF"", ""*.tif *.tiff""), all_files], ini=[(""Faceswap config files"", ""*.ini""), all_files], json=[(""JSON file"", ""*.json""), all_files], model=[(""Keras model files"", ""*.h5""), all_files], state=[(""State files"", ""*.json""), all_files], log=[(""Log files"", ""*.log""), all_files], video=[(""Audio Video Interleave"", ""*.avi""), (""Flash Video"", ""*.flv""), (""Matroska"", ""*.mkv""), (""MOV"", ""*.mov""), (""MP4"", ""*.mp4""), (""MPEG"", ""*.mpeg *.mpg *.ts *.vob""), (""WebM"", ""*.webm""), (""Windows Media Video"", ""*.wmv""), all_files]) # Add in multi-select options and upper case extensions for Linux for key in filetypes: if platform.system() == ""Linux"": filetypes[key] = [item if item[0] == ""All files"" else (item[0], f""{item[1]} {item[1].upper()}"") for item in filetypes[key]] if len(filetypes[key]) > 2: multi = [f""{key.title()} Files""] multi.append("" "".join([ftype[1] for ftype in filetypes[key] if ftype[0] != ""All files""])) filetypes[key].insert(0, tuple(multi)) return filetypes ``` ###Assistant : dict: The accepted extensions for each file type for opening/saving " 2206,"def result(self): if self._state == _CANCELLED: exc = self._make_cancelled_error() raise exc if self._state != _FINISHED: raise 
exceptions.InvalidStateError('Result is not ready.') self.__log_traceback = False if self._exception is not None: raise self._exception return self._result ","Return the result this future represents. If the future has been cancelled, raises CancelledError. If the future's result isn't yet available, raises InvalidStateError. If the future is done and has an exception set, this exception is raised. ",37,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def result(self): if self._state == _CANCELLED: exc = self._make_cancelled_error() raise exc if self._state != _FINISHED: raise exceptions.InvalidStateError('Result is not ready.') self.__log_traceback = False if self._exception is not None: raise self._exception return self._result ``` ###Assistant : Return the result this future represents. If the future has been cancelled, raises CancelledError. If the future's result isn't yet available, raises InvalidStateError. If the future is done and has an exception set, this exception is raised. " 2207,"def _set_fsspec_for_multiprocess() -> None: fsspec.asyn.iothread[0] = None fsspec.asyn.loop[0] = None "," Clear reference to the loop and thread. This is necessary otherwise HTTPFileSystem hangs in the ML training loop. Only required for fsspec >= 0.9.0 See https://github.com/fsspec/gcsfs/issues/379 ",26,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_fsspec_for_multiprocess() -> None: fsspec.asyn.iothread[0] = None fsspec.asyn.loop[0] = None ``` ###Assistant : Clear reference to the loop and thread. This is necessary otherwise HTTPFileSystem hangs in the ML training loop. Only required for fsspec >= 0.9.0 See https://github.com/fsspec/gcsfs/issues/379 " 2208,"def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08): out = np.cumsum(arr, axis=axis, dtype=np.float64) expected = np.sum(arr, axis=axis, dtype=np.float64) if not np.all( np.isclose( out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True ) ): warnings.warn( ""cumsum was found to be unstable: "" ""its last element does not correspond to sum"", RuntimeWarning, ) return out ","Use high precision for cumsum and check that final value matches sum. Warns if the final cumulative sum does not match the sum (up to the chosen tolerance). Parameters ---------- arr : array-like To be cumulatively summed as flat. axis : int, default=None Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. rtol : float, default=1e-05 Relative tolerance, see ``np.allclose``. atol : float, default=1e-08 Absolute tolerance, see ``np.allclose``. Returns ------- out : ndarray Array with the cumulative sums along the chosen axis. ",93,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08): out = np.cumsum(arr, axis=axis, dtype=np.float64) expected = np.sum(arr, axis=axis, dtype=np.float64) if not np.all( np.isclose( out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True ) ): warnings.warn( ""cumsum was found to be unstable: "" ""its last element does not correspond to sum"", RuntimeWarning, ) return out ``` ###Assistant : Use high precision for cumsum and check that final value matches sum. Warns if the final cumulative sum does not match the sum (up to the chosen tolerance). 
Parameters ---------- arr : array-like To be cumulatively summed as flat. axis : int, default=None Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. rtol : float, default=1e-05 Relative tolerance, see ``np.allclose``. atol : float, default=1e-08 Absolute tolerance, see ``np.allclose``. Returns ------- out : ndarray Array with the cumulative sums along the chosen axis. " 2209,"def confirm(self): args = request.args dag_id = args.get('dag_id') task_id = args.get('task_id') dag_run_id = args.get('dag_run_id') state = args.get('state') origin = args.get('origin') if 'map_index' not in args: map_indexes: Optional[List[int]] = None else: map_indexes = args.getlist('map_index', type=int) upstream = to_boolean(args.get('upstream')) downstream = to_boolean(args.get('downstream')) future = to_boolean(args.get('future')) past = to_boolean(args.get('past')) origin = origin or url_for('Airflow.index') dag = get_airflow_app().dag_bag.get_dag(dag_id) if not dag: msg = f'DAG {dag_id} not found' return redirect_or_json(origin, msg, status='error', status_code=404) try: task = dag.get_task(task_id) except airflow.exceptions.TaskNotFound: msg = f""Task {task_id} not found"" return redirect_or_json(origin, msg, status='error', status_code=404) task.dag = dag if state not in ( 'success', 'failed', ): msg = f""Invalid state {state}, must be either 'success' or 'failed'"" return redirect_or_json(origin, msg, status='error', status_code=400) latest_execution_date = dag.get_latest_execution_date() if not latest_execution_date: msg = f""Cannot mark tasks as {state}, seem that dag {dag_id} has never run"" return redirect_or_json(origin, msg, status='error', status_code=400) if map_indexes is None: tasks: Union[List[Operator], List[Tuple[Operator, int]]] = [task] else: tasks = [(task, map_index) for map_index in map_indexes] to_be_altered = set_state( tasks=tasks, run_id=dag_run_id, upstream=upstream, downstream=downstream, future=future, past=past, state=state, commit=False, ) if request.headers.get('Accept') == 'application/json': details = [str(t) for t in to_be_altered] return htmlsafe_json_dumps(details, separators=(',', ':')) details = ""\n"".join(str(t) for t in to_be_altered) response = self.render_template( ""airflow/confirm.html"", endpoint=url_for(f'Airflow.{state}'), message=f""Task instances you are about to mark as {state}:"", details=details, ) return response ",Show confirmation page for marking tasks as success or failed.,10,208,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def confirm(self): args = request.args dag_id = args.get('dag_id') task_id = args.get('task_id') dag_run_id = args.get('dag_run_id') state = args.get('state') origin = args.get('origin') if 'map_index' not in args: map_indexes: Optional[List[int]] = None else: map_indexes = args.getlist('map_index', type=int) upstream = to_boolean(args.get('upstream')) downstream = to_boolean(args.get('downstream')) future = to_boolean(args.get('future')) past = to_boolean(args.get('past')) origin = origin or url_for('Airflow.index') dag = get_airflow_app().dag_bag.get_dag(dag_id) if not dag: msg = f'DAG {dag_id} not found' return redirect_or_json(origin, msg, status='error', status_code=404) try: task = dag.get_task(task_id) except airflow.exceptions.TaskNotFound: msg = f""Task {task_id} not found"" return redirect_or_json(origin, msg, status='error', status_code=404) task.dag = dag if state not in ( 'success', 'failed', ): msg = f""Invalid state {state}, must be either 'success' or 'failed'"" return redirect_or_json(origin, msg, status='error', status_code=400) latest_execution_date = dag.get_latest_execution_date() if not latest_execution_date: msg = f""Cannot mark tasks as {state}, seem that dag {dag_id} has never run"" return redirect_or_json(origin, msg, status='error', status_code=400) if map_indexes is None: tasks: Union[List[Operator], List[Tuple[Operator, int]]] = [task] else: tasks = [(task, map_index) for map_index in map_indexes] to_be_altered = set_state( tasks=tasks, run_id=dag_run_id, upstream=upstream, downstream=downstream, future=future, past=past, state=state, commit=False, ) if request.headers.get('Accept') == 'application/json': details = [str(t) for t in to_be_altered] return htmlsafe_json_dumps(details, separators=(',', ':')) details = ""\n"".join(str(t) for t in to_be_altered) response = self.render_template( ""airflow/confirm.html"", endpoint=url_for(f'Airflow.{state}'), message=f""Task instances you are about to mark as {state}:"", details=details, ) return response ``` ###Assistant : Show confirmation page for marking tasks as success or failed. " 2210,"def resample(self) -> Dict[str, Any]: result = {} for module in self.nas_modules: result.update(module.resample(memo=result)) return result ","Trigger the resample for each ``nas_module``. Sometimes (e.g., in differentiable cases), it does nothing. Returns ------- dict Sampled architecture. ",19,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resample(self) -> Dict[str, Any]: result = {} for module in self.nas_modules: result.update(module.resample(memo=result)) return result ``` ###Assistant : Trigger the resample for each ``nas_module``. Sometimes (e.g., in differentiable cases), it does nothing. Returns ------- dict Sampled architecture. " 2211,"def test_json_to_doc_attribute_consistency(doc): doc_json = doc.to_json() doc_json[""tokens""][1].pop(""morph"") with pytest.raises(ValueError): Doc(doc.vocab).from_json(doc_json) ",Test that Doc.from_json() raises an exception if tokens don't all have the same set of properties.,16,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_json_to_doc_attribute_consistency(doc): doc_json = doc.to_json() doc_json[""tokens""][1].pop(""morph"") with pytest.raises(ValueError): Doc(doc.vocab).from_json(doc_json) ``` ###Assistant : Test that Doc.from_json() raises an exception if tokens don't all have the same set of properties. " 2212,"def test_index_css_classes(self): # General index page response = self.client.get(reverse(""admin:index"")) self.assertContains(response, '
    ') self.assertContains(response, '') # App index page response = self.client.get(reverse(""admin:app_list"", args=(""admin_views"",))) self.assertContains(response, '
    ') self.assertContains(response, '') "," CSS class names are used for each app and model on the admin index pages (#17050). ",16,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_index_css_classes(self): # General index page response = self.client.get(reverse(""admin:index"")) self.assertContains(response, '
    ') self.assertContains(response, '') # App index page response = self.client.get(reverse(""admin:app_list"", args=(""admin_views"",))) self.assertContains(response, '
    ') self.assertContains(response, '') ``` ###Assistant : CSS class names are used for each app and model on the admin index pages (#17050). " 2213,"def test_action_column_class(self): response = self.client.get(reverse(""admin:admin_views_subscriber_changelist"")) self.assertIsNotNone(response.context[""action_form""]) self.assertContains(response, ""action-checkbox-column"") ",The checkbox column class is present in the response.,9,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_action_column_class(self): response = self.client.get(reverse(""admin:admin_views_subscriber_changelist"")) self.assertIsNotNone(response.context[""action_form""]) self.assertContains(response, ""action-checkbox-column"") ``` ###Assistant : The checkbox column class is present in the response. " 2214,"def hashkey(cls, *args, **kwargs): return cachetools.keys.hashkey(f""<{cls.__name__}>"", *args, **kwargs) "," Usage of @cachetools.cached has changed to @cachetools.cachedmethod The previous cachetools decorator called the hash function and passed in (self, key). The new cachtools decorator calls the hash function with just (key). Ideally, we would continue to pass self, however, the cachetools decorator interface does not allow us to. This hashkey function is to maintain that the key generated looks like ('', key). The thought is that maybe it is important to namespace our cache to the SettingsWrapper scope in case some other usage of this cache exists. I can not think of how any other system could and would use our private cache, but for safety sake we are ensuring the key schema does not change. ",116,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def hashkey(cls, *args, **kwargs): return cachetools.keys.hashkey(f""<{cls.__name__}>"", *args, **kwargs) ``` ###Assistant : Usage of @cachetools.cached has changed to @cachetools.cachedmethod The previous cachetools decorator called the hash function and passed in (self, key). The new cachtools decorator calls the hash function with just (key). Ideally, we would continue to pass self, however, the cachetools decorator interface does not allow us to. This hashkey function is to maintain that the key generated looks like ('', key). The thought is that maybe it is important to namespace our cache to the SettingsWrapper scope in case some other usage of this cache exists. I can not think of how any other system could and would use our private cache, but for safety sake we are ensuring the key schema does not change. " 2215,"def load(cls, path): with open(path) as yaml_file: data = yaml.safe_load(yaml_file) if not isinstance(data, dict): raise TypeError(f'Conent of config file {path} is not a dict/object') utils.set_base_path(Path(path).parent) config = cls(**data) utils.unset_base_path() return config "," Load a YAML config file from file system. Since YAML is a superset of JSON, it can also load JSON files. This method raises exception if: - The file is not available - The file content is not valid YAML - Top level value of the YAML is not object - The YAML contains not supported fields It does not raise exception when the YAML misses fields or contains bad fields. Parameters ---------- path : PathLike Path of the config file. Returns ------- cls An object of ConfigBase subclass. ",89,31,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def load(cls, path): with open(path) as yaml_file: data = yaml.safe_load(yaml_file) if not isinstance(data, dict): raise TypeError(f'Conent of config file {path} is not a dict/object') utils.set_base_path(Path(path).parent) config = cls(**data) utils.unset_base_path() return config ``` ###Assistant : Load a YAML config file from file system. Since YAML is a superset of JSON, it can also load JSON files. This method raises exception if: - The file is not available - The file content is not valid YAML - Top level value of the YAML is not object - The YAML contains not supported fields It does not raise exception when the YAML misses fields or contains bad fields. Parameters ---------- path : PathLike Path of the config file. Returns ------- cls An object of ConfigBase subclass. " 2216,"def get_leave_allocation_for_period(employee, leave_type, from_date, to_date): leave_allocated = 0 leave_allocations = frappe.db.sql( , {""from_date"": from_date, ""to_date"": to_date, ""employee"": employee, ""leave_type"": leave_type}, as_dict=1, ) if leave_allocations: for leave_alloc in leave_allocations: leave_allocated += leave_alloc.total_leaves_allocated return leave_allocated @frappe.whitelist()"," select employee, leave_type, from_date, to_date, total_leaves_allocated from `tabLeave Allocation` where employee=%(employee)s and leave_type=%(leave_type)s and docstatus=1 and (from_date between %(from_date)s and %(to_date)s or to_date between %(from_date)s and %(to_date)s or (from_date < %(from_date)s and to_date > %(to_date)s)) ",35,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_leave_allocation_for_period(employee, leave_type, from_date, to_date): leave_allocated = 0 leave_allocations = frappe.db.sql( , {""from_date"": from_date, ""to_date"": to_date, ""employee"": employee, ""leave_type"": leave_type}, as_dict=1, ) if leave_allocations: for leave_alloc in leave_allocations: leave_allocated += leave_alloc.total_leaves_allocated return leave_allocated @frappe.whitelist() ``` ###Assistant : select employee, leave_type, from_date, to_date, total_leaves_allocated from `tabLeave Allocation` where employee=%(employee)s and leave_type=%(leave_type)s and docstatus=1 and (from_date between %(from_date)s and %(to_date)s or to_date between %(from_date)s and %(to_date)s or (from_date < %(from_date)s and to_date > %(to_date)s)) " 2217,"def test_proxy_model_content_type_is_used_for_log_entries(self): proxy_content_type = ContentType.objects.get_for_model( ArticleProxy, for_concrete_model=False ) post_data = { ""site"": self.site.pk, ""title"": ""Foo"", ""hist"": ""Bar"", ""created_0"": ""2015-12-25"", ""created_1"": ""00:00"", } changelist_url = reverse(""admin:admin_utils_articleproxy_changelist"") # add proxy_add_url = reverse(""admin:admin_utils_articleproxy_add"") response = self.client.post(proxy_add_url, post_data) self.assertRedirects(response, changelist_url) proxy_addition_log = LogEntry.objects.latest(""id"") self.assertEqual(proxy_addition_log.action_flag, ADDITION) self.assertEqual(proxy_addition_log.content_type, proxy_content_type) # change article_id = proxy_addition_log.object_id proxy_change_url = reverse( ""admin:admin_utils_articleproxy_change"", args=(article_id,) ) post_data[""title""] = ""New"" response = self.client.post(proxy_change_url, post_data) self.assertRedirects(response, changelist_url) proxy_change_log = LogEntry.objects.latest(""id"") 
self.assertEqual(proxy_change_log.action_flag, CHANGE) self.assertEqual(proxy_change_log.content_type, proxy_content_type) # delete proxy_delete_url = reverse( ""admin:admin_utils_articleproxy_delete"", args=(article_id,) ) response = self.client.post(proxy_delete_url, {""post"": ""yes""}) self.assertRedirects(response, changelist_url) proxy_delete_log = LogEntry.objects.latest(""id"") self.assertEqual(proxy_delete_log.action_flag, DELETION) self.assertEqual(proxy_delete_log.content_type, proxy_content_type) "," Log entries for proxy models should have the proxy model's contenttype (#21084). ",12,92,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_proxy_model_content_type_is_used_for_log_entries(self): proxy_content_type = ContentType.objects.get_for_model( ArticleProxy, for_concrete_model=False ) post_data = { ""site"": self.site.pk, ""title"": ""Foo"", ""hist"": ""Bar"", ""created_0"": ""2015-12-25"", ""created_1"": ""00:00"", } changelist_url = reverse(""admin:admin_utils_articleproxy_changelist"") # add proxy_add_url = reverse(""admin:admin_utils_articleproxy_add"") response = self.client.post(proxy_add_url, post_data) self.assertRedirects(response, changelist_url) proxy_addition_log = LogEntry.objects.latest(""id"") self.assertEqual(proxy_addition_log.action_flag, ADDITION) self.assertEqual(proxy_addition_log.content_type, proxy_content_type) # change article_id = proxy_addition_log.object_id proxy_change_url = reverse( ""admin:admin_utils_articleproxy_change"", args=(article_id,) ) post_data[""title""] = ""New"" response = self.client.post(proxy_change_url, post_data) self.assertRedirects(response, changelist_url) proxy_change_log = LogEntry.objects.latest(""id"") self.assertEqual(proxy_change_log.action_flag, CHANGE) self.assertEqual(proxy_change_log.content_type, proxy_content_type) # delete proxy_delete_url = reverse( ""admin:admin_utils_articleproxy_delete"", args=(article_id,) ) response = self.client.post(proxy_delete_url, {""post"": ""yes""}) self.assertRedirects(response, changelist_url) proxy_delete_log = LogEntry.objects.latest(""id"") self.assertEqual(proxy_delete_log.action_flag, DELETION) self.assertEqual(proxy_delete_log.content_type, proxy_content_type) ``` ###Assistant : Log entries for proxy models should have the proxy model's contenttype (#21084). " 2218,"def typename(typ, short=False) -> str: if not isinstance(typ, type): return typename(type(typ)) try: if not typ.__module__ or typ.__module__ == ""builtins"": return typ.__name__ else: if short: module, *_ = typ.__module__.split(""."") else: module = typ.__module__ return module + ""."" + typ.__name__ except AttributeError: return str(typ) "," Return the name of a type Examples -------- >>> typename(int) 'int' >>> from dask.core import literal >>> typename(literal) 'dask.core.literal' >>> typename(literal, short=True) 'dask.literal' ",23,42,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def typename(typ, short=False) -> str: if not isinstance(typ, type): return typename(type(typ)) try: if not typ.__module__ or typ.__module__ == ""builtins"": return typ.__name__ else: if short: module, *_ = typ.__module__.split(""."") else: module = typ.__module__ return module + ""."" + typ.__name__ except AttributeError: return str(typ) ``` ###Assistant : Return the name of a type Examples -------- >>> typename(int) 'int' >>> from dask.core import literal >>> typename(literal) 'dask.core.literal' >>> typename(literal, short=True) 'dask.literal' " 2219,"def fetch_command(self, subcommand): # Get commands outside of try block to prevent swallowing exceptions commands = get_commands() try: app_name = commands[subcommand] except KeyError: if os.environ.get(""DJANGO_SETTINGS_MODULE""): # If `subcommand` is missing due to misconfigured settings, the # following line will retrigger an ImproperlyConfigured exception # (get_commands() swallows the original one) so the user is # informed about it. settings.INSTALLED_APPS elif not settings.configured: sys.stderr.write(""No Django settings specified.\n"") possible_matches = get_close_matches(subcommand, commands) sys.stderr.write(""Unknown command: %r"" % subcommand) if possible_matches: sys.stderr.write("". Did you mean %s?"" % possible_matches[0]) sys.stderr.write(""\nType '%s help' for usage.\n"" % self.prog_name) sys.exit(1) if isinstance(app_name, BaseCommand): # If the command is already loaded, use it directly. klass = app_name else: klass = load_command_class(app_name, subcommand) return klass "," Try to fetch the given subcommand, printing a message with the appropriate command called from the command line (usually ""django-admin"" or ""manage.py"") if it can't be found. ",27,114,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fetch_command(self, subcommand): # Get commands outside of try block to prevent swallowing exceptions commands = get_commands() try: app_name = commands[subcommand] except KeyError: if os.environ.get(""DJANGO_SETTINGS_MODULE""): # If `subcommand` is missing due to misconfigured settings, the # following line will retrigger an ImproperlyConfigured exception # (get_commands() swallows the original one) so the user is # informed about it. settings.INSTALLED_APPS elif not settings.configured: sys.stderr.write(""No Django settings specified.\n"") possible_matches = get_close_matches(subcommand, commands) sys.stderr.write(""Unknown command: %r"" % subcommand) if possible_matches: sys.stderr.write("". Did you mean %s?"" % possible_matches[0]) sys.stderr.write(""\nType '%s help' for usage.\n"" % self.prog_name) sys.exit(1) if isinstance(app_name, BaseCommand): # If the command is already loaded, use it directly. klass = app_name else: klass = load_command_class(app_name, subcommand) return klass ``` ###Assistant : Try to fetch the given subcommand, printing a message with the appropriate command called from the command line (usually ""django-admin"" or ""manage.py"") if it can't be found. 
" 2220,"def url(self, name): name = self._normalize_name(clean_name(name)) blob = self.bucket.blob(name) blob_params = self.get_object_parameters(name) no_signed_url = ( blob_params.get('acl', self.default_acl) == 'publicRead' or not self.querystring_auth) if not self.custom_endpoint and no_signed_url: return blob.public_url elif no_signed_url: out = '{storage_base_url}/{quoted_name}'.format( storage_base_url=self.custom_endpoint, quoted_name=_quote(name, safe=b""/~""), ) return out elif not self.custom_endpoint: out2 = blob.generate_signed_url( expiration=self.expiration, version=""v4"", **self._get_signing_kwargs() ) return out2 else: out3 = blob.generate_signed_url( bucket_bound_hostname=self.custom_endpoint, expiration=self.expiration, version=""v4"", **self._get_signing_kwargs() ) return out3 "," Return public url or a signed url for the Blob. This DOES NOT check for existance of Blob - that makes codes too slow for many use cases. Overridden to force the use of the IAM signBlob API. See https://github.com/googleapis/python-storage/blob/519074112775c19742522158f612b467cf590219/google/cloud/storage/_signing.py#L628 # NOQA ",42,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def url(self, name): name = self._normalize_name(clean_name(name)) blob = self.bucket.blob(name) blob_params = self.get_object_parameters(name) no_signed_url = ( blob_params.get('acl', self.default_acl) == 'publicRead' or not self.querystring_auth) if not self.custom_endpoint and no_signed_url: return blob.public_url elif no_signed_url: out = '{storage_base_url}/{quoted_name}'.format( storage_base_url=self.custom_endpoint, quoted_name=_quote(name, safe=b""/~""), ) return out elif not self.custom_endpoint: out2 = blob.generate_signed_url( expiration=self.expiration, version=""v4"", **self._get_signing_kwargs() ) return out2 else: out3 = blob.generate_signed_url( bucket_bound_hostname=self.custom_endpoint, expiration=self.expiration, version=""v4"", **self._get_signing_kwargs() ) return out3 ``` ###Assistant : Return public url or a signed url for the Blob. This DOES NOT check for existance of Blob - that makes codes too slow for many use cases. Overridden to force the use of the IAM signBlob API. See https://github.com/googleapis/python-storage/blob/519074112775c19742522158f612b467cf590219/google/cloud/storage/_signing.py#L628 # NOQA " 2221,"def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all): if isclass(estimator): raise TypeError(""{} is a class, not an instance."".format(estimator)) if msg is None: msg = ( ""This %(name)s instance is not fitted yet. Call 'fit' with "" ""appropriate arguments before using this estimator."" ) if not hasattr(estimator, ""fit""): raise TypeError(""%s is not an estimator instance."" % (estimator)) if attributes is not None: if not isinstance(attributes, (list, tuple)): attributes = [attributes] fitted = all_or_any([hasattr(estimator, attr) for attr in attributes]) elif hasattr(estimator, ""__sklearn_is_fitted__""): fitted = estimator.__sklearn_is_fitted__() else: fitted = [ v for v in vars(estimator) if v.endswith(""_"") and not v.startswith(""__"") ] if not fitted: raise NotFittedError(msg % {""name"": type(estimator).__name__}) ","Perform is_fitted validation for estimator. Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing underscore) and otherwise raises a NotFittedError with the given message. 
If an estimator does not set any attributes with a trailing underscore, it can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the estimator is fitted or not. Parameters ---------- estimator : estimator instance Estimator instance for which the check is performed. attributes : str, list or tuple of str, default=None Attribute name(s) given as string or a list/tuple of strings Eg.: ``[""coef_"", ""estimator_"", ...], ""coef_""`` If `None`, `estimator` is considered fitted if there exists an attribute that ends with an underscore and does not start with double underscore. msg : str, default=None The default error message is, ""This %(name)s instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator."" For custom messages if ""%(name)s"" is present in the message string, it is substituted for the estimator name. Eg. : ""Estimator, %(name)s, must be fitted before sparsifying"". all_or_any : callable, {all, any}, default=all Specify whether all or any of the given attributes must exist. Raises ------ TypeError If the estimator is a class or not an estimator instance NotFittedError If the attributes are not found. ",213,104,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all): if isclass(estimator): raise TypeError(""{} is a class, not an instance."".format(estimator)) if msg is None: msg = ( ""This %(name)s instance is not fitted yet. Call 'fit' with "" ""appropriate arguments before using this estimator."" ) if not hasattr(estimator, ""fit""): raise TypeError(""%s is not an estimator instance."" % (estimator)) if attributes is not None: if not isinstance(attributes, (list, tuple)): attributes = [attributes] fitted = all_or_any([hasattr(estimator, attr) for attr in attributes]) elif hasattr(estimator, ""__sklearn_is_fitted__""): fitted = estimator.__sklearn_is_fitted__() else: fitted = [ v for v in vars(estimator) if v.endswith(""_"") and not v.startswith(""__"") ] if not fitted: raise NotFittedError(msg % {""name"": type(estimator).__name__}) ``` ###Assistant : Perform is_fitted validation for estimator. Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing underscore) and otherwise raises a NotFittedError with the given message. If an estimator does not set any attributes with a trailing underscore, it can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the estimator is fitted or not. Parameters ---------- estimator : estimator instance Estimator instance for which the check is performed. attributes : str, list or tuple of str, default=None Attribute name(s) given as string or a list/tuple of strings Eg.: ``[""coef_"", ""estimator_"", ...], ""coef_""`` If `None`, `estimator` is considered fitted if there exists an attribute that ends with an underscore and does not start with double underscore. msg : str, default=None The default error message is, ""This %(name)s instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator."" For custom messages if ""%(name)s"" is present in the message string, it is substituted for the estimator name. Eg. : ""Estimator, %(name)s, must be fitted before sparsifying"". all_or_any : callable, {all, any}, default=all Specify whether all or any of the given attributes must exist.
Raises ------ TypeError If the estimator is a class or not an estimator instance NotFittedError If the attributes are not found. " 2222,"def p_mean_variance(self, model, x, t, transformer_out, clip_denoised=True, model_kwargs=None): if model_kwargs is None: model_kwargs = {} B, C = x.shape[:2] assert t.shape == (B,) model_output = model(x, t, transformer_out) assert model_output.shape == (B, C * 2, *x.shape[2:]) model_output, model_var_values = torch.split(model_output, C, dim=1) min_log = _extract_into_tensor(self.noise_scheduler.posterior_log_variance_clipped, t, x.shape) max_log = _extract_into_tensor(np.log(self.noise_scheduler.betas), t, x.shape) # The model_var_values is [-1, 1] for [min_var, max_var]. frac = (model_var_values + 1) / 2 model_log_variance = frac * max_log + (1 - frac) * min_log model_variance = torch.exp(model_log_variance) pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) if clip_denoised: pred_xstart = pred_xstart.clamp(-1, 1) model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t) assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape return model_mean, model_variance, model_log_variance, pred_xstart "," Apply the model to get p(x_{t-1} | x_t), as well as a prediction of the initial x, x_0. :param model: the model, which takes a signal and a batch of timesteps as input. :param x: the [N x C x ...] tensor at time t. :param t: a 1-D Tensor of timesteps. :param clip_denoised: if True, clip the denoised signal into [-1, 1]. :param model_kwargs: if not None, a dict of extra keyword arguments to pass to the model. This can be used for conditioning. :return: a dict with the following keys: - 'mean': the model mean output. - 'variance': the model variance output. - 'log_variance': the log of 'variance'. - 'pred_xstart': the prediction for x_0. ",116,113,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def p_mean_variance(self, model, x, t, transformer_out, clip_denoised=True, model_kwargs=None): if model_kwargs is None: model_kwargs = {} B, C = x.shape[:2] assert t.shape == (B,) model_output = model(x, t, transformer_out) assert model_output.shape == (B, C * 2, *x.shape[2:]) model_output, model_var_values = torch.split(model_output, C, dim=1) min_log = _extract_into_tensor(self.noise_scheduler.posterior_log_variance_clipped, t, x.shape) max_log = _extract_into_tensor(np.log(self.noise_scheduler.betas), t, x.shape) # The model_var_values is [-1, 1] for [min_var, max_var]. frac = (model_var_values + 1) / 2 model_log_variance = frac * max_log + (1 - frac) * min_log model_variance = torch.exp(model_log_variance) pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) if clip_denoised: pred_xstart = pred_xstart.clamp(-1, 1) model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t) assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape return model_mean, model_variance, model_log_variance, pred_xstart ``` ###Assistant : Apply the model to get p(x_{t-1} | x_t), as well as a prediction of the initial x, x_0. :param model: the model, which takes a signal and a batch of timesteps as input. :param x: the [N x C x ...] tensor at time t. :param t: a 1-D Tensor of timesteps. :param clip_denoised: if True, clip the denoised signal into [-1, 1]. :param model_kwargs: if not None, a dict of extra keyword arguments to pass to the model. This can be used for conditioning. 
:return: a dict with the following keys: - 'mean': the model mean output. - 'variance': the model variance output. - 'log_variance': the log of 'variance'. - 'pred_xstart': the prediction for x_0. " 2223,"def test_invalid_parameters_in_stacking(): stacker = StackingClassifier(estimators=[]) html_output = estimator_html_repr(stacker) assert html.escape(str(stacker)) in html_output ","Invalidate stacking configuration uses default repr. Non-regression test for #24009. ",10,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_invalid_parameters_in_stacking(): stacker = StackingClassifier(estimators=[]) html_output = estimator_html_repr(stacker) assert html.escape(str(stacker)) in html_output ``` ###Assistant : Invalidate stacking configuration uses default repr. Non-regression test for #24009. " 2224,"def add_permissions(): for doctype in (""South Africa VAT Settings"", ""South Africa VAT Account""): add_permission(doctype, ""All"", 0) for role in (""Accounts Manager"", ""Accounts User"", ""System Manager""): add_permission(doctype, role, 0) update_permission_property(doctype, role, 0, ""write"", 1) update_permission_property(doctype, role, 0, ""create"", 1) if not frappe.db.get_value(""Custom Role"", dict(report=""VAT Audit Report"")): frappe.get_doc( dict( doctype=""Custom Role"", report=""VAT Audit Report"", roles=[dict(role=""Accounts User""), dict(role=""Accounts Manager""), dict(role=""Auditor"")], ) ).insert() ","Add Permissions for South Africa VAT Settings and South Africa VAT Account and VAT Audit Report",16,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_permissions(): for doctype in (""South Africa VAT Settings"", ""South Africa VAT Account""): add_permission(doctype, ""All"", 0) for role in (""Accounts Manager"", ""Accounts User"", ""System Manager""): add_permission(doctype, role, 0) update_permission_property(doctype, role, 0, ""write"", 1) update_permission_property(doctype, role, 0, ""create"", 1) if not frappe.db.get_value(""Custom Role"", dict(report=""VAT Audit Report"")): frappe.get_doc( dict( doctype=""Custom Role"", report=""VAT Audit Report"", roles=[dict(role=""Accounts User""), dict(role=""Accounts Manager""), dict(role=""Auditor"")], ) ).insert() ``` ###Assistant : Add Permissions for South Africa VAT Settings and South Africa VAT Account and VAT Audit Report " 2225,"def has_delete_permission(self, request, obj=None): opts = self.opts codename = get_permission_codename(""delete"", opts) return request.user.has_perm(""%s.%s"" % (opts.app_label, codename)) "," Return True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to delete the `obj` model instance. If `obj` is None, this should return True if the given request has permission to delete *any* object of the given type. ",72,16,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def has_delete_permission(self, request, obj=None): opts = self.opts codename = get_permission_codename(""delete"", opts) return request.user.has_perm(""%s.%s"" % (opts.app_label, codename)) ``` ###Assistant : Return True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to delete the `obj` model instance. If `obj` is None, this should return True if the given request has permission to delete *any* object of the given type. " 2226,"def set_3d_properties(self, zs=0, zdir='z'): xs = self.get_xdata() ys = self.get_ydata() zs = cbook._to_unmasked_float_array(zs).ravel() zs = np.broadcast_to(zs, len(xs)) self._verts3d = juggle_axes(xs, ys, zs, zdir) self.stale = True "," Set the *z* position and direction of the line. Parameters ---------- zs : float or array of floats The location along the *zdir* axis in 3D space to position the line. zdir : {'x', 'y', 'z'} Plane to plot line orthogonal to. Default: 'z'. See `.get_dir_vector` for a description of the values. ",52,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_3d_properties(self, zs=0, zdir='z'): xs = self.get_xdata() ys = self.get_ydata() zs = cbook._to_unmasked_float_array(zs).ravel() zs = np.broadcast_to(zs, len(xs)) self._verts3d = juggle_axes(xs, ys, zs, zdir) self.stale = True ``` ###Assistant : Set the *z* position and direction of the line. Parameters ---------- zs : float or array of floats The location along the *zdir* axis in 3D space to position the line. zdir : {'x', 'y', 'z'} Plane to plot line orthogonal to. Default: 'z'. See `.get_dir_vector` for a description of the values. " 2227,"def clear_tasks(self): logger.debug(""Clearing stored tasks"") self._tasks = {} "," Clears all of the stored tasks. This is required when loading a task stored in a legacy project file, and is only to be called by :class:`Project` when a project has been loaded which is in fact a task. ",39,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clear_tasks(self): logger.debug(""Clearing stored tasks"") self._tasks = {} ``` ###Assistant : Clears all of the stored tasks. This is required when loading a task stored in a legacy project file, and is only to be called by :class:`Project` when a project has been loaded which is in fact a task. " 2228,"def add_department_leaves(events, start, end, employee, company): department = frappe.db.get_value(""Employee"", employee, ""department"") if not department: return # department leaves department_employees = frappe.db.sql_list( , (department, company), ) filter_conditions = ' and employee in (""%s"")' % '"", ""'.join(department_employees) add_leaves(events, start, end, filter_conditions=filter_conditions) ","select name from tabEmployee where department=%s and company=%s",8,39,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def add_department_leaves(events, start, end, employee, company): department = frappe.db.get_value(""Employee"", employee, ""department"") if not department: return # department leaves department_employees = frappe.db.sql_list( , (department, company), ) filter_conditions = ' and employee in (""%s"")' % '"", ""'.join(department_employees) add_leaves(events, start, end, filter_conditions=filter_conditions) ``` ###Assistant : select name from tabEmployee where department=%s and company=%s " 2229,"def is_file(self, follow_links=None): if follow_links is None: follow_links = True node_stat = self._stat if follow_links else self._lstat return stat.S_ISREG(node_stat.st_mode) "," Get whether the entry is a regular file. *follow_links* (:class:`bool` or :data:`None`) is whether to follow symbolic links. If this is :data:`True`, a symlink to a regular file will result in :data:`True`. Default is :data:`None` for :data:`True`. Returns whether the entry is a regular file (:class:`bool`). ",46,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_file(self, follow_links=None): if follow_links is None: follow_links = True node_stat = self._stat if follow_links else self._lstat return stat.S_ISREG(node_stat.st_mode) ``` ###Assistant : Get whether the entry is a regular file. *follow_links* (:class:`bool` or :data:`None`) is whether to follow symbolic links. If this is :data:`True`, a symlink to a regular file will result in :data:`True`. Default is :data:`None` for :data:`True`. Returns whether the entry is a regular file (:class:`bool`). " 2230,"def test_tabular_model_form_meta_readonly_field(self): response = self.client.get(reverse(""admin:admin_inlines_someparentmodel_add"")) self.assertContains( response, '', ) self.assertContains(response, ""Label from ModelForm.Meta"") "," Tabular inlines use ModelForm.Meta.help_texts and labels for read-only fields. ",9,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_tabular_model_form_meta_readonly_field(self): response = self.client.get(reverse(""admin:admin_inlines_someparentmodel_add"")) self.assertContains( response, '', ) self.assertContains(response, ""Label from ModelForm.Meta"") ``` ###Assistant : Tabular inlines use ModelForm.Meta.help_texts and labels for read-only fields. " 2231,"def test_edit_get_unlocked_no_lock_permission(self): # Use edit permission only self.set_permissions([""change""]) # Get the edit page response = self.client.get(self.get_url(""edit"")) html = response.content.decode() lock_url = self.get_url(""lock"") # Should not show lock message self.assertNotContains( response, ""'I'm a lockable snippet!' was locked"", ) # Should show unlocked information in the side panel self.assertContains( response, f""Anyone can edit this {self.model_name}."", ) # Should not show info to lock the object in the side panel self.assertNotContains( response, ""Lock it to prevent others from editing."", ) # Should show Save action menu item self.assertContains( response, f""{self.save_button_label}"", html=True, ) # Should not show Locked action menu item self.assertTagInHTML( '', html, count=0, allow_extra_attrs=True, ) # Should not show the lock button self.assertTagInHTML( f'', html, count=0, allow_extra_attrs=True, ) ",A user cannot lock an object without the lock permission.,10,121,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_edit_get_unlocked_no_lock_permission(self): # Use edit permission only self.set_permissions([""change""]) # Get the edit page response = self.client.get(self.get_url(""edit"")) html = response.content.decode() lock_url = self.get_url(""lock"") # Should not show lock message self.assertNotContains( response, ""'I'm a lockable snippet!' was locked"", ) # Should show unlocked information in the side panel self.assertContains( response, f""Anyone can edit this {self.model_name}."", ) # Should not show info to lock the object in the side panel self.assertNotContains( response, ""Lock it to prevent others from editing."", ) # Should show Save action menu item self.assertContains( response, f""{self.save_button_label}"", html=True, ) # Should not show Locked action menu item self.assertTagInHTML( '', html, count=0, allow_extra_attrs=True, ) # Should not show the lock button self.assertTagInHTML( f'', html, count=0, allow_extra_attrs=True, ) ``` ###Assistant : A user cannot lock an object without the lock permission. " 2232,"def increment_project_counter(project, delta=1, using=""default""): if delta <= 0: raise ValueError(""There is only one way, and that's up."") sample_rate = options.get(""store.projectcounter-modern-upsert-sample-rate"") modern_upsert = sample_rate and random.random() <= sample_rate # To prevent the statement_timeout leaking into the session we need to use # set local which can be used only within a transaction with transaction.atomic(using=using): cur = connections[using].cursor() try: statement_timeout = None if settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT: # WARNING: This is not a proper fix and should be removed once # we have better way of generating next_short_id. cur.execute(""show statement_timeout"") statement_timeout = cur.fetchone()[0] cur.execute( ""set local statement_timeout = %s"", [settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT], ) if modern_upsert: # Our postgres wrapper thing does not allow for named arguments cur.execute( ""insert into sentry_projectcounter (project_id, value) "" ""values (%s, %s) "" ""on conflict (project_id) do update "" ""set value = sentry_projectcounter.value + %s "" ""returning value"", [project.id, delta, delta], ) else: cur.execute( ""select sentry_increment_project_counter(%s, %s)"", [project.id, delta], ) project_counter = cur.fetchone()[0] if statement_timeout is not None: cur.execute( ""set local statement_timeout = %s"", [statement_timeout], ) return project_counter finally: cur.close() # this must be idempotent because it seems to execute twice # (at least during test runs)",This method primarily exists so that south code can use it.,11,184,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def increment_project_counter(project, delta=1, using=""default""): if delta <= 0: raise ValueError(""There is only one way, and that's up."") sample_rate = options.get(""store.projectcounter-modern-upsert-sample-rate"") modern_upsert = sample_rate and random.random() <= sample_rate # To prevent the statement_timeout leaking into the session we need to use # set local which can be used only within a transaction with transaction.atomic(using=using): cur = connections[using].cursor() try: statement_timeout = None if settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT: # WARNING: This is not a proper fix and should be removed once # we have better way of generating next_short_id. 
cur.execute(""show statement_timeout"") statement_timeout = cur.fetchone()[0] cur.execute( ""set local statement_timeout = %s"", [settings.SENTRY_PROJECT_COUNTER_STATEMENT_TIMEOUT], ) if modern_upsert: # Our postgres wrapper thing does not allow for named arguments cur.execute( ""insert into sentry_projectcounter (project_id, value) "" ""values (%s, %s) "" ""on conflict (project_id) do update "" ""set value = sentry_projectcounter.value + %s "" ""returning value"", [project.id, delta, delta], ) else: cur.execute( ""select sentry_increment_project_counter(%s, %s)"", [project.id, delta], ) project_counter = cur.fetchone()[0] if statement_timeout is not None: cur.execute( ""set local statement_timeout = %s"", [statement_timeout], ) return project_counter finally: cur.close() # this must be idempotent because it seems to execute twice # (at least during test runs) ``` ###Assistant : This method primarily exists so that south code can use it. " 2233,"def test_multiple_gen_nexts_closed_in_different_order(self) -> None: id_gen = self._create_id_generator() ","Check that we handle overlapping calls to gen_next, even when their IDs created and persisted in different orders.",18,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_multiple_gen_nexts_closed_in_different_order(self) -> None: id_gen = self._create_id_generator() ``` ###Assistant : Check that we handle overlapping calls to gen_next, even when their IDs created and persisted in different orders. " 2234,"def str_presenter(dumper, data): if len(data.splitlines()) > 1: # check for multiline string return dumper.represent_scalar(""tag:yaml.org,2002:str"", data, style=""|"") return dumper.represent_scalar(""tag:yaml.org,2002:str"", data) yaml.add_representer(str, str_presenter) yaml.representer.SafeRepresenter.add_representer(str, str_presenter) deployment_app = PrefectTyper( name=""deployment"", help=""Commands for working with deployments."" ) app.add_typer(deployment_app) "," configures yaml for dumping multiline strings Ref: https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data ",8,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def str_presenter(dumper, data): if len(data.splitlines()) > 1: # check for multiline string return dumper.represent_scalar(""tag:yaml.org,2002:str"", data, style=""|"") return dumper.represent_scalar(""tag:yaml.org,2002:str"", data) yaml.add_representer(str, str_presenter) yaml.representer.SafeRepresenter.add_representer(str, str_presenter) deployment_app = PrefectTyper( name=""deployment"", help=""Commands for working with deployments."" ) app.add_typer(deployment_app) ``` ###Assistant : configures yaml for dumping multiline strings Ref: https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data " 2235,"def _get_permissions(self, user_obj, obj, from_name): if not user_obj.is_active or user_obj.is_anonymous or obj is not None: return set() perm_cache_name = ""_effective_permissions_cache"" if not getattr(user_obj, perm_cache_name, None): perms = getattr(self, f""_get_{from_name}_permissions"")(user_obj) perms = perms.values_list(""content_type__app_label"", ""codename"").order_by() setattr(user_obj, perm_cache_name, {f""{ct}.{name}"" for ct, name in perms}) return getattr(user_obj, perm_cache_name) ","Return the permissions of `user_obj` from `from_name`. 
`from_name` can be either ""group"" or ""user"" to return permissions from `_get_group_permissions` or `_get_user_permissions` respectively. ",22,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_permissions(self, user_obj, obj, from_name): if not user_obj.is_active or user_obj.is_anonymous or obj is not None: return set() perm_cache_name = ""_effective_permissions_cache"" if not getattr(user_obj, perm_cache_name, None): perms = getattr(self, f""_get_{from_name}_permissions"")(user_obj) perms = perms.values_list(""content_type__app_label"", ""codename"").order_by() setattr(user_obj, perm_cache_name, {f""{ct}.{name}"" for ct, name in perms}) return getattr(user_obj, perm_cache_name) ``` ###Assistant : Return the permissions of `user_obj` from `from_name`. `from_name` can be either ""group"" or ""user"" to return permissions from `_get_group_permissions` or `_get_user_permissions` respectively. " 2236,"def test_trainable_layers(self): model = model = self._get_model() # Set the last layer to *not* be trainable. model.layers[-1].trainable = False self._train_model(model, use_dataset=True) loaded = self._save_and_load(model) self._test_evaluation(model, loaded) self.assertFalse(model.layers[-1].trainable) self.assertFalse(loaded.layers[-1].trainable) ",Tests that trainable status of individual layers is preserved.,9,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_trainable_layers(self): model = model = self._get_model() # Set the last layer to *not* be trainable. model.layers[-1].trainable = False self._train_model(model, use_dataset=True) loaded = self._save_and_load(model) self._test_evaluation(model, loaded) self.assertFalse(model.layers[-1].trainable) self.assertFalse(loaded.layers[-1].trainable) ``` ###Assistant : Tests that trainable status of individual layers is preserved. " 2237,"def _reorder_labels(self, row_positions=None, col_positions=None): if row_positions is not None: ordered_rows = self._partition_mgr_cls.map_axis_partitions( 0, self._partitions, lambda df: df.iloc[row_positions] ) row_idx = self.index[row_positions] else: ordered_rows = self._partitions row_idx = self.index if col_positions is not None: ordered_cols = self._partition_mgr_cls.map_axis_partitions( 1, ordered_rows, lambda df: df.iloc[:, col_positions] ) col_idx = self.columns[col_positions] else: ordered_cols = ordered_rows col_idx = self.columns return self.__constructor__(ordered_cols, row_idx, col_idx) "," Reorder the column and or rows in this DataFrame. Parameters ---------- row_positions : list of int, optional The ordered list of new row orders such that each position within the list indicates the new position. col_positions : list of int, optional The ordered list of new column orders such that each position within the list indicates the new position. Returns ------- PandasDataframe A new PandasDataframe with reordered columns and/or rows. ",70,57,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _reorder_labels(self, row_positions=None, col_positions=None): if row_positions is not None: ordered_rows = self._partition_mgr_cls.map_axis_partitions( 0, self._partitions, lambda df: df.iloc[row_positions] ) row_idx = self.index[row_positions] else: ordered_rows = self._partitions row_idx = self.index if col_positions is not None: ordered_cols = self._partition_mgr_cls.map_axis_partitions( 1, ordered_rows, lambda df: df.iloc[:, col_positions] ) col_idx = self.columns[col_positions] else: ordered_cols = ordered_rows col_idx = self.columns return self.__constructor__(ordered_cols, row_idx, col_idx) ``` ###Assistant : Reorder the column and or rows in this DataFrame. Parameters ---------- row_positions : list of int, optional The ordered list of new row orders such that each position within the list indicates the new position. col_positions : list of int, optional The ordered list of new column orders such that each position within the list indicates the new position. Returns ------- PandasDataframe A new PandasDataframe with reordered columns and/or rows. " 2238,"def get_actual_sle_dict(name): sles = frappe.db.sql( , name, as_dict=1, ) sle_dict = {} for d in sles: sle_dict[(d.item_code, d.warehouse)] = { ""actual_qty"": d.actual_qty, ""stock_value_difference"": d.stock_value_difference, } return sle_dict "," select item_code, warehouse, sum(actual_qty) as actual_qty, sum(stock_value_difference) as stock_value_difference from `tabStock Ledger Entry` where voucher_type = 'Asset Capitalization' and voucher_no = %s group by item_code, warehouse having actual_qty != 0 ",30,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_actual_sle_dict(name): sles = frappe.db.sql( , name, as_dict=1, ) sle_dict = {} for d in sles: sle_dict[(d.item_code, d.warehouse)] = { ""actual_qty"": d.actual_qty, ""stock_value_difference"": d.stock_value_difference, } return sle_dict ``` ###Assistant : select item_code, warehouse, sum(actual_qty) as actual_qty, sum(stock_value_difference) as stock_value_difference from `tabStock Ledger Entry` where voucher_type = 'Asset Capitalization' and voucher_no = %s group by item_code, warehouse having actual_qty != 0 " 2239,"async def _async_process_on_unload(self) -> None: if self._on_unload is not None: while self._on_unload: self._on_unload.pop()() while self._pending_tasks: pending = [task for task in self._pending_tasks if not task.done()] self._pending_tasks.clear() if pending: await asyncio.gather(*pending) ",Process the on_unload callbacks and wait for pending tasks.,9,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def _async_process_on_unload(self) -> None: if self._on_unload is not None: while self._on_unload: self._on_unload.pop()() while self._pending_tasks: pending = [task for task in self._pending_tasks if not task.done()] self._pending_tasks.clear() if pending: await asyncio.gather(*pending) ``` ###Assistant : Process the on_unload callbacks and wait for pending tasks. " 2240,"def apply_func(partition, func, *args, **kwargs): result = func(partition, *args, **kwargs) return result, get_ip() "," Execute a function on the partition in a worker process. Parameters ---------- partition : pandas.DataFrame A pandas DataFrame the function needs to be executed on. func : callable The function to perform. *args : list Positional arguments to pass to ``func``. 
**kwargs : dict Keyword arguments to pass to ``func``. Returns ------- pandas.DataFrame The resulting pandas DataFrame. str The node IP address of the worker process. Notes ----- Directly passing a call queue entry (i.e. a list of [func, args, kwargs]) instead of destructuring it causes a performance penalty. ",89,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply_func(partition, func, *args, **kwargs): result = func(partition, *args, **kwargs) return result, get_ip() ``` ###Assistant : Execute a function on the partition in a worker process. Parameters ---------- partition : pandas.DataFrame A pandas DataFrame the function needs to be executed on. func : callable The function to perform. *args : list Positional arguments to pass to ``func``. **kwargs : dict Keyword arguments to pass to ``func``. Returns ------- pandas.DataFrame The resulting pandas DataFrame. str The node IP address of the worker process. Notes ----- Directly passing a call queue entry (i.e. a list of [func, args, kwargs]) instead of destructuring it causes a performance penalty. " 2241,"def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True): if daemonize and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize() # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() # pack a few things into low low[""__jid__""] = jid low[""__user__""] = user low[""__tag__""] = tag try: return self.cmd_sync(low) except salt.exceptions.EauthAuthenticationError as exc: log.error(exc) "," Run this method in a multiprocess target to execute the function on the master and fire the return data on the event bus ",23,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True): if daemonize and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize() # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() # pack a few things into low low[""__jid__""] = jid low[""__user__""] = user low[""__tag__""] = tag try: return self.cmd_sync(low) except salt.exceptions.EauthAuthenticationError as exc: log.error(exc) ``` ###Assistant : Run this method in a multiprocess target to execute the function on the master and fire the return data on the event bus " 2242,"def array_safe(a, like, **kwargs): from dask.array.routines import array return _array_like_safe(np.array, array, a, like, **kwargs) "," If `a` is `dask.array`, return `dask.array.asarray(a, **kwargs)`, otherwise return `np.asarray(a, like=like, **kwargs)`, dispatching the call to the library that implements the like array. Note that when `a` is a `dask.Array` backed by `cupy.ndarray` but `like` isn't, this function will call `a.compute(scheduler=""sync"")` before `np.array`, as downstream libraries are unlikely to know how to convert a `dask.Array` and CuPy doesn't implement `__array__` to prevent implicit copies to host. ",66,14,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def array_safe(a, like, **kwargs): from dask.array.routines import array return _array_like_safe(np.array, array, a, like, **kwargs) ``` ###Assistant : If `a` is `dask.array`, return `dask.array.asarray(a, **kwargs)`, otherwise return `np.asarray(a, like=like, **kwargs)`, dispatching the call to the library that implements the like array. Note that when `a` is a `dask.Array` backed by `cupy.ndarray` but `like` isn't, this function will call `a.compute(scheduler=""sync"")` before `np.array`, as downstream libraries are unlikely to know how to convert a `dask.Array` and CuPy doesn't implement `__array__` to prevent implicit copies to host. " 2243,"def get_tables(self) -> StatusResponse: query = result = self.native_query(query) df = result.data_frame df = df.drop(['type', 'type'], axis=1) result.data_frame = df.rename(columns={'name': 'table_name'}) return result "," Return list of entities that will be accessible as tables. Returns: HandlerResponse SHOW TABLES; ",14,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_tables(self) -> StatusResponse: query = result = self.native_query(query) df = result.data_frame df = df.drop(['type', 'type'], axis=1) result.data_frame = df.rename(columns={'name': 'table_name'}) return result ``` ###Assistant : Return list of entities that will be accessible as tables. Returns: HandlerResponse SHOW TABLES; " 2244,"def accuracy(self, params, X, Y, averaged=True): Y_hat = self.apply(params, X) Y_hat = d2l.reshape(Y_hat, (-1, Y_hat.shape[-1])) preds = d2l.astype(d2l.argmax(Y_hat, axis=1), Y.dtype) compare = d2l.astype(preds == d2l.reshape(Y, -1), d2l.float32) return d2l.reduce_mean(compare) if averaged else compare ","Compute the number of correct predictions. Defined in :numref:`sec_classification`",9,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def accuracy(self, params, X, Y, averaged=True): Y_hat = self.apply(params, X) Y_hat = d2l.reshape(Y_hat, (-1, Y_hat.shape[-1])) preds = d2l.astype(d2l.argmax(Y_hat, axis=1), Y.dtype) compare = d2l.astype(preds == d2l.reshape(Y, -1), d2l.float32) return d2l.reduce_mean(compare) if averaged else compare ``` ###Assistant : Compute the number of correct predictions. 
Defined in :numref:`sec_classification` " 2245,"def open_metadata(self, book, custom_columns): if config.config_use_google_drive: if not gdriveutils.is_gdrive_ready(): raise Exception('Google Drive is configured but not ready') web_content_link = gdriveutils.get_metadata_backup_via_gdrive(book.path) if not web_content_link: raise Exception('Google Drive cover url not found') stream = None try: stream = urlopen(web_content_link) except Exception as ex: # Bubble exception to calling function self.log.debug('Error reading metadata.opf: ' + str(ex)) # ToDo Check whats going on raise ex finally: if stream is not None: stream.close() else: # ToDo: Handle book folder not found or not readable book_metadata_filepath = os.path.join(config.config_calibre_dir, book.path, 'metadata.opf') #if not os.path.isfile(book_metadata_filepath): self.create_new_metadata_backup(book, custom_columns, book_metadata_filepath) # else: ","namespaces = {'dc': PURL_NAMESPACE, 'opf': OPF_NAMESPACE} test = etree.parse(book_metadata_filepath) root = test.getroot() for i in root.iter(): self.log.info(i) title = root.find(""dc:metadata"", namespaces) pass with open(book_metadata_filepath, ""rb"") as f: xml = f.read() root = objectify.fromstring(xml) # root.metadata['{http://purl.org/dc/elements/1.1/}title'] # root.metadata[PURL + 'title'] # getattr(root.metadata, PURL +'title') # test = objectify.parse() pass # backup not found has to be created #raise Exception('Book cover file not found')",62,92,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def open_metadata(self, book, custom_columns): if config.config_use_google_drive: if not gdriveutils.is_gdrive_ready(): raise Exception('Google Drive is configured but not ready') web_content_link = gdriveutils.get_metadata_backup_via_gdrive(book.path) if not web_content_link: raise Exception('Google Drive cover url not found') stream = None try: stream = urlopen(web_content_link) except Exception as ex: # Bubble exception to calling function self.log.debug('Error reading metadata.opf: ' + str(ex)) # ToDo Check whats going on raise ex finally: if stream is not None: stream.close() else: # ToDo: Handle book folder not found or not readable book_metadata_filepath = os.path.join(config.config_calibre_dir, book.path, 'metadata.opf') #if not os.path.isfile(book_metadata_filepath): self.create_new_metadata_backup(book, custom_columns, book_metadata_filepath) # else: ``` ###Assistant : namespaces = {'dc': PURL_NAMESPACE, 'opf': OPF_NAMESPACE} test = etree.parse(book_metadata_filepath) root = test.getroot() for i in root.iter(): self.log.info(i) title = root.find(""dc:metadata"", namespaces) pass with open(book_metadata_filepath, ""rb"") as f: xml = f.read() root = objectify.fromstring(xml) # root.metadata['{http://purl.org/dc/elements/1.1/}title'] # root.metadata[PURL + 'title'] # getattr(root.metadata, PURL +'title') # test = objectify.parse() pass # backup not found has to be created #raise Exception('Book cover file not found') " 2246,"def test_gevent_monkey(pyi_builder): pyi_builder.test_source() # The tkinter module may be available for import, but not actually importable due to missing shared libraries. # Therefore, we need to use `can_import_module`-based skip decorator instead of `@importorskip`. @pytest.mark.skipif(not can_import_module(""tkinter""), reason=""tkinter cannot be imported."")"," from gevent.monkey import patch_all patch_all() ",5,39,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_gevent_monkey(pyi_builder): pyi_builder.test_source() # The tkinter module may be available for import, but not actually importable due to missing shared libraries. # Therefore, we need to use `can_import_module`-based skip decorator instead of `@importorskip`. @pytest.mark.skipif(not can_import_module(""tkinter""), reason=""tkinter cannot be imported."") ``` ###Assistant : from gevent.monkey import patch_all patch_all() " 2247,"def split_auth_netloc_from_url(url): # type: (str) -> Tuple[str, str, Tuple[str, str]] url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc) return url_without_auth, netloc, auth "," Parse a url into separate netloc, auth, and url with no auth. Returns: (url_without_auth, netloc, (username, password)) ",17,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def split_auth_netloc_from_url(url): # type: (str) -> Tuple[str, str, Tuple[str, str]] url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc) return url_without_auth, netloc, auth ``` ###Assistant : Parse a url into separate netloc, auth, and url with no auth. Returns: (url_without_auth, netloc, (username, password)) " 2248,"def test_non_str_color(): text = Text(""test_color_inheritance"", color=Color(""blue"")) markup_text = MarkupText(""test_color_inheritance"", color=Color(""blue"")) ","Test that the Text and MarkupText can accept non_str color values i.e. colour.Color(red).",13,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_non_str_color(): text = Text(""test_color_inheritance"", color=Color(""blue"")) markup_text = MarkupText(""test_color_inheritance"", color=Color(""blue"")) ``` ###Assistant : Test that the Text and MarkupText can accept non_str color values i.e. colour.Color(red). " 2249,"async def test_strategy_no_network_settings(pick_radio, mock_app, hass): mock_app.load_network_info = MagicMock(side_effect=NetworkNotFormed()) result, port = await pick_radio(RadioType.ezsp) assert ( config_flow.FORMATION_REUSE_SETTINGS not in result[""data_schema""].schema[""next_step_id""].container ) ",Test formation strategy when no network settings are present.,9,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_strategy_no_network_settings(pick_radio, mock_app, hass): mock_app.load_network_info = MagicMock(side_effect=NetworkNotFormed()) result, port = await pick_radio(RadioType.ezsp) assert ( config_flow.FORMATION_REUSE_SETTINGS not in result[""data_schema""].schema[""next_step_id""].container ) ``` ###Assistant : Test formation strategy when no network settings are present. 
" 2250,"def detrend(x, key=None, axis=None): if key is None or key in ['constant', 'mean', 'default']: return detrend(x, key=detrend_mean, axis=axis) elif key == 'linear': return detrend(x, key=detrend_linear, axis=axis) elif key == 'none': return detrend(x, key=detrend_none, axis=axis) elif callable(key): x = np.asarray(x) if axis is not None and axis + 1 > x.ndim: raise ValueError(f'axis(={axis}) out of bounds') if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1): return key(x) # try to use the 'axis' argument if the function supports it, # otherwise use apply_along_axis to do it try: return key(x, axis=axis) except TypeError: return np.apply_along_axis(key, axis=axis, arr=x) else: raise ValueError( f""Unknown value for key: {key!r}, must be one of: 'default', "" f""'constant', 'mean', 'linear', or a function"") "," Return *x* with its trend removed. Parameters ---------- x : array or sequence Array or sequence containing the data. key : {'default', 'constant', 'mean', 'linear', 'none'} or function The detrending algorithm to use. 'default', 'mean', and 'constant' are the same as `detrend_mean`. 'linear' is the same as `detrend_linear`. 'none' is the same as `detrend_none`. The default is 'mean'. See the corresponding functions for more details regarding the algorithms. Can also be a function that carries out the detrend operation. axis : int The axis along which to do the detrending. See Also -------- detrend_mean : Implementation of the 'mean' algorithm. detrend_linear : Implementation of the 'linear' algorithm. detrend_none : Implementation of the 'none' algorithm. ",114,121,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def detrend(x, key=None, axis=None): if key is None or key in ['constant', 'mean', 'default']: return detrend(x, key=detrend_mean, axis=axis) elif key == 'linear': return detrend(x, key=detrend_linear, axis=axis) elif key == 'none': return detrend(x, key=detrend_none, axis=axis) elif callable(key): x = np.asarray(x) if axis is not None and axis + 1 > x.ndim: raise ValueError(f'axis(={axis}) out of bounds') if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1): return key(x) # try to use the 'axis' argument if the function supports it, # otherwise use apply_along_axis to do it try: return key(x, axis=axis) except TypeError: return np.apply_along_axis(key, axis=axis, arr=x) else: raise ValueError( f""Unknown value for key: {key!r}, must be one of: 'default', "" f""'constant', 'mean', 'linear', or a function"") ``` ###Assistant : Return *x* with its trend removed. Parameters ---------- x : array or sequence Array or sequence containing the data. key : {'default', 'constant', 'mean', 'linear', 'none'} or function The detrending algorithm to use. 'default', 'mean', and 'constant' are the same as `detrend_mean`. 'linear' is the same as `detrend_linear`. 'none' is the same as `detrend_none`. The default is 'mean'. See the corresponding functions for more details regarding the algorithms. Can also be a function that carries out the detrend operation. axis : int The axis along which to do the detrending. See Also -------- detrend_mean : Implementation of the 'mean' algorithm. detrend_linear : Implementation of the 'linear' algorithm. detrend_none : Implementation of the 'none' algorithm. 
" 2251,"def has_unrendered_errors(bound_field): return bound_field.errors and not hasattr( bound_field.field.widget, ""render_with_errors"" ) @register.filter(is_safe=True) @stringfilter"," Return true if this field has errors that were not accounted for by render_with_errors, because the widget does not support the render_with_errors method ",23,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def has_unrendered_errors(bound_field): return bound_field.errors and not hasattr( bound_field.field.widget, ""render_with_errors"" ) @register.filter(is_safe=True) @stringfilter ``` ###Assistant : Return true if this field has errors that were not accounted for by render_with_errors, because the widget does not support the render_with_errors method " 2252,"def test_raw_id_threshold_page_permission_inline_admin(self): with self.settings(CMS_RAW_ID_USERS=1): with self.assertNumQueries(1): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, []) # Create users to check if threshold is honored self._get_guys() with self.settings(CMS_RAW_ID_USERS=False): with self.assertNumQueries(0): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, []) with self.settings(CMS_RAW_ID_USERS=True): with self.assertNumQueries(0): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, ['user']) with self.settings(CMS_RAW_ID_USERS=1): with self.assertNumQueries(1): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, ['user']) "," Only count users when using an integer value as threshold for CMS_RAW_ID_USERS. ",12,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_raw_id_threshold_page_permission_inline_admin(self): with self.settings(CMS_RAW_ID_USERS=1): with self.assertNumQueries(1): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, []) # Create users to check if threshold is honored self._get_guys() with self.settings(CMS_RAW_ID_USERS=False): with self.assertNumQueries(0): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, []) with self.settings(CMS_RAW_ID_USERS=True): with self.assertNumQueries(0): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, ['user']) with self.settings(CMS_RAW_ID_USERS=1): with self.assertNumQueries(1): self.assertEqual(PagePermissionInlineAdmin.raw_id_fields, ['user']) ``` ###Assistant : Only count users when using an integer value as threshold for CMS_RAW_ID_USERS. " 2253,"def get_breaks(self, filename, lineno): filename = self.canonic(filename) return filename in self.breaks and \ lineno in self.breaks[filename] and \ Breakpoint.bplist[filename, lineno] or [] ","Return all breakpoints for filename:lineno. If no breakpoints are set, return an empty list. ",14,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_breaks(self, filename, lineno): filename = self.canonic(filename) return filename in self.breaks and \ lineno in self.breaks[filename] and \ Breakpoint.bplist[filename, lineno] or [] ``` ###Assistant : Return all breakpoints for filename:lineno. If no breakpoints are set, return an empty list. 
" 2254,"def test_override(self) -> None: self.get_success( self.store.register_user( self.user_id, self.pwhash, approved=True, ) ) user = self.get_success(self.store.get_user_by_id(self.user_id)) self.assertIsNotNone(user) assert user is not None self.assertEqual(user[""approved""], 1) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertTrue(approved) ","Tests that if we require approval for new accounts, but we explicitly say the new user should be considered approved, they're marked as approved. ",24,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_override(self) -> None: self.get_success( self.store.register_user( self.user_id, self.pwhash, approved=True, ) ) user = self.get_success(self.store.get_user_by_id(self.user_id)) self.assertIsNotNone(user) assert user is not None self.assertEqual(user[""approved""], 1) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertTrue(approved) ``` ###Assistant : Tests that if we require approval for new accounts, but we explicitly say the new user should be considered approved, they're marked as approved. " 2255,"def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('ERROR: unknown token type: ' + token) ","Split text lines into word or character tokens. Defined in :numref:`sec_text_preprocessing`",11,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('ERROR: unknown token type: ' + token) ``` ###Assistant : Split text lines into word or character tokens. 
Defined in :numref:`sec_text_preprocessing` " 2256,"def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None): if not selected_items: return if isinstance(selected_items, str): selected_items = json.loads(selected_items) def set_missing_values(source, target): target.supplier = supplier target.apply_discount_on = """" target.additional_discount_percentage = 0.0 target.discount_amount = 0.0 target.inter_company_order_reference = """" default_price_list = frappe.get_value(""Supplier"", supplier, ""default_price_list"") if default_price_list: target.buying_price_list = default_price_list if any(item.delivered_by_supplier == 1 for item in source.items): if source.shipping_address_name: target.shipping_address = source.shipping_address_name target.shipping_address_display = source.shipping_address else: target.shipping_address = source.customer_address target.shipping_address_display = source.address_display target.customer_contact_person = source.contact_person target.customer_contact_display = source.contact_display target.customer_contact_mobile = source.contact_mobile target.customer_contact_email = source.contact_email else: target.customer = """" target.customer_name = """" target.run_method(""set_missing_values"") target.run_method(""calculate_taxes_and_totals"") def update_item(source, target, source_parent): target.schedule_date = source.delivery_date target.qty = flt(source.qty) - (flt(source.ordered_qty) / flt(source.conversion_factor)) target.stock_qty = flt(source.stock_qty) - flt(source.ordered_qty) target.project = source_parent.project suppliers = [item.get(""supplier"") for item in selected_items if item.get(""supplier"")] suppliers = list(dict.fromkeys(suppliers)) # remove duplicates while preserving order items_to_map = [item.get(""item_code"") for item in selected_items if item.get(""item_code"")] items_to_map = list(set(items_to_map)) if not suppliers: frappe.throw( _(""Please set a Supplier against the Items to be considered in the Purchase Order."") ) purchase_orders = [] for supplier in suppliers: doc = get_mapped_doc( ""Sales Order"", source_name, { ""Sales Order"": { ""doctype"": ""Purchase Order"", ""field_no_map"": [ ""address_display"", ""contact_display"", ""contact_mobile"", ""contact_email"", ""contact_person"", ""taxes_and_charges"", ""shipping_address"", ""terms"", ], ""validation"": {""docstatus"": [""="", 1]}, }, ""Sales Order Item"": { ""doctype"": ""Purchase Order Item"", ""field_map"": [ [""name"", ""sales_order_item""], [""parent"", ""sales_order""], [""stock_uom"", ""stock_uom""], [""uom"", ""uom""], [""conversion_factor"", ""conversion_factor""], [""delivery_date"", ""schedule_date""], ], ""field_no_map"": [ ""rate"", ""price_list_rate"", ""item_tax_template"", ""discount_percentage"", ""discount_amount"", ""pricing_rules"", ], ""postprocess"": update_item, ""condition"": lambda doc: doc.ordered_qty < doc.stock_qty and doc.supplier == supplier and doc.item_code in items_to_map, }, }, target_doc, set_missing_values, ) doc.insert() frappe.db.commit() purchase_orders.append(doc) return purchase_orders @frappe.whitelist()",Creates Purchase Order for each Supplier. Returns a list of doc objects.,12,252,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None): if not selected_items: return if isinstance(selected_items, str): selected_items = json.loads(selected_items) def set_missing_values(source, target): target.supplier = supplier target.apply_discount_on = """" target.additional_discount_percentage = 0.0 target.discount_amount = 0.0 target.inter_company_order_reference = """" default_price_list = frappe.get_value(""Supplier"", supplier, ""default_price_list"") if default_price_list: target.buying_price_list = default_price_list if any(item.delivered_by_supplier == 1 for item in source.items): if source.shipping_address_name: target.shipping_address = source.shipping_address_name target.shipping_address_display = source.shipping_address else: target.shipping_address = source.customer_address target.shipping_address_display = source.address_display target.customer_contact_person = source.contact_person target.customer_contact_display = source.contact_display target.customer_contact_mobile = source.contact_mobile target.customer_contact_email = source.contact_email else: target.customer = """" target.customer_name = """" target.run_method(""set_missing_values"") target.run_method(""calculate_taxes_and_totals"") def update_item(source, target, source_parent): target.schedule_date = source.delivery_date target.qty = flt(source.qty) - (flt(source.ordered_qty) / flt(source.conversion_factor)) target.stock_qty = flt(source.stock_qty) - flt(source.ordered_qty) target.project = source_parent.project suppliers = [item.get(""supplier"") for item in selected_items if item.get(""supplier"")] suppliers = list(dict.fromkeys(suppliers)) # remove duplicates while preserving order items_to_map = [item.get(""item_code"") for item in selected_items if item.get(""item_code"")] items_to_map = list(set(items_to_map)) if not suppliers: frappe.throw( _(""Please set a Supplier against the Items to be considered in the Purchase Order."") ) purchase_orders = [] for supplier in suppliers: doc = get_mapped_doc( ""Sales Order"", source_name, { ""Sales Order"": { ""doctype"": ""Purchase Order"", ""field_no_map"": [ ""address_display"", ""contact_display"", ""contact_mobile"", ""contact_email"", ""contact_person"", ""taxes_and_charges"", ""shipping_address"", ""terms"", ], ""validation"": {""docstatus"": [""="", 1]}, }, ""Sales Order Item"": { ""doctype"": ""Purchase Order Item"", ""field_map"": [ [""name"", ""sales_order_item""], [""parent"", ""sales_order""], [""stock_uom"", ""stock_uom""], [""uom"", ""uom""], [""conversion_factor"", ""conversion_factor""], [""delivery_date"", ""schedule_date""], ], ""field_no_map"": [ ""rate"", ""price_list_rate"", ""item_tax_template"", ""discount_percentage"", ""discount_amount"", ""pricing_rules"", ], ""postprocess"": update_item, ""condition"": lambda doc: doc.ordered_qty < doc.stock_qty and doc.supplier == supplier and doc.item_code in items_to_map, }, }, target_doc, set_missing_values, ) doc.insert() frappe.db.commit() purchase_orders.append(doc) return purchase_orders @frappe.whitelist() ``` ###Assistant : Creates Purchase Order for each Supplier. Returns a list of doc objects. " 2257,"def resolve_relation(model, app_label=None, model_name=None): if isinstance(model, str): if model == RECURSIVE_RELATIONSHIP_CONSTANT: if app_label is None or model_name is None: raise TypeError( 'app_label and model_name must be provided to resolve ' 'recursive relationships.' 
) return app_label, model_name if '.' in model: app_label, model_name = model.split('.', 1) return app_label, model_name.lower() if app_label is None: raise TypeError( 'app_label must be provided to resolve unscoped model relationships.' ) return app_label, model.lower() return model._meta.app_label, model._meta.model_name "," Turn a model class or model reference string and return a model tuple. app_label and model_name are used to resolve the scope of recursive and unscoped model relationship. ",28,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resolve_relation(model, app_label=None, model_name=None): if isinstance(model, str): if model == RECURSIVE_RELATIONSHIP_CONSTANT: if app_label is None or model_name is None: raise TypeError( 'app_label and model_name must be provided to resolve ' 'recursive relationships.' ) return app_label, model_name if '.' in model: app_label, model_name = model.split('.', 1) return app_label, model_name.lower() if app_label is None: raise TypeError( 'app_label must be provided to resolve unscoped model relationships.' ) return app_label, model.lower() return model._meta.app_label, model._meta.model_name ``` ###Assistant : Turn a model class or model reference string and return a model tuple. app_label and model_name are used to resolve the scope of recursive and unscoped model relationship. " 2258,"def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): missing_layers = [] unexpected_layers = [] mismatched_layers = [] # Read the H5 file with h5py.File(resolved_archive_file, ""r"") as sharded_checkpoint_file: # Retrieve the name of each layer from the H5 file saved_h5_model_layers_name = set( hdf5_format.load_attributes_from_hdf5_group(sharded_checkpoint_file, ""layer_names"") ) # Find the missing layers from the high level list of layers missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name) # Find the unexpected layers from the high level list of layers unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers])) saved_weight_names_set = set() symbolic_weights_names = set() weight_value_tuples = [] # Compute missing and unexpected sub layers # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] 
for layer in model.layers: # if layer_name from the H5 file belongs to the layers from the instantiated model if layer.name in saved_h5_model_layers_name: # Get the H5 layer object from its name h5_layer_object = sharded_checkpoint_file[layer.name] # Get all the weights as a list from the layer object symbolic_weights = layer.trainable_weights + layer.non_trainable_weights saved_weights = {} # Create a dict from the H5 saved model that looks like {""weight_name"": weight_value} # And a set with only the names for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, ""weight_names""): # TF names always start with the model name so we ignore it name = ""/"".join(weight_name.split(""/"")[1:]) if _prefix is not None: name = _prefix + ""/"" + name saved_weights[name] = np.asarray(h5_layer_object[weight_name]) # Add the updated name to the final list for computing missing/unexpected values saved_weight_names_set.add(name) # Loop over each weights from the instantiated model and compare with the weights from the H5 file for symbolic_weight in symbolic_weights: # TF names always start with the model name so we ignore it if _prefix is not None: delimeter = len(_prefix.split(""/"")) symbolic_weight_name = ""/"".join( symbolic_weight.name.split(""/"")[:delimeter] + symbolic_weight.name.split(""/"")[delimeter + 1 :] ) else: symbolic_weight_name = ""/"".join(symbolic_weight.name.split(""/"")[1:]) # here we check if the current weight is among the weights from the H5 file # If yes, get the weight_value of the corresponding weight from the H5 file # If not, make the value to None saved_weight_value = saved_weights.get(symbolic_weight_name, None) # Add the updated name to the final list for computing missing/unexpected values symbolic_weights_names.add(symbolic_weight_name) # If the current weight is found if saved_weight_value is not None: # Check if the shape of the current weight and the one from the H5 file are different if K.int_shape(symbolic_weight) != saved_weight_value.shape: # If yes we reshape the weight from the H5 file accordingly to the current weight # If the two shapes are not compatible we raise an issue try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except ValueError as e: if ignore_mismatched_sizes: mismatched_layers.append( (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight)) ) continue else: raise e else: array = saved_weight_value # We create the tuple that will be loaded and add it to the final list weight_value_tuples.append((symbolic_weight, array)) # Load all the weights K.batch_set_value(weight_value_tuples) # Compute the missing and unexpected layers missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set)) unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names)) return missing_layers, unexpected_layers, mismatched_layers "," Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and shapes. Args: model (`tf.keras.models.Model`): The model to load the weights into. resolved_archive_file (`str`): The location of the H5 file. ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to ignore weights with shapes that don't match between the checkpoint of the model. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers. ",83,479,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): missing_layers = [] unexpected_layers = [] mismatched_layers = [] # Read the H5 file with h5py.File(resolved_archive_file, ""r"") as sharded_checkpoint_file: # Retrieve the name of each layer from the H5 file saved_h5_model_layers_name = set( hdf5_format.load_attributes_from_hdf5_group(sharded_checkpoint_file, ""layer_names"") ) # Find the missing layers from the high level list of layers missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name) # Find the unexpected layers from the high level list of layers unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers])) saved_weight_names_set = set() symbolic_weights_names = set() weight_value_tuples = [] # Compute missing and unexpected sub layers # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] for layer in model.layers: # if layer_name from the H5 file belongs to the layers from the instantiated model if layer.name in saved_h5_model_layers_name: # Get the H5 layer object from its name h5_layer_object = sharded_checkpoint_file[layer.name] # Get all the weights as a list from the layer object symbolic_weights = layer.trainable_weights + layer.non_trainable_weights saved_weights = {} # Create a dict from the H5 saved model that looks like {""weight_name"": weight_value} # And a set with only the names for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, ""weight_names""): # TF names always start with the model name so we ignore it name = ""/"".join(weight_name.split(""/"")[1:]) if _prefix is not None: name = _prefix + ""/"" + name saved_weights[name] = np.asarray(h5_layer_object[weight_name]) # Add the updated name to the final list for computing missing/unexpected values saved_weight_names_set.add(name) # Loop over each weights from the instantiated model and compare with the weights from the H5 file for symbolic_weight in symbolic_weights: # TF names always start with the model name so we ignore it if _prefix is not None: delimeter = len(_prefix.split(""/"")) symbolic_weight_name = ""/"".join( symbolic_weight.name.split(""/"")[:delimeter] + symbolic_weight.name.split(""/"")[delimeter + 1 :] ) else: symbolic_weight_name = ""/"".join(symbolic_weight.name.split(""/"")[1:]) # here we check if the current weight is among the weights from the H5 file # If yes, get the weight_value of the corresponding weight from the H5 file # If not, make the value to None saved_weight_value = saved_weights.get(symbolic_weight_name, None) # Add the updated name to the final list for computing missing/unexpected values symbolic_weights_names.add(symbolic_weight_name) # If the current weight is found if saved_weight_value is not None: # Check if the shape of the current weight and the one from the H5 file are different if K.int_shape(symbolic_weight) != saved_weight_value.shape: # If yes we reshape the weight from the H5 file accordingly to the current weight # If the two shapes are not compatible we raise an issue try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except ValueError as e: if ignore_mismatched_sizes: mismatched_layers.append( (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight)) ) continue else: raise e else: array = saved_weight_value # We create the tuple that will be loaded and add it to the final list 
weight_value_tuples.append((symbolic_weight, array)) # Load all the weights K.batch_set_value(weight_value_tuples) # Compute the missing and unexpected layers missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set)) unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names)) return missing_layers, unexpected_layers, mismatched_layers ``` ###Assistant : Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and shapes. Args: model (`tf.keras.models.Model`): The model to load the weights into. resolved_archive_file (`str`): The location of the H5 file. ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to ignore weights with shapes that don't match between the checkpoint of the model. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers. " 2259,"def css_classes(self, extra_classes=None): if hasattr(extra_classes, ""split""): extra_classes = extra_classes.split() extra_classes = set(extra_classes or []) if self.errors and hasattr(self.form, ""error_css_class""): extra_classes.add(self.form.error_css_class) if self.field.required and hasattr(self.form, ""required_css_class""): extra_classes.add(self.form.required_css_class) return "" "".join(extra_classes) "," Return a string of space-separated CSS classes for this field. ",10,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def css_classes(self, extra_classes=None): if hasattr(extra_classes, ""split""): extra_classes = extra_classes.split() extra_classes = set(extra_classes or []) if self.errors and hasattr(self.form, ""error_css_class""): extra_classes.add(self.form.error_css_class) if self.field.required and hasattr(self.form, ""required_css_class""): extra_classes.add(self.form.required_css_class) return "" "".join(extra_classes) ``` ###Assistant : Return a string of space-separated CSS classes for this field. " 2260,"def _get_free_vram(self) -> List[float]: vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024) for handle in self._handles] self._log(""debug"", f""GPU VRAM free: {vram}"") return vram "," Obtain the amount of VRAM that is available, in Megabytes, for each connected Nvidia GPU. Returns ------- list List of `float`s containing the amount of VRAM available, in Megabytes, for each connected GPU as corresponding to the values in :attr:`_handles ",40,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_free_vram(self) -> List[float]: vram = [pynvml.nvmlDeviceGetMemoryInfo(handle).free / (1024 * 1024) for handle in self._handles] self._log(""debug"", f""GPU VRAM free: {vram}"") return vram ``` ###Assistant : Obtain the amount of VRAM that is available, in Megabytes, for each connected Nvidia GPU. 
Returns ------- list List of `float`s containing the amount of VRAM available, in Megabytes, for each connected GPU as corresponding to the values in :attr:`_handles " 2261,"def get_project(doctype, txt, searchfield, start, page_len, filters): from erpnext.controllers.queries import get_match_cond meta = frappe.get_meta(doctype) searchfields = meta.get_search_fields() search_columns = "", "" + "", "".join(searchfields) if searchfields else """" search_cond = "" or "" + "" or "".join(field + "" like %(txt)s"" for field in searchfields) return frappe.db.sql( .format( search_columns=search_columns, search_condition=search_cond ), { ""key"": searchfield, ""txt"": ""%"" + txt + ""%"", ""mcond"": get_match_cond(doctype), ""start"": start, ""page_len"": page_len, }, ) @frappe.whitelist()"," select name {search_columns} from `tabProject` where %(key)s like %(txt)s %(mcond)s {search_condition} order by name limit %(start)s, %(page_len)s",17,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_project(doctype, txt, searchfield, start, page_len, filters): from erpnext.controllers.queries import get_match_cond meta = frappe.get_meta(doctype) searchfields = meta.get_search_fields() search_columns = "", "" + "", "".join(searchfields) if searchfields else """" search_cond = "" or "" + "" or "".join(field + "" like %(txt)s"" for field in searchfields) return frappe.db.sql( .format( search_columns=search_columns, search_condition=search_cond ), { ""key"": searchfield, ""txt"": ""%"" + txt + ""%"", ""mcond"": get_match_cond(doctype), ""start"": start, ""page_len"": page_len, }, ) @frappe.whitelist() ``` ###Assistant : select name {search_columns} from `tabProject` where %(key)s like %(txt)s %(mcond)s {search_condition} order by name limit %(start)s, %(page_len)s " 2262,"def build(self, var_list): super().build(var_list) if getattr(self, ""_built"", False): return self._built = True self._momentums = [] self._velocities = [] self._u_product = tf.Variable(1.0, dtype=var_list[0].dtype) # Keep a counter on how many times of _u_product has been computed to # avoid duplicated computations. self._u_product_counter = 1 for var in var_list: self._momentums.append( self.add_variable_from_reference( model_variable=var, variable_name=""m"" ) ) self._velocities.append( self.add_variable_from_reference( model_variable=var, variable_name=""v"" ) ) ","Initialize optimizer variables. Nadam optimizer has 2 types of variables: momentums and velocities. Args: var_list: list of model variables to build Nadam variables on. ",24,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def build(self, var_list): super().build(var_list) if getattr(self, ""_built"", False): return self._built = True self._momentums = [] self._velocities = [] self._u_product = tf.Variable(1.0, dtype=var_list[0].dtype) # Keep a counter on how many times of _u_product has been computed to # avoid duplicated computations. self._u_product_counter = 1 for var in var_list: self._momentums.append( self.add_variable_from_reference( model_variable=var, variable_name=""m"" ) ) self._velocities.append( self.add_variable_from_reference( model_variable=var, variable_name=""v"" ) ) ``` ###Assistant : Initialize optimizer variables. Nadam optimizer has 2 types of variables: momentums and velocities. Args: var_list: list of model variables to build Nadam variables on. 
" 2263,"def list_distinfo_files(self, absolute=False): record_path = os.path.join(self.path, 'installed-files.txt') if os.path.exists(record_path): skip = True with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() if line == './': skip = False continue if not skip: p = os.path.normpath(os.path.join(self.path, line)) if p.startswith(self.path): if absolute: yield p else: yield line "," Iterates over the ``installed-files.txt`` entries and returns paths for each line if the path is pointing to a file located in the ``.egg-info`` directory or one of its subdirectories. :parameter absolute: If *absolute* is ``True``, each returned path is transformed into a local absolute path. Otherwise the raw value from ``installed-files.txt`` is returned. :type absolute: boolean :returns: iterator of paths ",60,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list_distinfo_files(self, absolute=False): record_path = os.path.join(self.path, 'installed-files.txt') if os.path.exists(record_path): skip = True with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() if line == './': skip = False continue if not skip: p = os.path.normpath(os.path.join(self.path, line)) if p.startswith(self.path): if absolute: yield p else: yield line ``` ###Assistant : Iterates over the ``installed-files.txt`` entries and returns paths for each line if the path is pointing to a file located in the ``.egg-info`` directory or one of its subdirectories. :parameter absolute: If *absolute* is ``True``, each returned path is transformed into a local absolute path. Otherwise the raw value from ``installed-files.txt`` is returned. :type absolute: boolean :returns: iterator of paths " 2264,"def mixin_scalable_deployment_parser(parser): gp = mixin_base_deployment_parser(parser, title='Scalable Deployment') gp.add_argument( '--polling', type=str, default=PollingType.ANY.name, help=, ) gp.add_argument( '--shards', type=int, default=1, help='The number of shards in the deployment running at the same time. For more details check ' 'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies', ) gp.add_argument( '--replicas', type=int, default=1, help='The number of replicas in the deployment', ) gp.add_argument( '--native', action='store_true', default=False, help='If set, only native Executors is allowed, and the Executor is always run inside WorkerRuntime.', ) ","Mixing in arguments required by a scalable deployment into the given parser. The deployment is scalable and can have shards, replicas and polling :param parser: the parser instance to which we add arguments The polling strategy of the Deployment and its endpoints (when `shards>1`). Can be defined for all endpoints of a Deployment or by endpoint. Define per Deployment: - ANY: only one (whoever is idle) Pod polls the message - ALL: all Pods poll the message (like a broadcast) Define per Endpoint: JSON dict, {endpoint: PollingType} {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'} ",93,68,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mixin_scalable_deployment_parser(parser): gp = mixin_base_deployment_parser(parser, title='Scalable Deployment') gp.add_argument( '--polling', type=str, default=PollingType.ANY.name, help=, ) gp.add_argument( '--shards', type=int, default=1, help='The number of shards in the deployment running at the same time. 
For more details check ' 'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies', ) gp.add_argument( '--replicas', type=int, default=1, help='The number of replicas in the deployment', ) gp.add_argument( '--native', action='store_true', default=False, help='If set, only native Executors is allowed, and the Executor is always run inside WorkerRuntime.', ) ``` ###Assistant : Mixing in arguments required by a scalable deployment into the given parser. The deployment is scalable and can have shards, replicas and polling :param parser: the parser instance to which we add arguments The polling strategy of the Deployment and its endpoints (when `shards>1`). Can be defined for all endpoints of a Deployment or by endpoint. Define per Deployment: - ANY: only one (whoever is idle) Pod polls the message - ALL: all Pods poll the message (like a broadcast) Define per Endpoint: JSON dict, {endpoint: PollingType} {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'} " 2265,"def test_explorer_list_private(self): response = self.client.get( reverse(""wagtailadmin_explore"", args=(self.private_page.id,)) ) # Check the response self.assertEqual(response.status_code, 200) # Must have one privacy icon (next to the private child page) self.assertContains( response, ' 0: return scale * x` - `if x < 0: return scale * alpha * (exp(x) - 1)` where `alpha` and `scale` are pre-defined constants (`alpha=1.67326324` and `scale=1.05070098`). Basically, the SELU activation function multiplies `scale` (> 1) with the output of the `tf.keras.activations.elu` function to ensure a slope larger than one for positive inputs. The values of `alpha` and `scale` are chosen so that the mean and variance of the inputs are preserved between two consecutive layers as long as the weights are initialized correctly (see `tf.keras.initializers.LecunNormal` initializer) and the number of input units is ""large enough"" (see reference paper for more information). Example Usage: >>> num_classes = 10 # 10-class problem >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax')) Args: x: A tensor or variable to compute the activation function for. Returns: The scaled exponential unit activation: `scale * elu(x, alpha)`. Notes: - To be used together with the `tf.keras.initializers.LecunNormal` initializer. - To be used together with the dropout variant `tf.keras.layers.AlphaDropout` (not regular dropout). References: - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515) ",205,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def selu(x): return tf.nn.selu(x) @keras_export(""keras.activations.softplus"") @tf.__internal__.dispatch.add_dispatch_support ``` ###Assistant : Scaled Exponential Linear Unit (SELU). The Scaled Exponential Linear Unit (SELU) activation function is defined as: - `if x > 0: return scale * x` - `if x < 0: return scale * alpha * (exp(x) - 1)` where `alpha` and `scale` are pre-defined constants (`alpha=1.67326324` and `scale=1.05070098`). 
Basically, the SELU activation function multiplies `scale` (> 1) with the output of the `tf.keras.activations.elu` function to ensure a slope larger than one for positive inputs. The values of `alpha` and `scale` are chosen so that the mean and variance of the inputs are preserved between two consecutive layers as long as the weights are initialized correctly (see `tf.keras.initializers.LecunNormal` initializer) and the number of input units is ""large enough"" (see reference paper for more information). Example Usage: >>> num_classes = 10 # 10-class problem >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax')) Args: x: A tensor or variable to compute the activation function for. Returns: The scaled exponential unit activation: `scale * elu(x, alpha)`. Notes: - To be used together with the `tf.keras.initializers.LecunNormal` initializer. - To be used together with the dropout variant `tf.keras.layers.AlphaDropout` (not regular dropout). References: - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515) " 2269,"def _find_safe_escape_tokens(self, text): for i, t, v in self._filter_to( self.lang.get_tokens_unprocessed(text), lambda t: t in Token.Comment or t in Token.String ): if t is None: for i2, t2, v2 in self._find_escape_tokens(v): yield i + i2, t2, v2 else: yield i, None, v ", find escape tokens that are not in strings or comments ,10,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _find_safe_escape_tokens(self, text): for i, t, v in self._filter_to( self.lang.get_tokens_unprocessed(text), lambda t: t in Token.Comment or t in Token.String ): if t is None: for i2, t2, v2 in self._find_escape_tokens(v): yield i + i2, t2, v2 else: yield i, None, v ``` ###Assistant : find escape tokens that are not in strings or comments " 2270,"def set_up_fileselector(quteproc, py_proc, kind, files, output_type): cmd, args = py_proc(r) args += files.split(' ') if output_type == ""a temporary file"": args += ['--file={}'] fileselect_cmd = json.dumps([cmd, *args]) quteproc.set_setting('fileselect.handler', 'external') quteproc.set_setting(f'fileselect.{kind}.command', fileselect_cmd) ","Set up fileselect.xxx.command to select the file(s). import os import sys tmp_file = None for i, arg in enumerate(sys.argv): if arg.startswith('--file='): tmp_file = arg[len('--file='):] sys.argv.pop(i) break selected_files = sys.argv[1:] if tmp_file is None: for selected_file in selected_files: print(os.path.abspath(selected_file)) else: with open(tmp_file, 'w') as f: for selected_file in selected_files: f.write(os.path.abspath(selected_file) + '\n') ",51,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_up_fileselector(quteproc, py_proc, kind, files, output_type): cmd, args = py_proc(r) args += files.split(' ') if output_type == ""a temporary file"": args += ['--file={}'] fileselect_cmd = json.dumps([cmd, *args]) quteproc.set_setting('fileselect.handler', 'external') quteproc.set_setting(f'fileselect.{kind}.command', fileselect_cmd) ``` ###Assistant : Set up fileselect.xxx.command to select the file(s). 
import os import sys tmp_file = None for i, arg in enumerate(sys.argv): if arg.startswith('--file='): tmp_file = arg[len('--file='):] sys.argv.pop(i) break selected_files = sys.argv[1:] if tmp_file is None: for selected_file in selected_files: print(os.path.abspath(selected_file)) else: with open(tmp_file, 'w') as f: for selected_file in selected_files: f.write(os.path.abspath(selected_file) + '\n') " 2271,"def task_runner(request): if not hasattr(request.param, ""_pytestfixturefunction""): raise TypeError(""Received invalid `task_runner` parameter. Expected fixture."") yield request.getfixturevalue(request.param.__name__) "," An indirect fixture that expects to receive a pytest fixture that yields a task runner. ",15,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def task_runner(request): if not hasattr(request.param, ""_pytestfixturefunction""): raise TypeError(""Received invalid `task_runner` parameter. Expected fixture."") yield request.getfixturevalue(request.param.__name__) ``` ###Assistant : An indirect fixture that expects to receive a pytest fixture that yields a task runner. " 2272,"def test_get_apns_context(self) -> None: import zerver.lib.push_notifications zerver.lib.push_notifications.get_apns_context.cache_clear() try: with self.settings(APNS_CERT_FILE=""/foo.pem""), mock.patch(""aioapns.APNs"") as mock_apns: apns_context = get_apns_context() assert apns_context is not None try: self.assertEqual(mock_apns.return_value, apns_context.apns) finally: apns_context.loop.close() finally: # Reset the cache for `get_apns_context` so that we don't # leak changes to the rest of the world. zerver.lib.push_notifications.get_apns_context.cache_clear() ","This test is pretty hacky, and needs to carefully reset the state it modifies in order to avoid leaking state that can lead to nondeterministic results for other tests. ",29,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_apns_context(self) -> None: import zerver.lib.push_notifications zerver.lib.push_notifications.get_apns_context.cache_clear() try: with self.settings(APNS_CERT_FILE=""/foo.pem""), mock.patch(""aioapns.APNs"") as mock_apns: apns_context = get_apns_context() assert apns_context is not None try: self.assertEqual(mock_apns.return_value, apns_context.apns) finally: apns_context.loop.close() finally: # Reset the cache for `get_apns_context` so that we don't # leak changes to the rest of the world. zerver.lib.push_notifications.get_apns_context.cache_clear() ``` ###Assistant : This test is pretty hacky, and needs to carefully reset the state it modifies in order to avoid leaking state that can lead to nondeterministic results for other tests. " 2273,"def _validate(self) -> None: if (self._args.writer == ""ffmpeg"" and not self._images.is_video and self._args.reference_video is None): raise FaceswapError(""Output as video selected, but using frames as input. You must "" ""provide a reference video ('-ref', '--reference-video')."") if (self._args.on_the_fly and self._args.mask_type not in (""none"", ""extended"", ""components"")): logger.warning(""You have selected an incompatible mask type ('%s') for On-The-Fly "" ""conversion. 
Switching to 'extended'"", self._args.mask_type) self._args.mask_type = ""extended"" if (not self._args.on_the_fly and self._args.mask_type not in (""none"", ""predicted"") and not self._alignments.mask_is_valid(self._args.mask_type)): msg = (f""You have selected the Mask Type `{self._args.mask_type}` but at least one "" ""face does not have this mask stored in the Alignments File.\nYou should "" ""generate the required masks with the Mask Tool or set the Mask Type option to "" ""an existing Mask Type.\nA summary of existing masks is as follows:\nTotal "" f""faces: {self._alignments.faces_count}, "" f""Masks: {self._alignments.mask_summary}"") raise FaceswapError(msg) if self._args.mask_type == ""predicted"" and not self._predictor.has_predicted_mask: available_masks = [k for k, v in self._alignments.mask_summary.items() if k != ""none"" and v == self._alignments.faces_count] if not available_masks: msg = (""Predicted Mask selected, but the model was not trained with a mask and no "" ""masks are stored in the Alignments File.\nYou should generate the "" ""required masks with the Mask Tool or set the Mask Type to `none`."") raise FaceswapError(msg) mask_type = available_masks[0] logger.warning(""Predicted Mask selected, but the model was not trained with a "" ""mask. Selecting first available mask: '%s'"", mask_type) self._args.mask_type = mask_type "," Validate the Command Line Options. Ensure that certain cli selections are valid and won't result in an error. Checks: * If frames have been passed in with video output, ensure user supplies reference video. * If ""on-the-fly"" and a Neural Network mask is selected, warn and switch to 'extended' * If a mask-type is selected, ensure it exists in the alignments file. * If a predicted mask-type is selected, ensure model has been trained with a mask otherwise attempt to select first available masks, otherwise raise error. Raises ------ FaceswapError If an invalid selection has been found. ",97,230,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _validate(self) -> None: if (self._args.writer == ""ffmpeg"" and not self._images.is_video and self._args.reference_video is None): raise FaceswapError(""Output as video selected, but using frames as input. You must "" ""provide a reference video ('-ref', '--reference-video')."") if (self._args.on_the_fly and self._args.mask_type not in (""none"", ""extended"", ""components"")): logger.warning(""You have selected an incompatible mask type ('%s') for On-The-Fly "" ""conversion. 
Switching to 'extended'"", self._args.mask_type) self._args.mask_type = ""extended"" if (not self._args.on_the_fly and self._args.mask_type not in (""none"", ""predicted"") and not self._alignments.mask_is_valid(self._args.mask_type)): msg = (f""You have selected the Mask Type `{self._args.mask_type}` but at least one "" ""face does not have this mask stored in the Alignments File.\nYou should "" ""generate the required masks with the Mask Tool or set the Mask Type option to "" ""an existing Mask Type.\nA summary of existing masks is as follows:\nTotal "" f""faces: {self._alignments.faces_count}, "" f""Masks: {self._alignments.mask_summary}"") raise FaceswapError(msg) if self._args.mask_type == ""predicted"" and not self._predictor.has_predicted_mask: available_masks = [k for k, v in self._alignments.mask_summary.items() if k != ""none"" and v == self._alignments.faces_count] if not available_masks: msg = (""Predicted Mask selected, but the model was not trained with a mask and no "" ""masks are stored in the Alignments File.\nYou should generate the "" ""required masks with the Mask Tool or set the Mask Type to `none`."") raise FaceswapError(msg) mask_type = available_masks[0] logger.warning(""Predicted Mask selected, but the model was not trained with a "" ""mask. Selecting first available mask: '%s'"", mask_type) self._args.mask_type = mask_type ``` ###Assistant : Validate the Command Line Options. Ensure that certain cli selections are valid and won't result in an error. Checks: * If frames have been passed in with video output, ensure user supplies reference video. * If ""on-the-fly"" and a Neural Network mask is selected, warn and switch to 'extended' * If a mask-type is selected, ensure it exists in the alignments file. * If a predicted mask-type is selected, ensure model has been trained with a mask otherwise attempt to select first available masks, otherwise raise error. Raises ------ FaceswapError If an invalid selection has been found. " 2274,"def get_authenticators(self) -> List[BaseAuthentication]: # TODO: Increase test coverage and get this working for monolith mode. if SiloMode.get_current_mode() == SiloMode.MONOLITH: return super().get_authenticators() last_api_authenticator = ApiAuthentication([]) result: List[BaseAuthentication] = [] for authenticator_cls in self.authentication_classes: auth_type = ApiAuthenticatorType.from_authenticator(authenticator_cls) if auth_type: last_api_authenticator.types.append(auth_type) else: if last_api_authenticator.types: result.append(last_api_authenticator) last_api_authenticator = ApiAuthentication([]) result.append(authenticator_cls()) if last_api_authenticator.types: result.append(last_api_authenticator) return result "," Instantiates and returns the list of authenticators that this view can use. Aggregates together authenticators that can be supported using HybridCloud. ",21,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_authenticators(self) -> List[BaseAuthentication]: # TODO: Increase test coverage and get this working for monolith mode. 
if SiloMode.get_current_mode() == SiloMode.MONOLITH: return super().get_authenticators() last_api_authenticator = ApiAuthentication([]) result: List[BaseAuthentication] = [] for authenticator_cls in self.authentication_classes: auth_type = ApiAuthenticatorType.from_authenticator(authenticator_cls) if auth_type: last_api_authenticator.types.append(auth_type) else: if last_api_authenticator.types: result.append(last_api_authenticator) last_api_authenticator = ApiAuthentication([]) result.append(authenticator_cls()) if last_api_authenticator.types: result.append(last_api_authenticator) return result ``` ###Assistant : Instantiates and returns the list of authenticators that this view can use. Aggregates together authenticators that can be supported using HybridCloud. " 2275,"def test_naive_lowest_common_ancestor2(self): G = nx.DiGraph() G.add_edge(0, 1) G.add_edge(2, 0) G.add_edge(2, 3) G.add_edge(4, 0) G.add_edge(5, 2) assert naive_lca(G, 1, 3) == 2 ",Test that the one-pair function works for issue #4942.,9,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_naive_lowest_common_ancestor2(self): G = nx.DiGraph() G.add_edge(0, 1) G.add_edge(2, 0) G.add_edge(2, 3) G.add_edge(4, 0) G.add_edge(5, 2) assert naive_lca(G, 1, 3) == 2 ``` ###Assistant : Test that the one-pair function works for issue #4942. " 2276,"def get_references(state, model_tuple, field_tuple=()): for state_model_tuple, model_state in state.models.items(): for name, field in model_state.fields.items(): reference = field_references( state_model_tuple, field, model_tuple, *field_tuple ) if reference: yield model_state, name, field, reference "," Generator of (model_state, name, field, reference) referencing provided context. If field_tuple is provided only references to this particular field of model_tuple will be generated. ",24,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_references(state, model_tuple, field_tuple=()): for state_model_tuple, model_state in state.models.items(): for name, field in model_state.fields.items(): reference = field_references( state_model_tuple, field, model_tuple, *field_tuple ) if reference: yield model_state, name, field, reference ``` ###Assistant : Generator of (model_state, name, field, reference) referencing provided context. If field_tuple is provided only references to this particular field of model_tuple will be generated. 
" 2277,"async def test_media_player_eq_bands_not_supported(hass): device = ( ""media_player.test_bands"", ""on"", { ""friendly_name"": ""Test media player"", ""supported_features"": SUPPORT_SELECT_SOUND_MODE, ""sound_mode"": ""tv"", ""sound_mode_list"": [""movie"", ""music"", ""night"", ""sport"", ""tv"", ""rocknroll""], }, ) await discovery_test(device, hass) context = Context() # Test for SetBands Error request = get_new_request( ""Alexa.EqualizerController"", ""SetBands"", ""media_player#test_bands"" ) request[""directive""][""payload""] = {""bands"": [{""name"": ""BASS"", ""value"": -2}]} msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert ""event"" in msg msg = msg[""event""] assert msg[""header""][""name""] == ""ErrorResponse"" assert msg[""header""][""namespace""] == ""Alexa"" assert msg[""payload""][""type""] == ""INVALID_DIRECTIVE"" # Test for AdjustBands Error request = get_new_request( ""Alexa.EqualizerController"", ""AdjustBands"", ""media_player#test_bands"" ) request[""directive""][""payload""] = { ""bands"": [{""name"": ""BASS"", ""levelDelta"": 3, ""levelDirection"": ""UP""}] } msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert ""event"" in msg msg = msg[""event""] assert msg[""header""][""name""] == ""ErrorResponse"" assert msg[""header""][""namespace""] == ""Alexa"" assert msg[""payload""][""type""] == ""INVALID_DIRECTIVE"" # Test for ResetBands Error request = get_new_request( ""Alexa.EqualizerController"", ""ResetBands"", ""media_player#test_bands"" ) request[""directive""][""payload""] = { ""bands"": [{""name"": ""BASS"", ""levelDelta"": 3, ""levelDirection"": ""UP""}] } msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert ""event"" in msg msg = msg[""event""] assert msg[""header""][""name""] == ""ErrorResponse"" assert msg[""header""][""namespace""] == ""Alexa"" assert msg[""payload""][""type""] == ""INVALID_DIRECTIVE"" ",Test EqualizerController bands directive not supported.,6,181,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_media_player_eq_bands_not_supported(hass): device = ( ""media_player.test_bands"", ""on"", { ""friendly_name"": ""Test media player"", ""supported_features"": SUPPORT_SELECT_SOUND_MODE, ""sound_mode"": ""tv"", ""sound_mode_list"": [""movie"", ""music"", ""night"", ""sport"", ""tv"", ""rocknroll""], }, ) await discovery_test(device, hass) context = Context() # Test for SetBands Error request = get_new_request( ""Alexa.EqualizerController"", ""SetBands"", ""media_player#test_bands"" ) request[""directive""][""payload""] = {""bands"": [{""name"": ""BASS"", ""value"": -2}]} msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert ""event"" in msg msg = msg[""event""] assert msg[""header""][""name""] == ""ErrorResponse"" assert msg[""header""][""namespace""] == ""Alexa"" assert msg[""payload""][""type""] == ""INVALID_DIRECTIVE"" # Test for AdjustBands Error request = get_new_request( ""Alexa.EqualizerController"", ""AdjustBands"", ""media_player#test_bands"" ) request[""directive""][""payload""] = { ""bands"": [{""name"": ""BASS"", ""levelDelta"": 3, ""levelDirection"": ""UP""}] } msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert ""event"" in msg msg = msg[""event""] assert msg[""header""][""name""] == ""ErrorResponse"" assert msg[""header""][""namespace""] == ""Alexa"" assert msg[""payload""][""type""] == ""INVALID_DIRECTIVE"" # Test for ResetBands Error request = get_new_request( ""Alexa.EqualizerController"", ""ResetBands"", ""media_player#test_bands"" ) request[""directive""][""payload""] = { ""bands"": [{""name"": ""BASS"", ""levelDelta"": 3, ""levelDirection"": ""UP""}] } msg = await smart_home.async_handle_message( hass, get_default_config(hass), request, context ) assert ""event"" in msg msg = msg[""event""] assert msg[""header""][""name""] == ""ErrorResponse"" assert msg[""header""][""namespace""] == ""Alexa"" assert msg[""payload""][""type""] == ""INVALID_DIRECTIVE"" ``` ###Assistant : Test EqualizerController bands directive not supported. " 2278,"def lstsq(a, b): q, r = qr(a) x = solve_triangular(r, q.T.conj().dot(b)) residuals = b - a.dot(x) residuals = abs(residuals**2).sum(axis=0, keepdims=b.ndim == 1) token = tokenize(a, b) # r must be a triangular with single block # rank rname = ""lstsq-rank-"" + token rdsk = {(rname,): (np.linalg.matrix_rank, (r.name, 0, 0))} graph = HighLevelGraph.from_collections(rname, rdsk, dependencies=[r]) # rank must be an integer rank = Array(graph, rname, shape=(), chunks=(), dtype=int) # singular sname = ""lstsq-singular-"" + token rt = r.T.conj() sdsk = { (sname, 0): ( _reverse, (np.sqrt, (np.linalg.eigvalsh, (np.dot, (rt.name, 0, 0), (r.name, 0, 0)))), ) } graph = HighLevelGraph.from_collections(sname, sdsk, dependencies=[rt, r]) meta = meta_from_array(residuals, 1) s = Array(graph, sname, shape=(r.shape[0],), chunks=r.shape[0], meta=meta) return x, residuals, rank, s @derived_from(np.linalg)"," Return the least-squares solution to a linear matrix equation using QR decomposition. Solves the equation `a x = b` by computing a vector `x` that minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may be under-, well-, or over- determined (i.e., the number of linearly independent rows of `a` can be less than, equal to, or greater than its number of linearly independent columns). 
If `a` is square and of full rank, then `x` (but for round-off error) is the ""exact"" solution of the equation. Parameters ---------- a : (M, N) array_like ""Coefficient"" matrix. b : {(M,), (M, K)} array_like Ordinate or ""dependent variable"" values. If `b` is two-dimensional, the least-squares solution is calculated for each of the `K` columns of `b`. Returns ------- x : {(N,), (N, K)} Array Least-squares solution. If `b` is two-dimensional, the solutions are in the `K` columns of `x`. residuals : {(1,), (K,)} Array Sums of residuals; squared Euclidean 2-norm for each column in ``b - a*x``. If `b` is 1-dimensional, this is a (1,) shape array. Otherwise the shape is (K,). rank : Array Rank of matrix `a`. s : (min(M, N),) Array Singular values of `a`. ",198,118,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def lstsq(a, b): q, r = qr(a) x = solve_triangular(r, q.T.conj().dot(b)) residuals = b - a.dot(x) residuals = abs(residuals**2).sum(axis=0, keepdims=b.ndim == 1) token = tokenize(a, b) # r must be a triangular with single block # rank rname = ""lstsq-rank-"" + token rdsk = {(rname,): (np.linalg.matrix_rank, (r.name, 0, 0))} graph = HighLevelGraph.from_collections(rname, rdsk, dependencies=[r]) # rank must be an integer rank = Array(graph, rname, shape=(), chunks=(), dtype=int) # singular sname = ""lstsq-singular-"" + token rt = r.T.conj() sdsk = { (sname, 0): ( _reverse, (np.sqrt, (np.linalg.eigvalsh, (np.dot, (rt.name, 0, 0), (r.name, 0, 0)))), ) } graph = HighLevelGraph.from_collections(sname, sdsk, dependencies=[rt, r]) meta = meta_from_array(residuals, 1) s = Array(graph, sname, shape=(r.shape[0],), chunks=r.shape[0], meta=meta) return x, residuals, rank, s @derived_from(np.linalg) ``` ###Assistant : Return the least-squares solution to a linear matrix equation using QR decomposition. Solves the equation `a x = b` by computing a vector `x` that minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may be under-, well-, or over- determined (i.e., the number of linearly independent rows of `a` can be less than, equal to, or greater than its number of linearly independent columns). If `a` is square and of full rank, then `x` (but for round-off error) is the ""exact"" solution of the equation. Parameters ---------- a : (M, N) array_like ""Coefficient"" matrix. b : {(M,), (M, K)} array_like Ordinate or ""dependent variable"" values. If `b` is two-dimensional, the least-squares solution is calculated for each of the `K` columns of `b`. Returns ------- x : {(N,), (N, K)} Array Least-squares solution. If `b` is two-dimensional, the solutions are in the `K` columns of `x`. residuals : {(1,), (K,)} Array Sums of residuals; squared Euclidean 2-norm for each column in ``b - a*x``. If `b` is 1-dimensional, this is a (1,) shape array. Otherwise the shape is (K,). rank : Array Rank of matrix `a`. s : (min(M, N),) Array Singular values of `a`. " 2279,"def docker_environment(): # type: () -> t.Dict[str, str] env = common_environment() env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_') or key.startswith('CONTAINER_'))) return env ",Return a dictionary of docker related environment variables found in the current environment.,13,23,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def docker_environment(): # type: () -> t.Dict[str, str] env = common_environment() env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_') or key.startswith('CONTAINER_'))) return env ``` ###Assistant : Return a dictionary of docker related environment variables found in the current environment. " 2280,"def test_remove_from_figure(use_gridspec): fig, ax = plt.subplots() sc = ax.scatter([1, 2], [3, 4]) sc.set_array(np.array([5, 6])) pre_position = ax.get_position() cb = fig.colorbar(sc, use_gridspec=use_gridspec) fig.subplots_adjust() cb.remove() fig.subplots_adjust() post_position = ax.get_position() assert (pre_position.get_points() == post_position.get_points()).all() "," Test `remove` with the specified ``use_gridspec`` setting ",7,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_remove_from_figure(use_gridspec): fig, ax = plt.subplots() sc = ax.scatter([1, 2], [3, 4]) sc.set_array(np.array([5, 6])) pre_position = ax.get_position() cb = fig.colorbar(sc, use_gridspec=use_gridspec) fig.subplots_adjust() cb.remove() fig.subplots_adjust() post_position = ax.get_position() assert (pre_position.get_points() == post_position.get_points()).all() ``` ###Assistant : Test `remove` with the specified ``use_gridspec`` setting " 2281,"def groupby(func, seq): d = {} for item in seq: key = func(item) if key not in d: d[key] = [] d[key].append(item) return d "," Group a collection by a key function >>> from sympy.multipledispatch.utils import groupby >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank'] >>> groupby(len, names) # doctest: +SKIP {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']} >>> iseven = lambda x: x % 2 == 0 >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP {False: [1, 3, 5, 7], True: [2, 4, 6, 8]} See Also: ``countby`` ",72,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def groupby(func, seq): d = {} for item in seq: key = func(item) if key not in d: d[key] = [] d[key].append(item) return d ``` ###Assistant : Group a collection by a key function >>> from sympy.multipledispatch.utils import groupby >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank'] >>> groupby(len, names) # doctest: +SKIP {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']} >>> iseven = lambda x: x % 2 == 0 >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP {False: [1, 3, 5, 7], True: [2, 4, 6, 8]} See Also: ``countby`` " 2282,"def input_files(self) -> List[str]: metadata = self._plan.execute().get_metadata() files = set() for m in metadata: for f in m.input_files: files.add(f) return list(files) ","Return the list of input files for the dataset. Time complexity: O(num input files) Returns: The list of input files used to create the dataset, or an empty list if the input files is not known. ",36,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def input_files(self) -> List[str]: metadata = self._plan.execute().get_metadata() files = set() for m in metadata: for f in m.input_files: files.add(f) return list(files) ``` ###Assistant : Return the list of input files for the dataset. Time complexity: O(num input files) Returns: The list of input files used to create the dataset, or an empty list if the input files is not known. 
" 2283,"def get_nccl_reduce_op(reduce_op): if reduce_op not in NCCL_REDUCE_OP_MAP: raise RuntimeError(""NCCL does not support reduce op: '{}'."".format(reduce_op)) return NCCL_REDUCE_OP_MAP[reduce_op] ","Map the reduce op to NCCL reduce op type. Args: reduce_op (ReduceOp): ReduceOp Enum (SUM/PRODUCT/MIN/MAX). Returns: (nccl.ncclRedOp_t): the mapped NCCL reduce op. ",22,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_nccl_reduce_op(reduce_op): if reduce_op not in NCCL_REDUCE_OP_MAP: raise RuntimeError(""NCCL does not support reduce op: '{}'."".format(reduce_op)) return NCCL_REDUCE_OP_MAP[reduce_op] ``` ###Assistant : Map the reduce op to NCCL reduce op type. Args: reduce_op (ReduceOp): ReduceOp Enum (SUM/PRODUCT/MIN/MAX). Returns: (nccl.ncclRedOp_t): the mapped NCCL reduce op. " 2284,"def aligned(self) -> AlignedFace: assert self._aligned is not None return self._aligned ", The aligned face connected to this detected face. ,8,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def aligned(self) -> AlignedFace: assert self._aligned is not None return self._aligned ``` ###Assistant : The aligned face connected to this detected face. " 2285,"def upgrade(): conn = op.get_bind() if conn.dialect.name == ""sqlite"": # in sqlite TEXT and STRING column types are the same return if conn.dialect.name == ""mysql"": op.alter_column( 'connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text(length=5000), existing_nullable=True, ) else: # postgres does not allow size modifier for text type op.alter_column('connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text()) ",Apply Fix description field in ``connection`` to be ``text``,9,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def upgrade(): conn = op.get_bind() if conn.dialect.name == ""sqlite"": # in sqlite TEXT and STRING column types are the same return if conn.dialect.name == ""mysql"": op.alter_column( 'connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text(length=5000), existing_nullable=True, ) else: # postgres does not allow size modifier for text type op.alter_column('connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text()) ``` ###Assistant : Apply Fix description field in ``connection`` to be ``text`` " 2286,"def test_memory(self): params = sum(map(list, six.itervalues(self.net.params)), []) blobs = self.net.blobs.values() del self.net # now sum everything (forcing all memory to be read) total = 0 for p in params: total += p.data.sum() + p.diff.sum() for bl in blobs: total += bl.data.sum() + bl.diff.sum() ",Check that holding onto blob data beyond the life of a Net is OK,14,43,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_memory(self): params = sum(map(list, six.itervalues(self.net.params)), []) blobs = self.net.blobs.values() del self.net # now sum everything (forcing all memory to be read) total = 0 for p in params: total += p.data.sum() + p.diff.sum() for bl in blobs: total += bl.data.sum() + bl.diff.sum() ``` ###Assistant : Check that holding onto blob data beyond the life of a Net is OK " 2287,"async def test_timeout_stops_execution_in_sync_subflows(self, tmp_path): canary_file = tmp_path / ""canary"" "," Sync flow runs can be cancelled after a timeout once a task is called ",14,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_timeout_stops_execution_in_sync_subflows(self, tmp_path): canary_file = tmp_path / ""canary"" ``` ###Assistant : Sync flow runs can be cancelled after a timeout once a task is called " 2288,"def get_income_account(doctype, txt, searchfield, start, page_len, filters): from erpnext.controllers.queries import get_match_cond # income account can be any Credit account, # but can also be an Asset account with account_type='Income Account' in special circumstances. # Hence the first condition is an ""OR"" if not filters: filters = {} condition = """" if filters.get(""company""): condition += ""and tabAccount.company = %(company)s"" return frappe.db.sql( .format( condition=condition, match_condition=get_match_cond(doctype), key=searchfield ), {""txt"": ""%"" + txt + ""%"", ""company"": filters.get(""company"", """")}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs ","select tabAccount.name from `tabAccount` where (tabAccount.report_type = ""Profit and Loss"" or tabAccount.account_type in (""Income Account"", ""Temporary"")) and tabAccount.is_group=0 and tabAccount.`{key}` LIKE %(txt)s {condition} {match_condition} order by idx desc, name",29,77,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_income_account(doctype, txt, searchfield, start, page_len, filters): from erpnext.controllers.queries import get_match_cond # income account can be any Credit account, # but can also be an Asset account with account_type='Income Account' in special circumstances. 
# Hence the first condition is an ""OR"" if not filters: filters = {} condition = """" if filters.get(""company""): condition += ""and tabAccount.company = %(company)s"" return frappe.db.sql( .format( condition=condition, match_condition=get_match_cond(doctype), key=searchfield ), {""txt"": ""%"" + txt + ""%"", ""company"": filters.get(""company"", """")}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs ``` ###Assistant : select tabAccount.name from `tabAccount` where (tabAccount.report_type = ""Profit and Loss"" or tabAccount.account_type in (""Income Account"", ""Temporary"")) and tabAccount.is_group=0 and tabAccount.`{key}` LIKE %(txt)s {condition} {match_condition} order by idx desc, name " 2289,"def win_find_exe(filename, installsubdir=None, env=""ProgramFiles""): # type: (str, Optional[Any], str) -> str fns = [filename] if filename.endswith("".exe"") else [filename + "".exe"", filename] # noqa: E501 for fn in fns: try: if installsubdir is None: path = _where(fn) else: path = _where(fn, dirs=[os.path.join(os.environ[env], installsubdir)]) # noqa: E501 except IOError: path = None else: break return path or """" ","Find executable in current dir, system path or in the given ProgramFiles subdir, and return its absolute path. ",18,56,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def win_find_exe(filename, installsubdir=None, env=""ProgramFiles""): # type: (str, Optional[Any], str) -> str fns = [filename] if filename.endswith("".exe"") else [filename + "".exe"", filename] # noqa: E501 for fn in fns: try: if installsubdir is None: path = _where(fn) else: path = _where(fn, dirs=[os.path.join(os.environ[env], installsubdir)]) # noqa: E501 except IOError: path = None else: break return path or """" ``` ###Assistant : Find executable in current dir, system path or in the given ProgramFiles subdir, and return its absolute path. " 2290,"def test_custom_page_queryset(self): self.assertIs(type(CustomManagerPage.objects.all()), CustomPageQuerySet) self.assertIs(type(CustomManagerPage.objects.about_spam()), CustomPageQuerySet) self.assertIs( type(CustomManagerPage.objects.all().about_spam()), CustomPageQuerySet ) self.assertIs( type(CustomManagerPage.objects.about_spam().all()), CustomPageQuerySet ) "," Managers that are constructed from a custom PageQuerySet (via PageManager.from_queryset(CustomPageQuerySet)) should return querysets of that type ",16,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_custom_page_queryset(self): self.assertIs(type(CustomManagerPage.objects.all()), CustomPageQuerySet) self.assertIs(type(CustomManagerPage.objects.about_spam()), CustomPageQuerySet) self.assertIs( type(CustomManagerPage.objects.all().about_spam()), CustomPageQuerySet ) self.assertIs( type(CustomManagerPage.objects.about_spam().all()), CustomPageQuerySet ) ``` ###Assistant : Managers that are constructed from a custom PageQuerySet (via PageManager.from_queryset(CustomPageQuerySet)) should return querysets of that type " 2291,"def distro_release_info(self): # type: () -> Dict[str, str] return self._distro_release_info "," Return a dictionary containing key-value pairs for the information items from the distro release file data source of the OS distribution. For details, see :func:`distro.distro_release_info`. ",25,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def distro_release_info(self): # type: () -> Dict[str, str] return self._distro_release_info ``` ###Assistant : Return a dictionary containing key-value pairs for the information items from the distro release file data source of the OS distribution. For details, see :func:`distro.distro_release_info`. " 2292,"def user_documents_dir(self) -> str: return os.path.normpath(get_win_folder(""CSIDL_PERSONAL"")) "," :return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents`` ",9,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def user_documents_dir(self) -> str: return os.path.normpath(get_win_folder(""CSIDL_PERSONAL"")) ``` ###Assistant : :return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents`` " 2293,"def round_robin_reduce_idx_iterator(self): idx = 0 round_idx = 0 while idx < self.output_num_blocks: for merge_idx in range(self.num_merge_tasks_per_round): if merge_idx < self._partitions_with_extra_task: reduce_idx = merge_idx * (self.merge_partition_size + 1) partition_size = self.merge_partition_size + 1 else: reduce_idx = self._partitions_with_extra_task * ( self.merge_partition_size + 1 ) merge_idx -= self._partitions_with_extra_task reduce_idx += merge_idx * self.merge_partition_size partition_size = self.merge_partition_size if round_idx >= partition_size: continue reduce_idx += round_idx yield reduce_idx idx += 1 round_idx += 1 "," When there are multiple nodes, merge tasks are spread throughout the cluster to improve load-balancing. Each merge task produces outputs for a contiguous partition of reduce tasks. This method creates an iterator that returns reduce task indices round-robin across the merge tasks. This can be used to submit reduce tasks in a way that spreads the load evenly across the cluster. ",61,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def round_robin_reduce_idx_iterator(self): idx = 0 round_idx = 0 while idx < self.output_num_blocks: for merge_idx in range(self.num_merge_tasks_per_round): if merge_idx < self._partitions_with_extra_task: reduce_idx = merge_idx * (self.merge_partition_size + 1) partition_size = self.merge_partition_size + 1 else: reduce_idx = self._partitions_with_extra_task * ( self.merge_partition_size + 1 ) merge_idx -= self._partitions_with_extra_task reduce_idx += merge_idx * self.merge_partition_size partition_size = self.merge_partition_size if round_idx >= partition_size: continue reduce_idx += round_idx yield reduce_idx idx += 1 round_idx += 1 ``` ###Assistant : When there are multiple nodes, merge tasks are spread throughout the cluster to improve load-balancing. Each merge task produces outputs for a contiguous partition of reduce tasks. This method creates an iterator that returns reduce task indices round-robin across the merge tasks. This can be used to submit reduce tasks in a way that spreads the load evenly across the cluster. " 2294,"def _check_index_name(self, result): if self._by is not None: # pandas does not name the index for this case result._query_compiler.set_index_name(None) return result "," Check the result of groupby aggregation on the need of resetting index name. Parameters ---------- result : DataFrame Group by aggregation result. Returns ------- DataFrame ",25,21,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _check_index_name(self, result): if self._by is not None: # pandas does not name the index for this case result._query_compiler.set_index_name(None) return result ``` ###Assistant : Check the result of groupby aggregation on the need of resetting index name. Parameters ---------- result : DataFrame Group by aggregation result. Returns ------- DataFrame " 2295,"def check_points_in_rotated_boxes(points, boxes): # [B, N, 5] -> [B, N, 4, 2] corners = box2corners(boxes) # [1, L, 2] -> [1, 1, L, 2] points = points.unsqueeze(0) # [B, N, 4, 2] -> [B, N, 1, 2] a, b, c, d = corners.split(4, axis=2) ab = b - a ad = d - a # [B, N, L, 2] ap = points - a # [B, N, L] norm_ab = paddle.sum(ab * ab, axis=-1) # [B, N, L] norm_ad = paddle.sum(ad * ad, axis=-1) # [B, N, L] dot product ap_dot_ab = paddle.sum(ap * ab, axis=-1) # [B, N, L] dot product ap_dot_ad = paddle.sum(ap * ad, axis=-1) # [B, N, L] = |A|*|B|*cos(theta) is_in_box = (ap_dot_ab >= 0) & (ap_dot_ab <= norm_ab) & (ap_dot_ad >= 0) & ( ap_dot_ad <= norm_ad) return is_in_box ","Check whether point is in rotated boxes Args: points (tensor): (1, L, 2) anchor points boxes (tensor): [B, N, 5] gt_bboxes eps (float): default 1e-9 Returns: is_in_box (tensor): (B, N, L) ",31,136,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_points_in_rotated_boxes(points, boxes): # [B, N, 5] -> [B, N, 4, 2] corners = box2corners(boxes) # [1, L, 2] -> [1, 1, L, 2] points = points.unsqueeze(0) # [B, N, 4, 2] -> [B, N, 1, 2] a, b, c, d = corners.split(4, axis=2) ab = b - a ad = d - a # [B, N, L, 2] ap = points - a # [B, N, L] norm_ab = paddle.sum(ab * ab, axis=-1) # [B, N, L] norm_ad = paddle.sum(ad * ad, axis=-1) # [B, N, L] dot product ap_dot_ab = paddle.sum(ap * ab, axis=-1) # [B, N, L] dot product ap_dot_ad = paddle.sum(ap * ad, axis=-1) # [B, N, L] = |A|*|B|*cos(theta) is_in_box = (ap_dot_ab >= 0) & (ap_dot_ab <= norm_ab) & (ap_dot_ad >= 0) & ( ap_dot_ad <= norm_ad) return is_in_box ``` ###Assistant : Check whether point is in rotated boxes Args: points (tensor): (1, L, 2) anchor points boxes (tensor): [B, N, 5] gt_bboxes eps (float): default 1e-9 Returns: is_in_box (tensor): (B, N, L) " 2296,"def write_ssh_wrapper(module): try: # make sure we have full permission to the module_dir, which # may not be the case if we're sudo'ing to a non-root user if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK): fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/') else: raise OSError except (IOError, OSError): fd, wrapper_path = tempfile.mkstemp() # use existing git_ssh/ssh_command, fallback to 'ssh' template = b( % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh'))) # write it with os.fdopen(fd, 'w+b') as fh: fh.write(template) # set execute st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) module.debug('Wrote temp git ssh wrapper (%s): %s' % (wrapper_path, template)) # ensure we cleanup after ourselves module.add_cleanup_file(path=wrapper_path) return wrapper_path "," This writes an shell wrapper for ssh options to be used with git this is only relevant for older versions of gitthat cannot handle the options themselves. Returns path to the script #!/bin/sh %s $GIT_SSH_OPTS ",35,102,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def write_ssh_wrapper(module): try: # make sure we have full permission to the module_dir, which # may not be the case if we're sudo'ing to a non-root user if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK): fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/') else: raise OSError except (IOError, OSError): fd, wrapper_path = tempfile.mkstemp() # use existing git_ssh/ssh_command, fallback to 'ssh' template = b( % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh'))) # write it with os.fdopen(fd, 'w+b') as fh: fh.write(template) # set execute st = os.stat(wrapper_path) os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC) module.debug('Wrote temp git ssh wrapper (%s): %s' % (wrapper_path, template)) # ensure we cleanup after ourselves module.add_cleanup_file(path=wrapper_path) return wrapper_path ``` ###Assistant : This writes an shell wrapper for ssh options to be used with git this is only relevant for older versions of gitthat cannot handle the options themselves. Returns path to the script #!/bin/sh %s $GIT_SSH_OPTS " 2297,"def alpha_composite(self, im, dest=(0, 0), source=(0, 0)): if not isinstance(source, (list, tuple)): msg = ""Source must be a tuple"" raise ValueError(msg) if not isinstance(dest, (list, tuple)): msg = ""Destination must be a tuple"" raise ValueError(msg) if not len(source) in (2, 4): msg = ""Source must be a 2 or 4-tuple"" raise ValueError(msg) if not len(dest) == 2: msg = ""Destination must be a 2-tuple"" raise ValueError(msg) if min(source) < 0: msg = ""Source must be non-negative"" raise ValueError(msg) if len(source) == 2: source = source + im.size # over image, crop if it's not the whole thing. if source == (0, 0) + im.size: overlay = im else: overlay = im.crop(source) # target for the paste box = dest + (dest[0] + overlay.width, dest[1] + overlay.height) # destination image. don't copy if we're using the whole image. if box == (0, 0) + self.size: background = self else: background = self.crop(box) result = alpha_composite(background, overlay) self.paste(result, box) ","'In-place' analog of Image.alpha_composite. Composites an image onto this image. :param im: image to composite over this one :param dest: Optional 2 tuple (left, top) specifying the upper left corner in this (destination) image. :param source: Optional 2 (left, top) tuple for the upper left corner in the overlay source image, or 4 tuple (left, top, right, bottom) for the bounds of the source rectangle Performance Note: Not currently implemented in-place in the core layer. ",75,157,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def alpha_composite(self, im, dest=(0, 0), source=(0, 0)): if not isinstance(source, (list, tuple)): msg = ""Source must be a tuple"" raise ValueError(msg) if not isinstance(dest, (list, tuple)): msg = ""Destination must be a tuple"" raise ValueError(msg) if not len(source) in (2, 4): msg = ""Source must be a 2 or 4-tuple"" raise ValueError(msg) if not len(dest) == 2: msg = ""Destination must be a 2-tuple"" raise ValueError(msg) if min(source) < 0: msg = ""Source must be non-negative"" raise ValueError(msg) if len(source) == 2: source = source + im.size # over image, crop if it's not the whole thing. if source == (0, 0) + im.size: overlay = im else: overlay = im.crop(source) # target for the paste box = dest + (dest[0] + overlay.width, dest[1] + overlay.height) # destination image. don't copy if we're using the whole image. 
if box == (0, 0) + self.size: background = self else: background = self.crop(box) result = alpha_composite(background, overlay) self.paste(result, box) ``` ###Assistant : 'In-place' analog of Image.alpha_composite. Composites an image onto this image. :param im: image to composite over this one :param dest: Optional 2 tuple (left, top) specifying the upper left corner in this (destination) image. :param source: Optional 2 (left, top) tuple for the upper left corner in the overlay source image, or 4 tuple (left, top, right, bottom) for the bounds of the source rectangle Performance Note: Not currently implemented in-place in the core layer. " 2298,"def test_form_field_clean_name_override(self): field = ExtendedFormField.objects.create( page=self.form_page, sort_order=1, label=""quanti ge·là·to?"", field_type=""number"", # only number fields will add the ID as a prefix to the clean_name required=True, ) self.assertEqual(field.clean_name, ""number_field--quanti_gelato"") "," Creating a new field should use the overridden method See ExtendedFormField get_field_clean_name method ",13,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_form_field_clean_name_override(self): field = ExtendedFormField.objects.create( page=self.form_page, sort_order=1, label=""quanti ge·là·to?"", field_type=""number"", # only number fields will add the ID as a prefix to the clean_name required=True, ) self.assertEqual(field.clean_name, ""number_field--quanti_gelato"") ``` ###Assistant : Creating a new field should use the overridden method See ExtendedFormField get_field_clean_name method " 2299,"def polyder(p, m=1): _check_arraylike(""polyder"", p) m = core.concrete_or_error(operator.index, m, ""'m' argument of jnp.polyder"") p, = _promote_dtypes_inexact(p) if m < 0: raise ValueError(""Order of derivative must be positive"") if m == 0: return p coeff = (arange(len(p), m, -1)[np.newaxis, :] - 1 - arange(m)[:, np.newaxis]).prod(0) return p[:-m] * coeff _LEADING_ZEROS_DOC = @_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC)","\ Setting trim_leading_zeros=True makes the output match that of numpy. But prevents the function from being able to be used in compiled code. ",23,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def polyder(p, m=1): _check_arraylike(""polyder"", p) m = core.concrete_or_error(operator.index, m, ""'m' argument of jnp.polyder"") p, = _promote_dtypes_inexact(p) if m < 0: raise ValueError(""Order of derivative must be positive"") if m == 0: return p coeff = (arange(len(p), m, -1)[np.newaxis, :] - 1 - arange(m)[:, np.newaxis]).prod(0) return p[:-m] * coeff _LEADING_ZEROS_DOC = @_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC) ``` ###Assistant : \ Setting trim_leading_zeros=True makes the output match that of numpy. But prevents the function from being able to be used in compiled code. 
" 2300,"def probiou_loss(pred, target, eps=1e-3, mode='l1'): gbboxes1 = gbb_form(pred) gbboxes2 = gbb_form(target) x1, y1, a1_, b1_, c1_ = gbboxes1[:, 0], gbboxes1[:, 1], gbboxes1[:, 2], gbboxes1[:, 3], gbboxes1[:, 4] x2, y2, a2_, b2_, c2_ = gbboxes2[:, 0], gbboxes2[:, 1], gbboxes2[:, 2], gbboxes2[:, 3], gbboxes2[:, 4] a1, b1, c1 = rotated_form(a1_, b1_, c1_) a2, b2, c2 = rotated_form(a2_, b2_, c2_) t1 = 0.25 * ((a1 + a2) * (paddle.pow(y1 - y2, 2)) + (b1 + b2) * (paddle.pow(x1 - x2, 2))) + \ 0.5 * ((c1+c2)*(x2-x1)*(y1-y2)) t2 = (a1 + a2) * (b1 + b2) - paddle.pow(c1 + c2, 2) t3_ = (a1 * b1 - c1 * c1) * (a2 * b2 - c2 * c2) t3 = 0.5 * paddle.log(t2 / (4 * paddle.sqrt(F.relu(t3_)) + eps)) B_d = (t1 / t2) + t3 # B_d = t1 + t2 + t3 B_d = paddle.clip(B_d, min=eps, max=100.0) l1 = paddle.sqrt(1.0 - paddle.exp(-B_d) + eps) l_i = paddle.pow(l1, 2.0) l2 = -paddle.log(1.0 - l_i + eps) if mode == 'l1': probiou = l1 if mode == 'l2': probiou = l2 return probiou @serializable @register"," pred -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours predicted box ;in case of HBB angle == 0 target -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours target box ;in case of HBB angle == 0 eps -> threshold to avoid infinite values mode -> ('l1' in [0,1] or 'l2' in [0,inf]) metrics according our paper ",58,181,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def probiou_loss(pred, target, eps=1e-3, mode='l1'): gbboxes1 = gbb_form(pred) gbboxes2 = gbb_form(target) x1, y1, a1_, b1_, c1_ = gbboxes1[:, 0], gbboxes1[:, 1], gbboxes1[:, 2], gbboxes1[:, 3], gbboxes1[:, 4] x2, y2, a2_, b2_, c2_ = gbboxes2[:, 0], gbboxes2[:, 1], gbboxes2[:, 2], gbboxes2[:, 3], gbboxes2[:, 4] a1, b1, c1 = rotated_form(a1_, b1_, c1_) a2, b2, c2 = rotated_form(a2_, b2_, c2_) t1 = 0.25 * ((a1 + a2) * (paddle.pow(y1 - y2, 2)) + (b1 + b2) * (paddle.pow(x1 - x2, 2))) + \ 0.5 * ((c1+c2)*(x2-x1)*(y1-y2)) t2 = (a1 + a2) * (b1 + b2) - paddle.pow(c1 + c2, 2) t3_ = (a1 * b1 - c1 * c1) * (a2 * b2 - c2 * c2) t3 = 0.5 * paddle.log(t2 / (4 * paddle.sqrt(F.relu(t3_)) + eps)) B_d = (t1 / t2) + t3 # B_d = t1 + t2 + t3 B_d = paddle.clip(B_d, min=eps, max=100.0) l1 = paddle.sqrt(1.0 - paddle.exp(-B_d) + eps) l_i = paddle.pow(l1, 2.0) l2 = -paddle.log(1.0 - l_i + eps) if mode == 'l1': probiou = l1 if mode == 'l2': probiou = l2 return probiou @serializable @register ``` ###Assistant : pred -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours predicted box ;in case of HBB angle == 0 target -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours target box ;in case of HBB angle == 0 eps -> threshold to avoid infinite values mode -> ('l1' in [0,1] or 'l2' in [0,inf]) metrics according our paper " 2301,"def mayDisableConsoleWindow(): # TODO: What about MSYS2? return isWin32Windows() or isMacOS() ",":returns: bool derived from platform support of disabling the console,",10,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mayDisableConsoleWindow(): # TODO: What about MSYS2? return isWin32Windows() or isMacOS() ``` ###Assistant : :returns: bool derived from platform support of disabling the console, " 2302,"def compat_cfg(cfg): cfg = copy.deepcopy(cfg) cfg = compat_imgs_per_gpu(cfg) cfg = compat_loader_args(cfg) cfg = compat_runner_args(cfg) return cfg ","This function would modify some filed to keep the compatibility of config. 
For example, it will move some args which will be deprecated to the correct fields. ",27,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def compat_cfg(cfg): cfg = copy.deepcopy(cfg) cfg = compat_imgs_per_gpu(cfg) cfg = compat_loader_args(cfg) cfg = compat_runner_args(cfg) return cfg ``` ###Assistant : This function would modify some filed to keep the compatibility of config. For example, it will move some args which will be deprecated to the correct fields. " 2303,"def getsourcelines(object): object = unwrap(object) lines, lnum = findsource(object) if istraceback(object): object = object.tb_frame # for module or frame that corresponds to module, return all source lines if (ismodule(object) or (isframe(object) and object.f_code.co_name == """")): return lines, 0 else: return getblock(lines[lnum:]), lnum + 1 ","Return a list of source lines and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of the lines corresponding to the object and the line number indicates where in the original source file the first line of code was found. An OSError is raised if the source code cannot be retrieved.",71,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getsourcelines(object): object = unwrap(object) lines, lnum = findsource(object) if istraceback(object): object = object.tb_frame # for module or frame that corresponds to module, return all source lines if (ismodule(object) or (isframe(object) and object.f_code.co_name == """")): return lines, 0 else: return getblock(lines[lnum:]), lnum + 1 ``` ###Assistant : Return a list of source lines and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of the lines corresponding to the object and the line number indicates where in the original source file the first line of code was found. An OSError is raised if the source code cannot be retrieved. " 2304,"def get_del_batches(self, objs, fields): field_names = [field.name for field in fields] conn_batch_size = max( connections[self.using].ops.bulk_batch_size(field_names, objs), 1 ) if len(objs) > conn_batch_size: return [ objs[i : i + conn_batch_size] for i in range(0, len(objs), conn_batch_size) ] else: return [objs] "," Return the objs in suitably sized batches for the used connection. ",11,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_del_batches(self, objs, fields): field_names = [field.name for field in fields] conn_batch_size = max( connections[self.using].ops.bulk_batch_size(field_names, objs), 1 ) if len(objs) > conn_batch_size: return [ objs[i : i + conn_batch_size] for i in range(0, len(objs), conn_batch_size) ] else: return [objs] ``` ###Assistant : Return the objs in suitably sized batches for the used connection. " 2305,"def save(self, global_step): save_path = osp.join(self.directory, f""{global_step:09d}.ckpt"") self.checkpoint.save(save_path) self.latest_checkpoint = save_path self.queue.put(True) ","Create a new checkpoint. Args: global_step (int): The iteration number which will be used to name the checkpoint. ",18,12,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def save(self, global_step): save_path = osp.join(self.directory, f""{global_step:09d}.ckpt"") self.checkpoint.save(save_path) self.latest_checkpoint = save_path self.queue.put(True) ``` ###Assistant : Create a new checkpoint. Args: global_step (int): The iteration number which will be used to name the checkpoint. " 2306,"def versions_from_file(filename): try: with open(filename) as f: contents = f.read() except OSError: raise NotThisMethod(""unable to read _version.py"") mo = re.search(r""version_json = # END VERSION_JSON"", contents, re.M | re.S) if not mo: mo = re.search(r""version_json = # END VERSION_JSON"", contents, re.M | re.S) if not mo: raise NotThisMethod(""no version_json in _version.py"") return json.loads(mo.group(1)) ",Try to determine the version from _version.py if present.\n(.*)\r\n(.*),9,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def versions_from_file(filename): try: with open(filename) as f: contents = f.read() except OSError: raise NotThisMethod(""unable to read _version.py"") mo = re.search(r""version_json = # END VERSION_JSON"", contents, re.M | re.S) if not mo: mo = re.search(r""version_json = # END VERSION_JSON"", contents, re.M | re.S) if not mo: raise NotThisMethod(""no version_json in _version.py"") return json.loads(mo.group(1)) ``` ###Assistant : Try to determine the version from _version.py if present.\n(.*)\r\n(.*) " 2307,"def get_total_allocated_amount(payment_entry): return frappe.db.sql( , (payment_entry.payment_document, payment_entry.payment_entry), as_dict=True, ) "," SELECT SUM(btp.allocated_amount) as allocated_amount, bt.name FROM `tabBank Transaction Payments` as btp LEFT JOIN `tabBank Transaction` bt ON bt.name=btp.parent WHERE btp.payment_document = %s AND btp.payment_entry = %s AND bt.docstatus = 1",30,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_total_allocated_amount(payment_entry): return frappe.db.sql( , (payment_entry.payment_document, payment_entry.payment_entry), as_dict=True, ) ``` ###Assistant : SELECT SUM(btp.allocated_amount) as allocated_amount, bt.name FROM `tabBank Transaction Payments` as btp LEFT JOIN `tabBank Transaction` bt ON bt.name=btp.parent WHERE btp.payment_document = %s AND btp.payment_entry = %s AND bt.docstatus = 1 " 2308,"def min_weight_matching(G, maxcardinality=False, weight=""weight""): if len(G.edges) == 0: return max_weight_matching(G, maxcardinality, weight) G_edges = G.edges(data=weight, default=1) min_weight = min(w for _, _, w in G_edges) InvG = nx.Graph() edges = ((u, v, 1 / (1 + w - min_weight)) for u, v, w in G_edges) InvG.add_weighted_edges_from(edges, weight=weight) return max_weight_matching(InvG, maxcardinality, weight) @not_implemented_for(""multigraph"") @not_implemented_for(""directed"")","Computing a minimum-weight maximal matching of G. Use reciprocal edge weights with the maximum-weight algorithm. A matching is a subset of edges in which no node occurs more than once. The weight of a matching is the sum of the weights of its edges. A maximal matching cannot add more edges and still be a matching. The cardinality of a matching is the number of matched edges. This method replaces the weights with their reciprocal and then runs :func:`max_weight_matching`. Read the documentation of max_weight_matching for more information. 
Parameters ---------- G : NetworkX graph Undirected graph maxcardinality: bool, optional (default=False) If maxcardinality is True, compute the maximum-cardinality matching with minimum weight among all maximum-cardinality matchings. weight: string, optional (default='weight') Edge data key corresponding to the edge weight. If key not found, uses 1 as weight. Returns ------- matching : set A minimal weight matching of the graph. ",146,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def min_weight_matching(G, maxcardinality=False, weight=""weight""): if len(G.edges) == 0: return max_weight_matching(G, maxcardinality, weight) G_edges = G.edges(data=weight, default=1) min_weight = min(w for _, _, w in G_edges) InvG = nx.Graph() edges = ((u, v, 1 / (1 + w - min_weight)) for u, v, w in G_edges) InvG.add_weighted_edges_from(edges, weight=weight) return max_weight_matching(InvG, maxcardinality, weight) @not_implemented_for(""multigraph"") @not_implemented_for(""directed"") ``` ###Assistant : Computing a minimum-weight maximal matching of G. Use reciprocal edge weights with the maximum-weight algorithm. A matching is a subset of edges in which no node occurs more than once. The weight of a matching is the sum of the weights of its edges. A maximal matching cannot add more edges and still be a matching. The cardinality of a matching is the number of matched edges. This method replaces the weights with their reciprocal and then runs :func:`max_weight_matching`. Read the documentation of max_weight_matching for more information. Parameters ---------- G : NetworkX graph Undirected graph maxcardinality: bool, optional (default=False) If maxcardinality is True, compute the maximum-cardinality matching with minimum weight among all maximum-cardinality matchings. weight: string, optional (default='weight') Edge data key corresponding to the edge weight. If key not found, uses 1 as weight. Returns ------- matching : set A minimal weight matching of the graph. " 2309,"def site_config_dir(self) -> str: return self._append_app_name_and_version(""/Library/Preferences"") ",":return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``",9,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def site_config_dir(self) -> str: return self._append_app_name_and_version(""/Library/Preferences"") ``` ###Assistant : :return: config directory shared by the users, e.g. ``/Library/Preferences/$appname`` " 2310,"def _cmp_op(self, other, op_name): lhs_dtype_class = self._get_dtype_cmp_class(self._dtype) rhs_dtype_class = self._get_dtype_cmp_class(other._dtype) res_dtype = get_dtype(bool) # In HDK comparison with NULL always results in NULL, # but in pandas it is True for 'ne' comparison and False # for others. # Also pandas allows 'eq' and 'ne' comparison for values # of incompatible types which doesn't work in HDK. if lhs_dtype_class != rhs_dtype_class: if op_name == ""eq"" or op_name == ""ne"": return LiteralExpr(op_name == ""ne"") else: raise TypeError( f""Invalid comparison between {self._dtype} and {other._dtype}"" ) else: cmp = OpExpr(self.binary_operations[op_name], [self, other], res_dtype) return build_if_then_else( self.is_null(), LiteralExpr(op_name == ""ne""), cmp, res_dtype ) "," Build a comparison expression. Parameters ---------- other : BaseExpr A value to compare with. op_name : str The comparison operation name. 
Returns ------- BaseExpr The resulting comparison expression. ",28,99,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cmp_op(self, other, op_name): lhs_dtype_class = self._get_dtype_cmp_class(self._dtype) rhs_dtype_class = self._get_dtype_cmp_class(other._dtype) res_dtype = get_dtype(bool) # In HDK comparison with NULL always results in NULL, # but in pandas it is True for 'ne' comparison and False # for others. # Also pandas allows 'eq' and 'ne' comparison for values # of incompatible types which doesn't work in HDK. if lhs_dtype_class != rhs_dtype_class: if op_name == ""eq"" or op_name == ""ne"": return LiteralExpr(op_name == ""ne"") else: raise TypeError( f""Invalid comparison between {self._dtype} and {other._dtype}"" ) else: cmp = OpExpr(self.binary_operations[op_name], [self, other], res_dtype) return build_if_then_else( self.is_null(), LiteralExpr(op_name == ""ne""), cmp, res_dtype ) ``` ###Assistant : Build a comparison expression. Parameters ---------- other : BaseExpr A value to compare with. op_name : str The comparison operation name. Returns ------- BaseExpr The resulting comparison expression. " 2311,"def set_active(self, index): if index not in range(len(self.labels)): raise ValueError(f'Invalid CheckButton index: {index}') if colors.same_color( self._crosses.get_facecolor()[index], colors.to_rgba(""none"") ): self._crosses.get_facecolor()[index] = colors.to_rgba(""k"") else: self._crosses.get_facecolor()[index] = colors.to_rgba(""none"") if hasattr(self, ""_rectangles""): for i, p in enumerate(self._rectangles): p.set_facecolor(""k"" if colors.same_color( p.get_facecolor(), colors.to_rgba(""none"")) else ""none"") if self.drawon: self.ax.figure.canvas.draw() if self.eventson: self._observers.process('clicked', self.labels[index].get_text()) "," Toggle (activate or deactivate) a check button by index. Callbacks will be triggered if :attr:`eventson` is True. Parameters ---------- index : int Index of the check button to toggle. Raises ------ ValueError If *index* is invalid. ",36,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_active(self, index): if index not in range(len(self.labels)): raise ValueError(f'Invalid CheckButton index: {index}') if colors.same_color( self._crosses.get_facecolor()[index], colors.to_rgba(""none"") ): self._crosses.get_facecolor()[index] = colors.to_rgba(""k"") else: self._crosses.get_facecolor()[index] = colors.to_rgba(""none"") if hasattr(self, ""_rectangles""): for i, p in enumerate(self._rectangles): p.set_facecolor(""k"" if colors.same_color( p.get_facecolor(), colors.to_rgba(""none"")) else ""none"") if self.drawon: self.ax.figure.canvas.draw() if self.eventson: self._observers.process('clicked', self.labels[index].get_text()) ``` ###Assistant : Toggle (activate or deactivate) a check button by index. Callbacks will be triggered if :attr:`eventson` is True. Parameters ---------- index : int Index of the check button to toggle. Raises ------ ValueError If *index* is invalid. " 2312,"async def async_update(self) -> None: await self.ebox_data.async_update() if self.entity_description.key in self.ebox_data.data: self._attr_native_value = round( self.ebox_data.data[self.entity_description.key], 2 ) ",Get the latest data from EBox and update the state.,10,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def async_update(self) -> None: await self.ebox_data.async_update() if self.entity_description.key in self.ebox_data.data: self._attr_native_value = round( self.ebox_data.data[self.entity_description.key], 2 ) ``` ###Assistant : Get the latest data from EBox and update the state. " 2313,"def panoptic_evaluate(self, dataset, results, topk=20): # image to annotations gt_json = dataset.coco.img_ann_map result_files, tmp_dir = dataset.format_results(results) pred_json = mmcv.load(result_files['panoptic'])['annotations'] pred_folder = osp.join(tmp_dir.name, 'panoptic') gt_folder = dataset.seg_prefix pqs = {} prog_bar = mmcv.ProgressBar(len(results)) for i in range(len(results)): data_info = dataset.prepare_train_img(i) image_id = data_info['img_info']['id'] gt_ann = { 'image_id': image_id, 'segments_info': gt_json[image_id], 'file_name': data_info['img_info']['segm_file'] } pred_ann = pred_json[i] pq_stat = pq_compute_single_core( i, [(gt_ann, pred_ann)], gt_folder, pred_folder, dataset.categories, dataset.file_client, print_log=False) pq_results, classwise_results = pq_stat.pq_average( dataset.categories, isthing=None) pqs[i] = pq_results['pq'] prog_bar.update() if tmp_dir is not None: tmp_dir.cleanup() # descending select topk image pqs = list(sorted(pqs.items(), key=lambda kv: kv[1])) good_pqs = pqs[-topk:] bad_pqs = pqs[:topk] return good_pqs, bad_pqs ","Evaluation for panoptic segmentation. Args: dataset (Dataset): A PyTorch dataset. results (list): Panoptic segmentation results from test results pkl file. topk (int): Number of the highest topk and lowest topk after evaluation index sorting. Default: 20. Returns: tuple: A tuple contains good samples and bad samples. good_pqs (dict[int, float]): A dict contains good samples's indices in dataset and model's performance on them. bad_pqs (dict[int, float]): A dict contains bad samples's indices in dataset and model's performance on them. ",78,102,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def panoptic_evaluate(self, dataset, results, topk=20): # image to annotations gt_json = dataset.coco.img_ann_map result_files, tmp_dir = dataset.format_results(results) pred_json = mmcv.load(result_files['panoptic'])['annotations'] pred_folder = osp.join(tmp_dir.name, 'panoptic') gt_folder = dataset.seg_prefix pqs = {} prog_bar = mmcv.ProgressBar(len(results)) for i in range(len(results)): data_info = dataset.prepare_train_img(i) image_id = data_info['img_info']['id'] gt_ann = { 'image_id': image_id, 'segments_info': gt_json[image_id], 'file_name': data_info['img_info']['segm_file'] } pred_ann = pred_json[i] pq_stat = pq_compute_single_core( i, [(gt_ann, pred_ann)], gt_folder, pred_folder, dataset.categories, dataset.file_client, print_log=False) pq_results, classwise_results = pq_stat.pq_average( dataset.categories, isthing=None) pqs[i] = pq_results['pq'] prog_bar.update() if tmp_dir is not None: tmp_dir.cleanup() # descending select topk image pqs = list(sorted(pqs.items(), key=lambda kv: kv[1])) good_pqs = pqs[-topk:] bad_pqs = pqs[:topk] return good_pqs, bad_pqs ``` ###Assistant : Evaluation for panoptic segmentation. Args: dataset (Dataset): A PyTorch dataset. results (list): Panoptic segmentation results from test results pkl file. topk (int): Number of the highest topk and lowest topk after evaluation index sorting. Default: 20. Returns: tuple: A tuple contains good samples and bad samples. 
good_pqs (dict[int, float]): A dict contains good samples's indices in dataset and model's performance on them. bad_pqs (dict[int, float]): A dict contains bad samples's indices in dataset and model's performance on them. " 2314,"def _compute_mi_cd(c, d, n_neighbors): n_samples = c.shape[0] c = c.reshape((-1, 1)) radius = np.empty(n_samples) label_counts = np.empty(n_samples) k_all = np.empty(n_samples) nn = NearestNeighbors() for label in np.unique(d): mask = d == label count = np.sum(mask) if count > 1: k = min(n_neighbors, count - 1) nn.set_params(n_neighbors=k) nn.fit(c[mask]) r = nn.kneighbors()[0] radius[mask] = np.nextafter(r[:, -1], 0) k_all[mask] = k label_counts[mask] = count # Ignore points with unique labels. mask = label_counts > 1 n_samples = np.sum(mask) label_counts = label_counts[mask] k_all = k_all[mask] c = c[mask] radius = radius[mask] kd = KDTree(c) m_all = kd.query_radius(c, radius, count_only=True, return_distance=False) m_all = np.array(m_all) mi = ( digamma(n_samples) + np.mean(digamma(k_all)) - np.mean(digamma(label_counts)) - np.mean(digamma(m_all)) ) return max(0, mi) ","Compute mutual information between continuous and discrete variables. Parameters ---------- c : ndarray, shape (n_samples,) Samples of a continuous random variable. d : ndarray, shape (n_samples,) Samples of a discrete random variable. n_neighbors : int Number of nearest neighbors to search for each point, see [1]_. Returns ------- mi : float Estimated mutual information. If it turned out to be negative it is replace by 0. Notes ----- True mutual information can't be negative. If its estimate by a numerical method is negative, it means (providing the method is adequate) that the mutual information is close to 0 and replacing it by 0 is a reasonable strategy. References ---------- .. [1] B. C. Ross ""Mutual Information between Discrete and Continuous Data Sets"". PLoS ONE 9(2), 2014. ",126,113,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _compute_mi_cd(c, d, n_neighbors): n_samples = c.shape[0] c = c.reshape((-1, 1)) radius = np.empty(n_samples) label_counts = np.empty(n_samples) k_all = np.empty(n_samples) nn = NearestNeighbors() for label in np.unique(d): mask = d == label count = np.sum(mask) if count > 1: k = min(n_neighbors, count - 1) nn.set_params(n_neighbors=k) nn.fit(c[mask]) r = nn.kneighbors()[0] radius[mask] = np.nextafter(r[:, -1], 0) k_all[mask] = k label_counts[mask] = count # Ignore points with unique labels. mask = label_counts > 1 n_samples = np.sum(mask) label_counts = label_counts[mask] k_all = k_all[mask] c = c[mask] radius = radius[mask] kd = KDTree(c) m_all = kd.query_radius(c, radius, count_only=True, return_distance=False) m_all = np.array(m_all) mi = ( digamma(n_samples) + np.mean(digamma(k_all)) - np.mean(digamma(label_counts)) - np.mean(digamma(m_all)) ) return max(0, mi) ``` ###Assistant : Compute mutual information between continuous and discrete variables. Parameters ---------- c : ndarray, shape (n_samples,) Samples of a continuous random variable. d : ndarray, shape (n_samples,) Samples of a discrete random variable. n_neighbors : int Number of nearest neighbors to search for each point, see [1]_. Returns ------- mi : float Estimated mutual information. If it turned out to be negative it is replace by 0. Notes ----- True mutual information can't be negative. 
If its estimate by a numerical method is negative, it means (providing the method is adequate) that the mutual information is close to 0 and replacing it by 0 is a reasonable strategy. References ---------- .. [1] B. C. Ross ""Mutual Information between Discrete and Continuous Data Sets"". PLoS ONE 9(2), 2014. " 2315,"def handle_app_config(self, app_config, **options): raise NotImplementedError( ""Subclasses of AppCommand must provide a handle_app_config() method."" ) "," Perform the command's actions for app_config, an AppConfig instance corresponding to an application label given on the command line. ",19,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def handle_app_config(self, app_config, **options): raise NotImplementedError( ""Subclasses of AppCommand must provide a handle_app_config() method."" ) ``` ###Assistant : Perform the command's actions for app_config, an AppConfig instance corresponding to an application label given on the command line. " 2316,"def test_inferred_max_features_integer(max_features): clf = RandomForestClassifier(n_estimators=5, random_state=0) transformer = SelectFromModel( estimator=clf, max_features=max_features, threshold=-np.inf ) X_trans = transformer.fit_transform(data, y) assert transformer.max_features_ == max_features assert X_trans.shape[1] == transformer.max_features_ @pytest.mark.parametrize( ""max_features"", [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)], )",Check max_features_ and output shape for integer max_features.,8,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_inferred_max_features_integer(max_features): clf = RandomForestClassifier(n_estimators=5, random_state=0) transformer = SelectFromModel( estimator=clf, max_features=max_features, threshold=-np.inf ) X_trans = transformer.fit_transform(data, y) assert transformer.max_features_ == max_features assert X_trans.shape[1] == transformer.max_features_ @pytest.mark.parametrize( ""max_features"", [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)], ) ``` ###Assistant : Check max_features_ and output shape for integer max_features. " 2317,"def test_naive_all_pairs_lowest_common_ancestor3(self): all_pairs = product(self.DG.nodes(), self.DG.nodes()) ans = naive_all_pairs_lca(self.DG, pairs=all_pairs) self.assert_lca_dicts_same(dict(ans), self.gold) ",Produces the correct results when all pairs given as a generator.,11,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_naive_all_pairs_lowest_common_ancestor3(self): all_pairs = product(self.DG.nodes(), self.DG.nodes()) ans = naive_all_pairs_lca(self.DG, pairs=all_pairs) self.assert_lca_dicts_same(dict(ans), self.gold) ``` ###Assistant : Produces the correct results when all pairs given as a generator. " 2318,"def _resolve_dependency(dependency): if dependency[0] != ""__setting__"": return dependency, False resolved_app_label, resolved_object_name = getattr( settings, dependency[1] ).split(""."") return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True "," Return the resolved dependency and a boolean denoting whether or not it was swappable. ",14,22,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _resolve_dependency(dependency): if dependency[0] != ""__setting__"": return dependency, False resolved_app_label, resolved_object_name = getattr( settings, dependency[1] ).split(""."") return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True ``` ###Assistant : Return the resolved dependency and a boolean denoting whether or not it was swappable. " 2319,"def lowest_common_ancestor(G, node1, node2, default=None): ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)])) if ans: assert len(ans) == 1 return ans[0][1] else: return default @not_implemented_for(""undirected"") @not_implemented_for(""multigraph"")","Compute the lowest common ancestor of the given pair of nodes. Parameters ---------- G : NetworkX directed graph node1, node2 : nodes in the graph. default : object Returned if no common ancestor between `node1` and `node2` Returns ------- The lowest common ancestor of node1 and node2, or default if they have no common ancestors. Examples -------- >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (2, 4), (1, 6), (4, 5)]) >>> nx.lowest_common_ancestor(G, 3, 5) 2 We can also set `default` argument as below. The value of default is returned if there are no common ancestors of given two nodes. >>> G = nx.DiGraph([(4, 5), (12, 13)]) >>> nx.lowest_common_ancestor(G, 12, 5, default=""No common ancestors!"") 'No common ancestors!' Notes ----- Only defined on non-null directed acyclic graphs. Takes n log(n) time in the size of the graph. See `all_pairs_lowest_common_ancestor` when you have more than one pair of nodes of interest. See Also -------- tree_all_pairs_lowest_common_ancestor all_pairs_lowest_common_ancestor ",155,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def lowest_common_ancestor(G, node1, node2, default=None): ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)])) if ans: assert len(ans) == 1 return ans[0][1] else: return default @not_implemented_for(""undirected"") @not_implemented_for(""multigraph"") ``` ###Assistant : Compute the lowest common ancestor of the given pair of nodes. Parameters ---------- G : NetworkX directed graph node1, node2 : nodes in the graph. default : object Returned if no common ancestor between `node1` and `node2` Returns ------- The lowest common ancestor of node1 and node2, or default if they have no common ancestors. Examples -------- >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (2, 4), (1, 6), (4, 5)]) >>> nx.lowest_common_ancestor(G, 3, 5) 2 We can also set `default` argument as below. The value of default is returned if there are no common ancestors of given two nodes. >>> G = nx.DiGraph([(4, 5), (12, 13)]) >>> nx.lowest_common_ancestor(G, 12, 5, default=""No common ancestors!"") 'No common ancestors!' Notes ----- Only defined on non-null directed acyclic graphs. Takes n log(n) time in the size of the graph. See `all_pairs_lowest_common_ancestor` when you have more than one pair of nodes of interest. 
See Also -------- tree_all_pairs_lowest_common_ancestor all_pairs_lowest_common_ancestor " 2320,"def get_data(filters=None): data = [] conditions = get_filter_conditions(filters) fee_details = frappe.db.sql( % (conditions), as_dict=1, ) for entry in fee_details: data.append( { ""program"": entry.program, ""fees_collected"": entry.paid_amount, ""outstanding_amount"": entry.outstanding_amount, ""grand_total"": entry.grand_total, } ) return data "," SELECT FeesCollected.program, FeesCollected.paid_amount, FeesCollected.outstanding_amount, FeesCollected.grand_total FROM ( SELECT sum(grand_total) - sum(outstanding_amount) AS paid_amount, program, sum(outstanding_amount) AS outstanding_amount, sum(grand_total) AS grand_total FROM `tabFees` WHERE docstatus = 1 and program IS NOT NULL %s GROUP BY program ) AS FeesCollected ORDER BY FeesCollected.paid_amount DESC ",42,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_data(filters=None): data = [] conditions = get_filter_conditions(filters) fee_details = frappe.db.sql( % (conditions), as_dict=1, ) for entry in fee_details: data.append( { ""program"": entry.program, ""fees_collected"": entry.paid_amount, ""outstanding_amount"": entry.outstanding_amount, ""grand_total"": entry.grand_total, } ) return data ``` ###Assistant : SELECT FeesCollected.program, FeesCollected.paid_amount, FeesCollected.outstanding_amount, FeesCollected.grand_total FROM ( SELECT sum(grand_total) - sum(outstanding_amount) AS paid_amount, program, sum(outstanding_amount) AS outstanding_amount, sum(grand_total) AS grand_total FROM `tabFees` WHERE docstatus = 1 and program IS NOT NULL %s GROUP BY program ) AS FeesCollected ORDER BY FeesCollected.paid_amount DESC " 2321,"def get_dependencies_from_json(ireq): if ireq.editable or not is_pinned_requirement(ireq): return # It is technically possible to parse extras out of the JSON API's # requirement format, but it is such a chore let's just use the simple API. if ireq.extras: return session = requests.session() atexit.register(session.close) version = str(ireq.req.specifier).lstrip(""="") ","Retrieves dependencies for the given install requirement from the json api. :param ireq: A single InstallRequirement :type ireq: :class:`~pipenv.patched.pip._internal.req.req_install.InstallRequirement` :return: A set of dependency lines for generating new InstallRequirements. :rtype: set(str) or None ",33,46,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_dependencies_from_json(ireq): if ireq.editable or not is_pinned_requirement(ireq): return # It is technically possible to parse extras out of the JSON API's # requirement format, but it is such a chore let's just use the simple API. if ireq.extras: return session = requests.session() atexit.register(session.close) version = str(ireq.req.specifier).lstrip(""="") ``` ###Assistant : Retrieves dependencies for the given install requirement from the json api. :param ireq: A single InstallRequirement :type ireq: :class:`~pipenv.patched.pip._internal.req.req_install.InstallRequirement` :return: A set of dependency lines for generating new InstallRequirements. :rtype: set(str) or None " 2322,"def assign(self, **kwargs) -> DataFrame: r data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data "," Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. 
Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 ",268,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def assign(self, **kwargs) -> DataFrame: r data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data ``` ###Assistant : Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... 
temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 " 2323,"def resize_feats(self, feats): out = [] for i in range(len(feats)): if i == 0: out.append( F.interpolate( feats[0], size=feats[i + 1].shape[-2:], mode='bilinear', align_corners=False)) elif i == len(feats) - 1: out.append( F.interpolate( feats[i], size=feats[i - 1].shape[-2:], mode='bilinear', align_corners=False)) else: out.append(feats[i]) return out ",Downsample the first feat and upsample last feat in feats.,10,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resize_feats(self, feats): out = [] for i in range(len(feats)): if i == 0: out.append( F.interpolate( feats[0], size=feats[i + 1].shape[-2:], mode='bilinear', align_corners=False)) elif i == len(feats) - 1: out.append( F.interpolate( feats[i], size=feats[i - 1].shape[-2:], mode='bilinear', align_corners=False)) else: out.append(feats[i]) return out ``` ###Assistant : Downsample the first feat and upsample last feat in feats. " 2324,"def get_bin_list(filters): conditions = [] if filters.item_code: conditions.append(""item_code = '%s' "" % filters.item_code) if filters.warehouse: warehouse_details = frappe.db.get_value( ""Warehouse"", filters.warehouse, [""lft"", ""rgt""], as_dict=1 ) if warehouse_details: conditions.append( "" exists (select name from `tabWarehouse` wh \ where wh.lft >= %s and wh.rgt <= %s and bin.warehouse = wh.name)"" % (warehouse_details.lft, warehouse_details.rgt) ) bin_list = frappe.db.sql( .format( conditions="" where "" + "" and "".join(conditions) if conditions else """" ), as_dict=1, ) return bin_list ","select item_code, warehouse, actual_qty, planned_qty, indented_qty, ordered_qty, reserved_qty, reserved_qty_for_production, reserved_qty_for_sub_contract, projected_qty from tabBin bin {conditions} order by item_code, warehouse ",19,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_bin_list(filters): conditions = [] if filters.item_code: conditions.append(""item_code = '%s' "" % filters.item_code) if filters.warehouse: warehouse_details = frappe.db.get_value( ""Warehouse"", filters.warehouse, [""lft"", ""rgt""], as_dict=1 ) if warehouse_details: conditions.append( "" exists (select name from `tabWarehouse` wh \ where wh.lft >= %s and wh.rgt <= %s and bin.warehouse = wh.name)"" % (warehouse_details.lft, warehouse_details.rgt) ) bin_list = frappe.db.sql( .format( conditions="" where "" + "" and "".join(conditions) if conditions else """" ), as_dict=1, ) return bin_list ``` ###Assistant : select item_code, warehouse, actual_qty, planned_qty, indented_qty, ordered_qty, reserved_qty, reserved_qty_for_production, reserved_qty_for_sub_contract, projected_qty from tabBin bin {conditions} order by item_code, warehouse " 2325,"def run_eagerly(self): if ( self.dynamic and self._run_eagerly is False ): # pylint:disable=g-bool-id-comparison # TODO(fchollet): consider using py_func to enable this. raise ValueError( ""Your model contains layers that can only be "" ""successfully run in eager execution (layers "" ""constructed with `dynamic=True`). "" ""You cannot set `run_eagerly=False`."" ) if self._cluster_coordinator and self._run_eagerly: raise ValueError( ""When using `Model` with `ParameterServerStrategy`, "" ""`run_eagerly` is not supported."" ) # Run eagerly logic, by priority: # (1) Dynamic models must be run eagerly. 
# (2) Explicitly setting run_eagerly causes a Model to be run eagerly. # (3) Not explicitly setting run_eagerly defaults to TF's global setting. return ( self.dynamic or self._run_eagerly or (tf.config.functions_run_eagerly() and self._run_eagerly is None) ) ","Settable attribute indicating whether the model should run eagerly. Running eagerly means that your model will be run step by step, like Python code. Your model might run slower, but it should become easier for you to debug it by stepping into individual layer calls. By default, we will attempt to compile your model to a static graph to deliver the best execution performance. Returns: Boolean, whether the model should run eagerly. ",72,113,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def run_eagerly(self): if ( self.dynamic and self._run_eagerly is False ): # pylint:disable=g-bool-id-comparison # TODO(fchollet): consider using py_func to enable this. raise ValueError( ""Your model contains layers that can only be "" ""successfully run in eager execution (layers "" ""constructed with `dynamic=True`). "" ""You cannot set `run_eagerly=False`."" ) if self._cluster_coordinator and self._run_eagerly: raise ValueError( ""When using `Model` with `ParameterServerStrategy`, "" ""`run_eagerly` is not supported."" ) # Run eagerly logic, by priority: # (1) Dynamic models must be run eagerly. # (2) Explicitly setting run_eagerly causes a Model to be run eagerly. # (3) Not explicitly setting run_eagerly defaults to TF's global setting. return ( self.dynamic or self._run_eagerly or (tf.config.functions_run_eagerly() and self._run_eagerly is None) ) ``` ###Assistant : Settable attribute indicating whether the model should run eagerly. Running eagerly means that your model will be run step by step, like Python code. Your model might run slower, but it should become easier for you to debug it by stepping into individual layer calls. By default, we will attempt to compile your model to a static graph to deliver the best execution performance. Returns: Boolean, whether the model should run eagerly. " 2326,"def get_preview_context(self, request, *args, **kwargs): return {""object"": self, ""request"": request} "," Returns a context dictionary for use in templates for previewing this object. ",12,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_preview_context(self, request, *args, **kwargs): return {""object"": self, ""request"": request} ``` ###Assistant : Returns a context dictionary for use in templates for previewing this object. 
" 2327,"async def test_default_disabling_entity(hass, create_registrations, webhook_client): webhook_id = create_registrations[1][""webhook_id""] webhook_url = f""/api/webhook/{webhook_id}"" reg_resp = await webhook_client.post( webhook_url, json={ ""type"": ""register_sensor"", ""data"": { ""name"": ""Battery State"", ""type"": ""sensor"", ""unique_id"": ""battery_state"", ""default_disabled"": True, }, }, ) assert reg_resp.status == HTTPStatus.CREATED json = await reg_resp.json() assert json == {""success"": True} await hass.async_block_till_done() entity = hass.states.get(""sensor.test_1_battery_state"") assert entity is None assert ( er.async_get(hass).async_get(""sensor.test_1_battery_state"").disabled_by == er.RegistryEntryDisabler.INTEGRATION ) ",Test that sensors can be disabled by default upon registration.,10,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_default_disabling_entity(hass, create_registrations, webhook_client): webhook_id = create_registrations[1][""webhook_id""] webhook_url = f""/api/webhook/{webhook_id}"" reg_resp = await webhook_client.post( webhook_url, json={ ""type"": ""register_sensor"", ""data"": { ""name"": ""Battery State"", ""type"": ""sensor"", ""unique_id"": ""battery_state"", ""default_disabled"": True, }, }, ) assert reg_resp.status == HTTPStatus.CREATED json = await reg_resp.json() assert json == {""success"": True} await hass.async_block_till_done() entity = hass.states.get(""sensor.test_1_battery_state"") assert entity is None assert ( er.async_get(hass).async_get(""sensor.test_1_battery_state"").disabled_by == er.RegistryEntryDisabler.INTEGRATION ) ``` ###Assistant : Test that sensors can be disabled by default upon registration. 
" 2328,"def _create_dd_meta(cls, dataset_info): # Collect necessary information from dataset_info schema = dataset_info[""schema""] index = dataset_info[""index""] categories = dataset_info[""categories""] partition_obj = dataset_info[""partitions""] partitions = dataset_info[""partition_names""] physical_column_names = dataset_info.get(""physical_schema"", schema).names columns = None # Set index and column names using # pandas metadata (when available) pandas_metadata = _get_pandas_metadata(schema) if pandas_metadata: ( index_names, column_names, storage_name_mapping, column_index_names, ) = _parse_pandas_metadata(pandas_metadata) if categories is None: categories = [] for col in pandas_metadata[""columns""]: if (col[""pandas_type""] == ""categorical"") and ( col[""name""] not in categories ): categories.append(col[""name""]) else: # No pandas metadata implies no index, unless selected by the user index_names = [] column_names = physical_column_names storage_name_mapping = {k: k for k in column_names} column_index_names = [None] if index is None and index_names: # Pandas metadata has provided the index name for us index = index_names # Ensure that there is no overlap between partition columns # and explicit column storage if partitions: _partitions = [p for p in partitions if p not in physical_column_names] if not _partitions: partitions = [] dataset_info[""partitions""] = None dataset_info[""partition_keys""] = {} dataset_info[""partition_names""] = partitions elif len(_partitions) != len(partitions): raise ValueError( ""No partition-columns should be written in the \n"" ""file unless they are ALL written in the file.\n"" ""physical columns: {} | partitions: {}"".format( physical_column_names, partitions ) ) column_names, index_names = _normalize_index_columns( columns, column_names + partitions, index, index_names ) all_columns = index_names + column_names # Check that categories are included in columns if categories and not set(categories).intersection(all_columns): raise ValueError( ""categories not in available columns.\n"" ""categories: {} | columns: {}"".format(categories, list(all_columns)) ) dtypes = _get_pyarrow_dtypes(schema, categories) dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()} index_cols = index or () meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names) if categories: # Make sure all categories are set to ""unknown"". # Cannot include index names in the `cols` argument. meta = clear_known_categories( meta, cols=[c for c in categories if c not in meta.index.names] ) if partition_obj: for partition in partition_obj: if isinstance(index, list) and partition.name == index[0]: # Index from directory structure meta.index = pd.CategoricalIndex( [], categories=partition.keys, name=index[0] ) elif partition.name == meta.index.name: # Index created from a categorical column meta.index = pd.CategoricalIndex( [], categories=partition.keys, name=meta.index.name ) elif partition.name in meta.columns: meta[partition.name] = pd.Series( pd.Categorical(categories=partition.keys, values=[]), index=meta.index, ) # Update `dataset_info` and return `meta` dataset_info[""index""] = index dataset_info[""index_cols""] = index_cols dataset_info[""categories""] = categories return meta ","Use parquet schema and hive-partition information (stored in dataset_info) to construct DataFrame metadata. This method is used by both arrow engines. ",21,379,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _create_dd_meta(cls, dataset_info): # Collect necessary information from dataset_info schema = dataset_info[""schema""] index = dataset_info[""index""] categories = dataset_info[""categories""] partition_obj = dataset_info[""partitions""] partitions = dataset_info[""partition_names""] physical_column_names = dataset_info.get(""physical_schema"", schema).names columns = None # Set index and column names using # pandas metadata (when available) pandas_metadata = _get_pandas_metadata(schema) if pandas_metadata: ( index_names, column_names, storage_name_mapping, column_index_names, ) = _parse_pandas_metadata(pandas_metadata) if categories is None: categories = [] for col in pandas_metadata[""columns""]: if (col[""pandas_type""] == ""categorical"") and ( col[""name""] not in categories ): categories.append(col[""name""]) else: # No pandas metadata implies no index, unless selected by the user index_names = [] column_names = physical_column_names storage_name_mapping = {k: k for k in column_names} column_index_names = [None] if index is None and index_names: # Pandas metadata has provided the index name for us index = index_names # Ensure that there is no overlap between partition columns # and explicit column storage if partitions: _partitions = [p for p in partitions if p not in physical_column_names] if not _partitions: partitions = [] dataset_info[""partitions""] = None dataset_info[""partition_keys""] = {} dataset_info[""partition_names""] = partitions elif len(_partitions) != len(partitions): raise ValueError( ""No partition-columns should be written in the \n"" ""file unless they are ALL written in the file.\n"" ""physical columns: {} | partitions: {}"".format( physical_column_names, partitions ) ) column_names, index_names = _normalize_index_columns( columns, column_names + partitions, index, index_names ) all_columns = index_names + column_names # Check that categories are included in columns if categories and not set(categories).intersection(all_columns): raise ValueError( ""categories not in available columns.\n"" ""categories: {} | columns: {}"".format(categories, list(all_columns)) ) dtypes = _get_pyarrow_dtypes(schema, categories) dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()} index_cols = index or () meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names) if categories: # Make sure all categories are set to ""unknown"". # Cannot include index names in the `cols` argument. meta = clear_known_categories( meta, cols=[c for c in categories if c not in meta.index.names] ) if partition_obj: for partition in partition_obj: if isinstance(index, list) and partition.name == index[0]: # Index from directory structure meta.index = pd.CategoricalIndex( [], categories=partition.keys, name=index[0] ) elif partition.name == meta.index.name: # Index created from a categorical column meta.index = pd.CategoricalIndex( [], categories=partition.keys, name=meta.index.name ) elif partition.name in meta.columns: meta[partition.name] = pd.Series( pd.Categorical(categories=partition.keys, values=[]), index=meta.index, ) # Update `dataset_info` and return `meta` dataset_info[""index""] = index dataset_info[""index_cols""] = index_cols dataset_info[""categories""] = categories return meta ``` ###Assistant : Use parquet schema and hive-partition information (stored in dataset_info) to construct DataFrame metadata. This method is used by both arrow engines. 
" 2329,"def convert_type(self, value, schema_type, stringify_dict=True): if isinstance(value, datetime.datetime): iso_format_value = value.isoformat() if value.tzinfo is None: return iso_format_value return pendulum.parse(iso_format_value).float_timestamp if isinstance(value, datetime.date): return value.isoformat() if isinstance(value, datetime.time): formatted_time = time.strptime(str(value), ""%H:%M:%S"") time_delta = datetime.timedelta( hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec ) return str(time_delta) if stringify_dict and isinstance(value, dict): return json.dumps(value) if isinstance(value, Decimal): return float(value) return value "," Takes a value from Postgres, and converts it to a value that's safe for JSON/Google Cloud Storage/BigQuery. Timezone aware Datetime are converted to UTC seconds. Unaware Datetime, Date and Time are converted to ISO formatted strings. Decimals are converted to floats. :param value: Postgres column value. :param schema_type: BigQuery data type. :param stringify_dict: Specify whether to convert dict to string. ",60,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert_type(self, value, schema_type, stringify_dict=True): if isinstance(value, datetime.datetime): iso_format_value = value.isoformat() if value.tzinfo is None: return iso_format_value return pendulum.parse(iso_format_value).float_timestamp if isinstance(value, datetime.date): return value.isoformat() if isinstance(value, datetime.time): formatted_time = time.strptime(str(value), ""%H:%M:%S"") time_delta = datetime.timedelta( hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec ) return str(time_delta) if stringify_dict and isinstance(value, dict): return json.dumps(value) if isinstance(value, Decimal): return float(value) return value ``` ###Assistant : Takes a value from Postgres, and converts it to a value that's safe for JSON/Google Cloud Storage/BigQuery. Timezone aware Datetime are converted to UTC seconds. Unaware Datetime, Date and Time are converted to ISO formatted strings. Decimals are converted to floats. :param value: Postgres column value. :param schema_type: BigQuery data type. :param stringify_dict: Specify whether to convert dict to string. " 2330,"def _filter_top_k(x, k): _, top_k_idx = tf.math.top_k(x, k, sorted=False) top_k_mask = tf.reduce_sum( tf.one_hot(top_k_idx, tf.shape(x)[-1], axis=-1), axis=-2 ) return x * top_k_mask + NEG_INF * (1 - top_k_mask) ","Filters top-k values in the last dim of x and set the rest to NEG_INF. Used for computing top-k prediction values in dense labels (which has the same shape as predictions) for recall and precision top-k metrics. Args: x: tensor with any dimensions. k: the number of values to keep. Returns: tensor with same shape and dtype as x. ",59,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _filter_top_k(x, k): _, top_k_idx = tf.math.top_k(x, k, sorted=False) top_k_mask = tf.reduce_sum( tf.one_hot(top_k_idx, tf.shape(x)[-1], axis=-1), axis=-2 ) return x * top_k_mask + NEG_INF * (1 - top_k_mask) ``` ###Assistant : Filters top-k values in the last dim of x and set the rest to NEG_INF. Used for computing top-k prediction values in dense labels (which has the same shape as predictions) for recall and precision top-k metrics. Args: x: tensor with any dimensions. k: the number of values to keep. 
Returns: tensor with same shape and dtype as x. " 2331,"def test_pick_two_individuals_eligible_for_crossover_bad(): ind1 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind2 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind3 = creator.Individual.from_string( 'GaussianNB(input_matrix)', tpot_obj._pset ) # Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3]) assert pick1 is None and pick2 is None # You can not do crossover with a population of only 1. pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1]) assert pick1 is None and pick2 is None # You can not do crossover with a population of 0. pick1, pick2 = pick_two_individuals_eligible_for_crossover([]) assert pick1 is None and pick2 is None ",Assert that pick_two_individuals_eligible_for_crossover() returns the right output when no pair is eligible,12,102,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pick_two_individuals_eligible_for_crossover_bad(): ind1 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind2 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind3 = creator.Individual.from_string( 'GaussianNB(input_matrix)', tpot_obj._pset ) # Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3]) assert pick1 is None and pick2 is None # You can not do crossover with a population of only 1. pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1]) assert pick1 is None and pick2 is None # You can not do crossover with a population of 0. pick1, pick2 = pick_two_individuals_eligible_for_crossover([]) assert pick1 is None and pick2 is None ``` ###Assistant : Assert that pick_two_individuals_eligible_for_crossover() returns the right output when no pair is eligible " 2332,"def get_ips(v6=False): # type: (bool) -> Dict[NetworkInterface, List[str]] res = {} for iface in six.itervalues(conf.ifaces): if v6: res[iface] = iface.ips[6] else: res[iface] = iface.ips[4] return res ","Returns all available IPs matching to interfaces, using the windows system. Should only be used as a WinPcapy fallback. :param v6: IPv6 addresses ",23,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_ips(v6=False): # type: (bool) -> Dict[NetworkInterface, List[str]] res = {} for iface in six.itervalues(conf.ifaces): if v6: res[iface] = iface.ips[6] else: res[iface] = iface.ips[4] return res ``` ###Assistant : Returns all available IPs matching to interfaces, using the windows system. Should only be used as a WinPcapy fallback. 
:param v6: IPv6 addresses " 2333,"def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (OPTDecoder)): module.gradient_checkpointing = value OPT_GENERATION_EXAMPLE = r OPT_INPUTS_DOCSTRING = r "," Generation example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> model = OPTForCausalLM.from_pretrained(""ArthurZ/opt-350m"") >>> tokenizer = GPT2Tokenizer.from_pretrained(""patrickvonplaten/opt_gpt2_tokenizer"") >>> TEXTS_TO_GENERATE = ""Hey, are you consciours? Can you talk to me?"" ""Hi there, my name is Barack"" >>> inputs = tokenizer([TEXTS_TO_GENERATE], max_length=1024, return_tensors=""pt"") >>> # Generate >>> generate_ids = model.generate(inputs[""input_ids""], num_beams=2, min_length=0, max_length=20) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'I'm not conscious.<\s>' ``` Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_inputs`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ",470,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (OPTDecoder)): module.gradient_checkpointing = value OPT_GENERATION_EXAMPLE = r OPT_INPUTS_DOCSTRING = r ``` ###Assistant : Generation example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> model = OPTForCausalLM.from_pretrained(""ArthurZ/opt-350m"") >>> tokenizer = GPT2Tokenizer.from_pretrained(""patrickvonplaten/opt_gpt2_tokenizer"") >>> TEXTS_TO_GENERATE = ""Hey, are you consciours? Can you talk to me?"" ""Hi there, my name is Barack"" >>> inputs = tokenizer([TEXTS_TO_GENERATE], max_length=1024, return_tensors=""pt"") >>> # Generate >>> generate_ids = model.generate(inputs[""input_ids""], num_beams=2, min_length=0, max_length=20) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'I'm not conscious.<\s>' ``` Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_inputs`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. " 2334,"def to(self, device=None, dtype=None) -> None: r # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) for p in self.shadow_params ] ","Move internal buffers of the ExponentialMovingAverage to `device`. Args: device: like `device` argument to `torch.Tensor.to` ",15,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to(self, device=None, dtype=None) -> None: r # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) for p in self.shadow_params ] ``` ###Assistant : Move internal buffers of the ExponentialMovingAverage to `device`. Args: device: like `device` argument to `torch.Tensor.to` " 2335,"def get_granger_causality(dependent_series, independent_series, lags): granger_set = pd.concat([dependent_series, independent_series], axis=1) granger = grangercausalitytests(granger_set, [lags], verbose=False) return granger ","Calculate granger tests Parameters ---------- dependent_series: Series The series you want to test Granger Causality for. independent_series: Series The series that you want to test whether it Granger-causes time_series_y lags : int The amount of lags for the Granger test. By default, this is set to 3. ",47,16,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_granger_causality(dependent_series, independent_series, lags): granger_set = pd.concat([dependent_series, independent_series], axis=1) granger = grangercausalitytests(granger_set, [lags], verbose=False) return granger ``` ###Assistant : Calculate granger tests Parameters ---------- dependent_series: Series The series you want to test Granger Causality for. independent_series: Series The series that you want to test whether it Granger-causes time_series_y lags : int The amount of lags for the Granger test. By default, this is set to 3. " 2336,"def _multi_decorate(decorators, method): if hasattr(decorators, ""__iter__""): # Apply a list/tuple of decorators if 'decorators' is one. Decorator # functions are applied so that the call order is the same as the # order in which they appear in the iterable. decorators = decorators[::-1] else: decorators = [decorators] "," Decorate `method` with one or more function decorators. `decorators` can be a single decorator or an iterable of decorators. ",19,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _multi_decorate(decorators, method): if hasattr(decorators, ""__iter__""): # Apply a list/tuple of decorators if 'decorators' is one. Decorator # functions are applied so that the call order is the same as the # order in which they appear in the iterable. decorators = decorators[::-1] else: decorators = [decorators] ``` ###Assistant : Decorate `method` with one or more function decorators. `decorators` can be a single decorator or an iterable of decorators. " 2337,"def generate_level_targets(self, img_size, text_polys, ignore_polys): h, w = img_size lv_size_divs = self.level_size_divisors lv_proportion_range = self.level_proportion_range lv_text_polys = [[] for i in range(len(lv_size_divs))] lv_ignore_polys = [[] for i in range(len(lv_size_divs))] level_maps = [] for poly in text_polys: # assert len(poly) == 1 # text_instance = [[poly[i], poly[i + 1]] # for i in range(0, len(poly), 2)] polygon = np.array(poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_text_polys[ind].append(poly / lv_size_divs[ind]) for ignore_poly in ignore_polys: # assert len(ignore_poly) == 1 # text_instance = [[ignore_poly[i], ignore_poly[i + 1]] # for i in range(0, len(ignore_poly), 2)] polygon = np.array(ignore_poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_ignore_polys[ind].append(ignore_poly / lv_size_divs[ind]) for ind, size_divisor in enumerate(lv_size_divs): current_level_maps = [] level_img_size = (h // size_divisor, w // size_divisor) text_region = self.generate_text_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(text_region) center_region = self.generate_center_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(center_region) effective_mask = self.generate_effective_mask( level_img_size, lv_ignore_polys[ind])[None] current_level_maps.append(effective_mask) fourier_real_map, fourier_image_maps = self.generate_fourier_maps( level_img_size, lv_text_polys[ind]) 
current_level_maps.append(fourier_real_map) current_level_maps.append(fourier_image_maps) level_maps.append(np.concatenate(current_level_maps)) return level_maps ","Generate ground truth target on each level. Args: img_size (list[int]): Shape of input image. text_polys (list[list[ndarray]]): A list of ground truth polygons. ignore_polys (list[list[ndarray]]): A list of ignored polygons. Returns: level_maps (list(ndarray)): A list of ground target on each level. ",40,191,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_level_targets(self, img_size, text_polys, ignore_polys): h, w = img_size lv_size_divs = self.level_size_divisors lv_proportion_range = self.level_proportion_range lv_text_polys = [[] for i in range(len(lv_size_divs))] lv_ignore_polys = [[] for i in range(len(lv_size_divs))] level_maps = [] for poly in text_polys: # assert len(poly) == 1 # text_instance = [[poly[i], poly[i + 1]] # for i in range(0, len(poly), 2)] polygon = np.array(poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_text_polys[ind].append(poly / lv_size_divs[ind]) for ignore_poly in ignore_polys: # assert len(ignore_poly) == 1 # text_instance = [[ignore_poly[i], ignore_poly[i + 1]] # for i in range(0, len(ignore_poly), 2)] polygon = np.array(ignore_poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_ignore_polys[ind].append(ignore_poly / lv_size_divs[ind]) for ind, size_divisor in enumerate(lv_size_divs): current_level_maps = [] level_img_size = (h // size_divisor, w // size_divisor) text_region = self.generate_text_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(text_region) center_region = self.generate_center_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(center_region) effective_mask = self.generate_effective_mask( level_img_size, lv_ignore_polys[ind])[None] current_level_maps.append(effective_mask) fourier_real_map, fourier_image_maps = self.generate_fourier_maps( level_img_size, lv_text_polys[ind]) current_level_maps.append(fourier_real_map) current_level_maps.append(fourier_image_maps) level_maps.append(np.concatenate(current_level_maps)) return level_maps ``` ###Assistant : Generate ground truth target on each level. Args: img_size (list[int]): Shape of input image. text_polys (list[list[ndarray]]): A list of ground truth polygons. ignore_polys (list[list[ndarray]]): A list of ignored polygons. Returns: level_maps (list(ndarray)): A list of ground target on each level. 
" 2338,"def get_reserved_qty(item_code, warehouse): reserved_qty = frappe.db.sql( , (item_code, warehouse, item_code, warehouse), ) return flt(reserved_qty[0][0]) if reserved_qty else 0 "," select sum(dnpi_qty * ((so_item_qty - so_item_delivered_qty) / so_item_qty)) from ( (select qty as dnpi_qty, ( select qty from `tabSales Order Item` where name = dnpi.parent_detail_docname and (delivered_by_supplier is null or delivered_by_supplier = 0) ) as so_item_qty, ( select delivered_qty from `tabSales Order Item` where name = dnpi.parent_detail_docname and delivered_by_supplier = 0 ) as so_item_delivered_qty, parent, name from ( select qty, parent_detail_docname, parent, name from `tabPacked Item` dnpi_in where item_code = %s and warehouse = %s and parenttype=""Sales Order"" and item_code != parent_item and exists (select * from `tabSales Order` so where name = dnpi_in.parent and docstatus = 1 and status != 'Closed') ) dnpi) union (select stock_qty as dnpi_qty, qty as so_item_qty, delivered_qty as so_item_delivered_qty, parent, name from `tabSales Order Item` so_item where item_code = %s and warehouse = %s and (so_item.delivered_by_supplier is null or so_item.delivered_by_supplier = 0) and exists(select * from `tabSales Order` so where so.name = so_item.parent and so.docstatus = 1 and so.status != 'Closed')) ) tab where so_item_qty >= so_item_delivered_qty ",163,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_reserved_qty(item_code, warehouse): reserved_qty = frappe.db.sql( , (item_code, warehouse, item_code, warehouse), ) return flt(reserved_qty[0][0]) if reserved_qty else 0 ``` ###Assistant : select sum(dnpi_qty * ((so_item_qty - so_item_delivered_qty) / so_item_qty)) from ( (select qty as dnpi_qty, ( select qty from `tabSales Order Item` where name = dnpi.parent_detail_docname and (delivered_by_supplier is null or delivered_by_supplier = 0) ) as so_item_qty, ( select delivered_qty from `tabSales Order Item` where name = dnpi.parent_detail_docname and delivered_by_supplier = 0 ) as so_item_delivered_qty, parent, name from ( select qty, parent_detail_docname, parent, name from `tabPacked Item` dnpi_in where item_code = %s and warehouse = %s and parenttype=""Sales Order"" and item_code != parent_item and exists (select * from `tabSales Order` so where name = dnpi_in.parent and docstatus = 1 and status != 'Closed') ) dnpi) union (select stock_qty as dnpi_qty, qty as so_item_qty, delivered_qty as so_item_delivered_qty, parent, name from `tabSales Order Item` so_item where item_code = %s and warehouse = %s and (so_item.delivered_by_supplier is null or so_item.delivered_by_supplier = 0) and exists(select * from `tabSales Order` so where so.name = so_item.parent and so.docstatus = 1 and so.status != 'Closed')) ) tab where so_item_qty >= so_item_delivered_qty " 2339,"def cool(): set_cmap('cool') # Autogenerated by boilerplate.py. Do not edit as changes will be lost."," Set the colormap to 'cool'. This changes the default colormap as well as the colormap of the current image if there is one. See ``help(colormaps)`` for more information. ",28,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cool(): set_cmap('cool') # Autogenerated by boilerplate.py. Do not edit as changes will be lost. ``` ###Assistant : Set the colormap to 'cool'. This changes the default colormap as well as the colormap of the current image if there is one. 
See ``help(colormaps)`` for more information. " 2340,"def add_parent(self, parent): # type: (Packet) -> None self.parent = parent ","Set packet parent. When packet is an element in PacketListField, parent field would point to the list owner packet.",19,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_parent(self, parent): # type: (Packet) -> None self.parent = parent ``` ###Assistant : Set packet parent. When packet is an element in PacketListField, parent field would point to the list owner packet. " 2341,"def make_tempfile(name): open(name, 'w', encoding='utf-8').close() try: yield finally: os.unlink(name) "," Create an empty, named, temporary file for the duration of the context. ",12,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_tempfile(name): open(name, 'w', encoding='utf-8').close() try: yield finally: os.unlink(name) ``` ###Assistant : Create an empty, named, temporary file for the duration of the context. " 2342,"def update_cached_response(self, request, response): cache_url = self.cache_url(request.url) cached_response = self.serializer.loads(request, self.cache.get(cache_url)) if not cached_response: # we didn't have a cached response return response # Lets update our headers with the headers from the new request: # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 # # The server isn't supposed to send headers that would make # the cached body invalid. But... just in case, we'll be sure # to strip out ones we know that might be problmatic due to # typical assumptions. excluded_headers = [""content-length""] cached_response.headers.update( dict( (k, v) for k, v in response.headers.items() if k.lower() not in excluded_headers ) ) # we want a 200 b/c we have content via the cache cached_response.status = 200 # update our cache self._cache_set(cache_url, request, cached_response) return cached_response ","On a 304 we will get a new set of headers that we want to update our cached value with, assuming we have one. This should only ever be called when we've sent an ETag and gotten a 304 as the response. ",42,120,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_cached_response(self, request, response): cache_url = self.cache_url(request.url) cached_response = self.serializer.loads(request, self.cache.get(cache_url)) if not cached_response: # we didn't have a cached response return response # Lets update our headers with the headers from the new request: # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 # # The server isn't supposed to send headers that would make # the cached body invalid. But... just in case, we'll be sure # to strip out ones we know that might be problmatic due to # typical assumptions. excluded_headers = [""content-length""] cached_response.headers.update( dict( (k, v) for k, v in response.headers.items() if k.lower() not in excluded_headers ) ) # we want a 200 b/c we have content via the cache cached_response.status = 200 # update our cache self._cache_set(cache_url, request, cached_response) return cached_response ``` ###Assistant : On a 304 we will get a new set of headers that we want to update our cached value with, assuming we have one. This should only ever be called when we've sent an ETag and gotten a 304 as the response. 
" 2343,"def binary_xloss(logits, labels, ignore=None): logits, labels = flatten_binary_scores(logits, labels, ignore) loss = StableBCELoss()(logits, Variable(labels.float())) return loss # --------------------------- MULTICLASS LOSSES --------------------------- "," Binary Cross entropy loss logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) ignore: void class id ",33,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def binary_xloss(logits, labels, ignore=None): logits, labels = flatten_binary_scores(logits, labels, ignore) loss = StableBCELoss()(logits, Variable(labels.float())) return loss # --------------------------- MULTICLASS LOSSES --------------------------- ``` ###Assistant : Binary Cross entropy loss logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) ignore: void class id " 2344,"def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None): config = generate_navigator( os=os, navigator=navigator, platform=platform, device_type=device_type ) return { ""appCodeName"": config[""app_code_name""], ""appName"": config[""app_name""], ""appVersion"": config[""app_version""], ""platform"": config[""platform""], ""userAgent"": config[""user_agent""], ""oscpu"": config[""oscpu""], ""product"": config[""product""], ""productSub"": config[""product_sub""], ""vendor"": config[""vendor""], ""vendorSub"": config[""vendor_sub""], ""buildID"": config[""build_id""], } "," Generates web navigator's config with keys corresponding to keys of `windows.navigator` JavaScript object. :param os: limit list of oses for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: ""desktop"", ""smartphone"", ""tablet"", ""all"" :return: User-Agent config :rtype: dict with keys (TODO) :raises InvalidOption: if could not generate user-agent for any combination of allowed oses and navigators :raise InvalidOption: if any of passed options is invalid ",95,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None): config = generate_navigator( os=os, navigator=navigator, platform=platform, device_type=device_type ) return { ""appCodeName"": config[""app_code_name""], ""appName"": config[""app_name""], ""appVersion"": config[""app_version""], ""platform"": config[""platform""], ""userAgent"": config[""user_agent""], ""oscpu"": config[""oscpu""], ""product"": config[""product""], ""productSub"": config[""product_sub""], ""vendor"": config[""vendor""], ""vendorSub"": config[""vendor_sub""], ""buildID"": config[""build_id""], } ``` ###Assistant : Generates web navigator's config with keys corresponding to keys of `windows.navigator` JavaScript object. 
:param os: limit list of oses for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: ""desktop"", ""smartphone"", ""tablet"", ""all"" :return: User-Agent config :rtype: dict with keys (TODO) :raises InvalidOption: if could not generate user-agent for any combination of allowed oses and navigators :raise InvalidOption: if any of passed options is invalid " 2345,"def printable_text(text): # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode(""utf-8"", ""ignore"") else: raise ValueError(""Unsupported string type: %s"" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode(""utf-8"") else: raise ValueError(""Unsupported string type: %s"" % (type(text))) else: raise ValueError(""Not running on Python2 or Python 3?"") ",Returns text encoded in a way suitable for print or `tf.logging`.,11,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def printable_text(text): # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode(""utf-8"", ""ignore"") else: raise ValueError(""Unsupported string type: %s"" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode(""utf-8"") else: raise ValueError(""Unsupported string type: %s"" % (type(text))) else: raise ValueError(""Not running on Python2 or Python 3?"") ``` ###Assistant : Returns text encoded in a way suitable for print or `tf.logging`. " 2346,"def is_tradesignal(self, action): # trade signal return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or (action == Actions.Short.value and self._position == Positions.Short) or (action == Actions.Long.value and self._position == Positions.Long)) "," not trade signal is : Action: Neutral, position: Neutral -> Nothing Action: Long, position: Long -> Hold Long Action: Short, position: Short -> Hold Short ",25,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_tradesignal(self, action): # trade signal return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or (action == Actions.Short.value and self._position == Positions.Short) or (action == Actions.Long.value and self._position == Positions.Long)) ``` ###Assistant : not trade signal is : Action: Neutral, position: Neutral -> Nothing Action: Long, position: Long -> Hold Long Action: Short, position: Short -> Hold Short " 2347,"def log_message(self, format, *args): sys.stderr.write(""%s - - [%s] %s\n"" % (self.address_string(), self.log_date_time_string(), format%args)) ","Log an arbitrary message. This is used by all other logging functions. Override it if you have specific logging wishes. The first argument, FORMAT, is a format string for the message to be logged. 
If the format string contains any % escapes requiring parameters, they should be specified as subsequent arguments (it's just like printf!). The client ip and current date/time are prefixed to every message. ",66,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def log_message(self, format, *args): sys.stderr.write(""%s - - [%s] %s\n"" % (self.address_string(), self.log_date_time_string(), format%args)) ``` ###Assistant : Log an arbitrary message. This is used by all other logging functions. Override it if you have specific logging wishes. The first argument, FORMAT, is a format string for the message to be logged. If the format string contains any % escapes requiring parameters, they should be specified as subsequent arguments (it's just like printf!). The client ip and current date/time are prefixed to every message. " 2348,"def warning_advice(self, *args, **kwargs): no_advisory_warnings = os.getenv(""DIFFUSERS_NO_ADVISORY_WARNINGS"", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice "," This method is identical to `logger.warninging()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed ",19,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def warning_advice(self, *args, **kwargs): no_advisory_warnings = os.getenv(""DIFFUSERS_NO_ADVISORY_WARNINGS"", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice ``` ###Assistant : This method is identical to `logger.warninging()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed " 2349,"def zip_row_op(self, i, k, f): for j in range(self.cols): self[i, j] = f(self[i, j], self[k, j]) ","In-place operation on row ``i`` using two-arg functor whose args are interpreted as ``(self[i, j], self[k, j])``. Examples ======== >>> from sympy import eye >>> M = eye(3) >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M Matrix([ [1, 0, 0], [2, 1, 0], [0, 0, 1]]) See Also ======== row row_op col_op ",54,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def zip_row_op(self, i, k, f): for j in range(self.cols): self[i, j] = f(self[i, j], self[k, j]) ``` ###Assistant : In-place operation on row ``i`` using two-arg functor whose args are interpreted as ``(self[i, j], self[k, j])``. Examples ======== >>> from sympy import eye >>> M = eye(3) >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M Matrix([ [1, 0, 0], [2, 1, 0], [0, 0, 1]]) See Also ======== row row_op col_op " 2350,"def legendre_poly(n, x=None, polys=False): r return named_poly(n, dup_legendre, QQ, ""Legendre polynomial"", (x,), polys) ","Generates the Legendre polynomial `P_n(x)`. Parameters ========== n : int Degree of the polynomial. x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression. ",31,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def legendre_poly(n, x=None, polys=False): r return named_poly(n, dup_legendre, QQ, ""Legendre polynomial"", (x,), polys) ``` ###Assistant : Generates the Legendre polynomial `P_n(x)`. Parameters ========== n : int Degree of the polynomial. x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression. 
" 2351,"def _concat_short_text_reuslts(self, input_texts, results): long_text_lens = [len(text) for text in input_texts] concat_results = [] single_results = {} count = 0 for text in input_texts: text_len = len(text) while True: if len(single_results) == 0 or len(single_results[ ""text""]) < text_len: if len(single_results) == 0: single_results = copy.deepcopy(results[count]) else: single_results[""text""] += results[count][""text""] single_results[""items""].extend(results[count][""items""]) count += 1 elif len(single_results[""text""]) == text_len: concat_results.append(single_results) single_results = {} break else: raise Exception( ""The length of input text and raw text is not equal."") for result in concat_results: pred_words = result['items'] pred_words = self._reset_offset(pred_words) result['items'] = pred_words return concat_results "," Concat the model output of short texts to the total result of long text. ",14,91,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _concat_short_text_reuslts(self, input_texts, results): long_text_lens = [len(text) for text in input_texts] concat_results = [] single_results = {} count = 0 for text in input_texts: text_len = len(text) while True: if len(single_results) == 0 or len(single_results[ ""text""]) < text_len: if len(single_results) == 0: single_results = copy.deepcopy(results[count]) else: single_results[""text""] += results[count][""text""] single_results[""items""].extend(results[count][""items""]) count += 1 elif len(single_results[""text""]) == text_len: concat_results.append(single_results) single_results = {} break else: raise Exception( ""The length of input text and raw text is not equal."") for result in concat_results: pred_words = result['items'] pred_words = self._reset_offset(pred_words) result['items'] = pred_words return concat_results ``` ###Assistant : Concat the model output of short texts to the total result of long text. 
" 2352,"def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): origin_shape = paddle.floor(im_shape / scale_factor + 0.5) origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([ scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x, scale_y ]) expand_scale = paddle.expand(scale, [bbox_num[i], 8]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) # bboxes: [N, 10], label, score, bbox pred_label_score = bboxes[:, 0:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image pred_bbox = pred_bbox.reshape([-1, 8]) scaled_bbox = pred_bbox / scale_factor_list origin_h = origin_shape_list[:, 0] origin_w = origin_shape_list[:, 1] bboxes = scaled_bbox zeros = paddle.zeros_like(origin_h) x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros) y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros) x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros) y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros) x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros) y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros) x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros) y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1) pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1) return pred_result "," Rescale, clip and filter the bbox from the output of NMS to get final prediction. Args: bboxes(Tensor): bboxes [N, 10] bbox_num(Tensor): bbox_num im_shape(Tensor): [1 2] scale_factor(Tensor): [1 2] Returns: bbox_pred(Tensor): The output is the prediction with shape [N, 8] including labels, scores and bboxes. The size of bboxes are corresponding to the original image. ",54,191,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): origin_shape = paddle.floor(im_shape / scale_factor + 0.5) origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([ scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x, scale_y ]) expand_scale = paddle.expand(scale, [bbox_num[i], 8]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) # bboxes: [N, 10], label, score, bbox pred_label_score = bboxes[:, 0:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image pred_bbox = pred_bbox.reshape([-1, 8]) scaled_bbox = pred_bbox / scale_factor_list origin_h = origin_shape_list[:, 0] origin_w = origin_shape_list[:, 1] bboxes = scaled_bbox zeros = paddle.zeros_like(origin_h) x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros) y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros) x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros) y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros) x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros) y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros) x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros) y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1) pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1) return pred_result ``` ###Assistant : Rescale, clip and filter the bbox from the output of NMS to get final prediction. Args: bboxes(Tensor): bboxes [N, 10] bbox_num(Tensor): bbox_num im_shape(Tensor): [1 2] scale_factor(Tensor): [1 2] Returns: bbox_pred(Tensor): The output is the prediction with shape [N, 8] including labels, scores and bboxes. The size of bboxes are corresponding to the original image. " 2353,"def unset_existing_data(company): linked = frappe.db.sql( , as_dict=True, ) # remove accounts data from company update_values = {d.fieldname: """" for d in linked} frappe.db.set_value(""Company"", company, update_values, update_values) # remove accounts data from various doctypes for doctype in [ ""Account"", ""Party Account"", ""Mode of Payment Account"", ""Tax Withholding Account"", ""Sales Taxes and Charges Template"", ""Purchase Taxes and Charges Template"", ]: frappe.db.sql( .format(doctype) % (company) # nosec ) ","select fieldname from tabDocField where fieldtype=""Link"" and options=""Account"" and parent=""Company""delete from `tab{0}` where `company`=""%s""",14,65,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def unset_existing_data(company): linked = frappe.db.sql( , as_dict=True, ) # remove accounts data from company update_values = {d.fieldname: """" for d in linked} frappe.db.set_value(""Company"", company, update_values, update_values) # remove accounts data from various doctypes for doctype in [ ""Account"", ""Party Account"", ""Mode of Payment Account"", ""Tax Withholding Account"", ""Sales Taxes and Charges Template"", ""Purchase Taxes and Charges Template"", ]: frappe.db.sql( .format(doctype) % (company) # nosec ) ``` ###Assistant : select fieldname from tabDocField where fieldtype=""Link"" and options=""Account"" and parent=""Company""delete from `tab{0}` where `company`=""%s"" " 2354,"def simple_test(self, feats, batch_img_metas, rescale=False): outs = self.forward(feats) results_list = self.get_results( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return results_list ","Test function without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). ",91,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def simple_test(self, feats, batch_img_metas, rescale=False): outs = self.forward(feats) results_list = self.get_results( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return results_list ``` ###Assistant : Test function without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). " 2355,"def compare_pt_tf_models(pt_model, pt_input, tf_model, tf_input): pt_outputs = pt_model(**pt_input, output_hidden_states=True) tf_outputs = tf_model(**tf_input, output_hidden_states=True) # 1. All output attributes must be the same pt_out_attrs = set(pt_outputs.keys()) tf_out_attrs = set(tf_outputs.keys()) if pt_out_attrs != tf_out_attrs: raise ValueError( f""The model outputs have different attributes, aborting. (Pytorch: {pt_out_attrs}, TensorFlow:"" f"" {tf_out_attrs})"" ) # 2. For each output attribute, ALL values must be the same"," Compares the TensorFlow and PyTorch models, given their inputs, returning a tuple with the maximum observed difference and its source. ",20,59,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def compare_pt_tf_models(pt_model, pt_input, tf_model, tf_input): pt_outputs = pt_model(**pt_input, output_hidden_states=True) tf_outputs = tf_model(**tf_input, output_hidden_states=True) # 1. All output attributes must be the same pt_out_attrs = set(pt_outputs.keys()) tf_out_attrs = set(tf_outputs.keys()) if pt_out_attrs != tf_out_attrs: raise ValueError( f""The model outputs have different attributes, aborting. (Pytorch: {pt_out_attrs}, TensorFlow:"" f"" {tf_out_attrs})"" ) # 2. For each output attribute, ALL values must be the same ``` ###Assistant : Compares the TensorFlow and PyTorch models, given their inputs, returning a tuple with the maximum observed difference and its source. " 2356,"def _get_all_parser_float_precision_combinations(): params = [] ids = [] for parser, parser_id in zip(_all_parsers, _all_parser_ids): if hasattr(parser, ""values""): # Wrapped in pytest.param, get the actual parser back parser = parser.values[0] for precision in parser.float_precision_choices: # Re-wrap in pytest.param for pyarrow mark = pytest.mark.single_cpu if parser.engine == ""pyarrow"" else () param = pytest.param((parser(), precision), marks=mark) params.append(param) ids.append(f""{parser_id}-{precision}"") return {""params"": params, ""ids"": ids} @pytest.fixture( params=_get_all_parser_float_precision_combinations()[""params""], ids=_get_all_parser_float_precision_combinations()[""ids""], )"," Return all allowable parser and float precision combinations and corresponding ids. ",11,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_all_parser_float_precision_combinations(): params = [] ids = [] for parser, parser_id in zip(_all_parsers, _all_parser_ids): if hasattr(parser, ""values""): # Wrapped in pytest.param, get the actual parser back parser = parser.values[0] for precision in parser.float_precision_choices: # Re-wrap in pytest.param for pyarrow mark = pytest.mark.single_cpu if parser.engine == ""pyarrow"" else () param = pytest.param((parser(), precision), marks=mark) params.append(param) ids.append(f""{parser_id}-{precision}"") return {""params"": params, ""ids"": ids} @pytest.fixture( params=_get_all_parser_float_precision_combinations()[""params""], ids=_get_all_parser_float_precision_combinations()[""ids""], ) ``` ###Assistant : Return all allowable parser and float precision combinations and corresponding ids. " 2357,"def test_egg3(self): egg_name = ""%s/omelet.egg"" % self.egg_dir with extend_sys_path(egg_name): with self.settings(INSTALLED_APPS=[""omelet.app_with_models""]): models_module = apps.get_app_config(""app_with_models"").models_module self.assertIsNotNone(models_module) del apps.all_models[""app_with_models""] ",Models module can be loaded from an app located under an egg's top-level package,14,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_egg3(self): egg_name = ""%s/omelet.egg"" % self.egg_dir with extend_sys_path(egg_name): with self.settings(INSTALLED_APPS=[""omelet.app_with_models""]): models_module = apps.get_app_config(""app_with_models"").models_module self.assertIsNotNone(models_module) del apps.all_models[""app_with_models""] ``` ###Assistant : Models module can be loaded from an app located under an egg's top-level package " 2358,"def test_auditing_case_names(lgpo, setting_name, setting, enable_legacy_auditing): lgpo.set_computer_policy(setting_name, setting) result = lgpo.get_policy(setting_name, ""machine"") assert result == setting @pytest.mark.parametrize(""setting"", [""Enabled"", ""Disabled""])"," Helper function to set an audit setting and assert that it was successful ",13,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_auditing_case_names(lgpo, setting_name, setting, enable_legacy_auditing): lgpo.set_computer_policy(setting_name, setting) result = lgpo.get_policy(setting_name, ""machine"") assert result == setting @pytest.mark.parametrize(""setting"", [""Enabled"", ""Disabled""]) ``` ###Assistant : Helper function to set an audit setting and assert that it was successful " 2359,"def test_get(self): # Generate signature signature = generate_signature(self.image.id, ""fill-800x600"") # Get the image response = self.client.get( reverse( ""wagtailimages_serve"", args=(signature, self.image.id, ""fill-800x600"") ) ) # Check response self.assertEqual(response.status_code, 200) self.assertTrue(response.streaming) self.assertEqual(response[""Content-Type""], ""image/png"") "," Test a valid GET request to the view ",8,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get(self): # Generate signature signature = generate_signature(self.image.id, ""fill-800x600"") # Get the image response = self.client.get( reverse( ""wagtailimages_serve"", args=(signature, self.image.id, ""fill-800x600"") ) ) # Check response self.assertEqual(response.status_code, 200) self.assertTrue(response.streaming) self.assertEqual(response[""Content-Type""], ""image/png"") ``` ###Assistant : Test a valid GET request to the view " 2360,"def _object2proto(self) -> Slice_PB: slice_pb = Slice_PB() if self.start: slice_pb.start = self.start slice_pb.has_start = True if self.stop: slice_pb.stop = self.stop slice_pb.has_stop = True if self.step: slice_pb.step = self.step slice_pb.has_step = True slice_pb.id.CopyFrom(serialize(obj=self._id)) return slice_pb "," Serialize the Slice object instance returning a protobuf. Returns: Slice_PB: returns a protobuf object class representing this Slice object. ",19,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _object2proto(self) -> Slice_PB: slice_pb = Slice_PB() if self.start: slice_pb.start = self.start slice_pb.has_start = True if self.stop: slice_pb.stop = self.stop slice_pb.has_stop = True if self.step: slice_pb.step = self.step slice_pb.has_step = True slice_pb.id.CopyFrom(serialize(obj=self._id)) return slice_pb ``` ###Assistant : Serialize the Slice object instance returning a protobuf. Returns: Slice_PB: returns a protobuf object class representing this Slice object. " 2361,"def __call__(self) -> List[Tuple[int, int]]: logger.info(""Sorting face distances. 
Depending on your dataset this may take some time..."") if self._threshold: self._threshold = self._result_linkage[:, 2].max() * self._threshold result_order = self._seriation(self._result_linkage, self._num_predictions, self._num_predictions + self._num_predictions - 2) return result_order "," Process the linkages. Transforms a distance matrix into a sorted distance matrix according to the order implied by the hierarchical tree (dendrogram). Returns ------- list: List of indices with the order implied by the hierarchical tree or list of tuples of (`index`, `bin`) if a binning threshold was provided ",49,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self) -> List[Tuple[int, int]]: logger.info(""Sorting face distances. Depending on your dataset this may take some time..."") if self._threshold: self._threshold = self._result_linkage[:, 2].max() * self._threshold result_order = self._seriation(self._result_linkage, self._num_predictions, self._num_predictions + self._num_predictions - 2) return result_order ``` ###Assistant : Process the linkages. Transforms a distance matrix into a sorted distance matrix according to the order implied by the hierarchical tree (dendrogram). Returns ------- list: List of indices with the order implied by the hierarchical tree or list of tuples of (`index`, `bin`) if a binning threshold was provided " 2362,"def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): if self.export_eb: # enable rcnn models for edgeboard hw to skip the following postprocess. return bboxes, bboxes, bbox_num if not self.export_onnx: bboxes_list = [] bbox_num_list = [] id_start = 0 fake_bboxes = paddle.to_tensor( np.array( [[0., 0.0, 0.0, 0.0, 1.0, 1.0]], dtype='float32')) fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32')) # add fake bbox when output is empty for each batch for i in range(bbox_num.shape[0]): if bbox_num[i] == 0: bboxes_i = fake_bboxes bbox_num_i = fake_bbox_num else: bboxes_i = bboxes[id_start:id_start + bbox_num[i], :] bbox_num_i = bbox_num[i] id_start += bbox_num[i] bboxes_list.append(bboxes_i) bbox_num_list.append(bbox_num_i) bboxes = paddle.concat(bboxes_list) bbox_num = paddle.concat(bbox_num_list) origin_shape = paddle.floor(im_shape / scale_factor + 0.5) if not self.export_onnx: origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 4]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) self.origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) else: # simplify the computation for bs=1 when exporting onnx scale_y, scale_x = scale_factor[0][0], scale_factor[0][1] scale = paddle.concat( [scale_x, scale_y, scale_x, scale_y]).unsqueeze(0) self.origin_shape_list = paddle.expand(origin_shape, [bbox_num[0], 2]) scale_factor_list = paddle.expand(scale, [bbox_num[0], 4]) # bboxes: [N, 6], label, score, bbox pred_label = bboxes[:, 0:1] pred_score = bboxes[:, 1:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image scaled_bbox = pred_bbox / scale_factor_list origin_h = self.origin_shape_list[:, 0] origin_w = self.origin_shape_list[:, 1] zeros = paddle.zeros_like(origin_h) # clip bbox to [0, 
original_size] x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros) y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros) x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros) y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1) # filter empty bbox keep_mask = nonempty_bbox(pred_bbox, return_mask=True) keep_mask = paddle.unsqueeze(keep_mask, [1]) pred_label = paddle.where(keep_mask, pred_label, paddle.ones_like(pred_label) * -1) pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1) return bboxes, pred_result, bbox_num "," Rescale, clip and filter the bbox from the output of NMS to get final prediction. Notes: Currently only support bs = 1. Args: bboxes (Tensor): The output bboxes with shape [N, 6] after decode and NMS, including labels, scores and bboxes. bbox_num (Tensor): The number of prediction boxes of each batch with shape [1], and is N. im_shape (Tensor): The shape of the input image. scale_factor (Tensor): The scale factor of the input image. Returns: pred_result (Tensor): The final prediction results with shape [N, 6] including labels, scores and bboxes. ",90,292,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): if self.export_eb: # enable rcnn models for edgeboard hw to skip the following postprocess. return bboxes, bboxes, bbox_num if not self.export_onnx: bboxes_list = [] bbox_num_list = [] id_start = 0 fake_bboxes = paddle.to_tensor( np.array( [[0., 0.0, 0.0, 0.0, 1.0, 1.0]], dtype='float32')) fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32')) # add fake bbox when output is empty for each batch for i in range(bbox_num.shape[0]): if bbox_num[i] == 0: bboxes_i = fake_bboxes bbox_num_i = fake_bbox_num else: bboxes_i = bboxes[id_start:id_start + bbox_num[i], :] bbox_num_i = bbox_num[i] id_start += bbox_num[i] bboxes_list.append(bboxes_i) bbox_num_list.append(bbox_num_i) bboxes = paddle.concat(bboxes_list) bbox_num = paddle.concat(bbox_num_list) origin_shape = paddle.floor(im_shape / scale_factor + 0.5) if not self.export_onnx: origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 4]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) self.origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) else: # simplify the computation for bs=1 when exporting onnx scale_y, scale_x = scale_factor[0][0], scale_factor[0][1] scale = paddle.concat( [scale_x, scale_y, scale_x, scale_y]).unsqueeze(0) self.origin_shape_list = paddle.expand(origin_shape, [bbox_num[0], 2]) scale_factor_list = paddle.expand(scale, [bbox_num[0], 4]) # bboxes: [N, 6], label, score, bbox pred_label = bboxes[:, 0:1] pred_score = bboxes[:, 1:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image scaled_bbox = pred_bbox / scale_factor_list origin_h = self.origin_shape_list[:, 0] origin_w = self.origin_shape_list[:, 1] zeros = paddle.zeros_like(origin_h) # clip bbox to [0, original_size] x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros) y1 = 
paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros) x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros) y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1) # filter empty bbox keep_mask = nonempty_bbox(pred_bbox, return_mask=True) keep_mask = paddle.unsqueeze(keep_mask, [1]) pred_label = paddle.where(keep_mask, pred_label, paddle.ones_like(pred_label) * -1) pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1) return bboxes, pred_result, bbox_num ``` ###Assistant : Rescale, clip and filter the bbox from the output of NMS to get final prediction. Notes: Currently only support bs = 1. Args: bboxes (Tensor): The output bboxes with shape [N, 6] after decode and NMS, including labels, scores and bboxes. bbox_num (Tensor): The number of prediction boxes of each batch with shape [1], and is N. im_shape (Tensor): The shape of the input image. scale_factor (Tensor): The scale factor of the input image. Returns: pred_result (Tensor): The final prediction results with shape [N, 6] including labels, scores and bboxes. " 2363,"def _signal_zone_update(self): async_dispatcher_send(self.hass, f""{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}"") ","Signal a zone update. Whenever the underlying library does an action against a zone, the data for the zone is updated. Update a single zone. ",25,4,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _signal_zone_update(self): async_dispatcher_send(self.hass, f""{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}"") ``` ###Assistant : Signal a zone update. Whenever the underlying library does an action against a zone, the data for the zone is updated. Update a single zone. " 2364,"def test_asymmetric_error(quantile): n_samples = 10_000 rng = np.random.RandomState(42) # take care that X @ coef + intercept > 0 X = np.concatenate( ( np.abs(rng.randn(n_samples)[:, None]), -rng.randint(2, size=(n_samples, 1)), ), axis=1, ) intercept = 1.23 coef = np.array([0.5, -2]) # For an exponential distribution with rate lambda, e.g. exp(-lambda * x), # the quantile at level q is: # quantile(q) = - log(1 - q) / lambda # scale = 1/lambda = -quantile(q) / log(1-q) y = rng.exponential( scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples ) model = HistGradientBoostingRegressor( loss=""quantile"", quantile=quantile, max_iter=25, random_state=0, max_leaf_nodes=10, ).fit(X, y) assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2) pinball_loss = PinballLoss(quantile=quantile) loss_true_quantile = pinball_loss(y, X @ coef + intercept) loss_pred_quantile = pinball_loss(y, model.predict(X)) # we are overfitting assert loss_pred_quantile <= loss_true_quantile @pytest.mark.parametrize(""y"", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])",Test quantile regression for asymmetric distributed targets.,7,133,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_asymmetric_error(quantile): n_samples = 10_000 rng = np.random.RandomState(42) # take care that X @ coef + intercept > 0 X = np.concatenate( ( np.abs(rng.randn(n_samples)[:, None]), -rng.randint(2, size=(n_samples, 1)), ), axis=1, ) intercept = 1.23 coef = np.array([0.5, -2]) # For an exponential distribution with rate lambda, e.g. 
exp(-lambda * x), # the quantile at level q is: # quantile(q) = - log(1 - q) / lambda # scale = 1/lambda = -quantile(q) / log(1-q) y = rng.exponential( scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples ) model = HistGradientBoostingRegressor( loss=""quantile"", quantile=quantile, max_iter=25, random_state=0, max_leaf_nodes=10, ).fit(X, y) assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2) pinball_loss = PinballLoss(quantile=quantile) loss_true_quantile = pinball_loss(y, X @ coef + intercept) loss_pred_quantile = pinball_loss(y, model.predict(X)) # we are overfitting assert loss_pred_quantile <= loss_true_quantile @pytest.mark.parametrize(""y"", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])]) ``` ###Assistant : Test quantile regression for asymmetric distributed targets. " 2365,"def update(self) -> None: with self.lock: # Fetch valid stop information once if not self._origin: stops = self._pygtfs.stops_by_id(self.origin) if not stops: self._available = False _LOGGER.warning(""Origin stop ID %s not found"", self.origin) return self._origin = stops[0] if not self._destination: stops = self._pygtfs.stops_by_id(self.destination) if not stops: self._available = False _LOGGER.warning( ""Destination stop ID %s not found"", self.destination ) return self._destination = stops[0] self._available = True # Fetch next departure self._departure = get_next_departure( self._pygtfs, self.origin, self.destination, self._offset, self._include_tomorrow, ) # Define the state as a UTC timestamp with ISO 8601 format if not self._departure: self._state = None else: self._state = self._departure[""departure_time""].replace( tzinfo=dt_util.UTC ) # Fetch trip and route details once, unless updated if not self._departure: self._trip = None else: trip_id = self._departure[""trip_id""] if not self._trip or self._trip.trip_id != trip_id: _LOGGER.debug(""Fetching trip details for %s"", trip_id) self._trip = self._pygtfs.trips_by_id(trip_id)[0] route_id = self._departure[""route_id""] if not self._route or self._route.route_id != route_id: _LOGGER.debug(""Fetching route details for %s"", route_id) self._route = self._pygtfs.routes_by_id(route_id)[0] # Fetch agency details exactly once if self._agency is None and self._route: _LOGGER.debug(""Fetching agency details for %s"", self._route.agency_id) try: self._agency = self._pygtfs.agencies_by_id(self._route.agency_id)[0] except IndexError: _LOGGER.warning( ""Agency ID '%s' was not found in agency table, "" ""you may want to update the routes database table "" ""to fix this missing reference"", self._route.agency_id, ) self._agency = False # Assign attributes, icon and name self.update_attributes() if self._agency: self._attr_attribution = self._agency.agency_name else: self._attr_attribution = None if self._route: self._icon = ICONS.get(self._route.route_type, ICON) else: self._icon = ICON name = ( f""{getattr(self._agency, 'agency_name', DEFAULT_NAME)} "" f""{self.origin} to {self.destination} next departure"" ) if not self._departure: name = f""{DEFAULT_NAME}"" self._name = self._custom_name or name ",Get the latest data from GTFS and update the states.,10,259,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def update(self) -> None: with self.lock: # Fetch valid stop information once if not self._origin: stops = self._pygtfs.stops_by_id(self.origin) if not stops: self._available = False _LOGGER.warning(""Origin stop ID %s not found"", self.origin) return self._origin = stops[0] if not self._destination: stops = self._pygtfs.stops_by_id(self.destination) if not stops: self._available = False _LOGGER.warning( ""Destination stop ID %s not found"", self.destination ) return self._destination = stops[0] self._available = True # Fetch next departure self._departure = get_next_departure( self._pygtfs, self.origin, self.destination, self._offset, self._include_tomorrow, ) # Define the state as a UTC timestamp with ISO 8601 format if not self._departure: self._state = None else: self._state = self._departure[""departure_time""].replace( tzinfo=dt_util.UTC ) # Fetch trip and route details once, unless updated if not self._departure: self._trip = None else: trip_id = self._departure[""trip_id""] if not self._trip or self._trip.trip_id != trip_id: _LOGGER.debug(""Fetching trip details for %s"", trip_id) self._trip = self._pygtfs.trips_by_id(trip_id)[0] route_id = self._departure[""route_id""] if not self._route or self._route.route_id != route_id: _LOGGER.debug(""Fetching route details for %s"", route_id) self._route = self._pygtfs.routes_by_id(route_id)[0] # Fetch agency details exactly once if self._agency is None and self._route: _LOGGER.debug(""Fetching agency details for %s"", self._route.agency_id) try: self._agency = self._pygtfs.agencies_by_id(self._route.agency_id)[0] except IndexError: _LOGGER.warning( ""Agency ID '%s' was not found in agency table, "" ""you may want to update the routes database table "" ""to fix this missing reference"", self._route.agency_id, ) self._agency = False # Assign attributes, icon and name self.update_attributes() if self._agency: self._attr_attribution = self._agency.agency_name else: self._attr_attribution = None if self._route: self._icon = ICONS.get(self._route.route_type, ICON) else: self._icon = ICON name = ( f""{getattr(self._agency, 'agency_name', DEFAULT_NAME)} "" f""{self.origin} to {self.destination} next departure"" ) if not self._departure: name = f""{DEFAULT_NAME}"" self._name = self._custom_name or name ``` ###Assistant : Get the latest data from GTFS and update the states. " 2366,"async def test_carbon_monoxide_sensor_read_state(hass, utcnow): helper = await setup_test_component(hass, create_carbon_monoxide_sensor_service) await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 0}, ) state = await helper.poll_and_get_state() assert state.state == ""off"" await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 1}, ) state = await helper.poll_and_get_state() assert state.state == ""on"" assert state.attributes[""device_class""] == BinarySensorDeviceClass.CO ",Test that we can read the state of a HomeKit contact accessory.,12,41,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_carbon_monoxide_sensor_read_state(hass, utcnow): helper = await setup_test_component(hass, create_carbon_monoxide_sensor_service) await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 0}, ) state = await helper.poll_and_get_state() assert state.state == ""off"" await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 1}, ) state = await helper.poll_and_get_state() assert state.state == ""on"" assert state.attributes[""device_class""] == BinarySensorDeviceClass.CO ``` ###Assistant : Test that we can read the state of a HomeKit contact accessory. " 2367,"def save_config(self) -> TritonArtifact: device = self.device if self.inference_stage != PREDICTOR: device = ""cpu"" self.config = TritonConfig( self.full_model_name, self.input_features, self.output_features, self.max_batch_size, self.max_queue_delay_microseconds, device, self.model_instance_count, self.inference_stage, ) config_path = os.path.join(self.base_path, ""config.pbtxt"") with open(config_path, ""w"") as f: formatted_config = remove_empty_lines(self.config.get_model_config()) f.write(formatted_config) config_artifact = TritonArtifact( model_name=self.full_model_name, model_version=self.model_version, platform=""pytorch_libtorch"", path=config_path, content_type=""text/x-protobuf"", content_length=os.path.getsize(config_path), ) return config_artifact @dataclass","Save the Triton config. Return the appropriate artifact. ",8,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_config(self) -> TritonArtifact: device = self.device if self.inference_stage != PREDICTOR: device = ""cpu"" self.config = TritonConfig( self.full_model_name, self.input_features, self.output_features, self.max_batch_size, self.max_queue_delay_microseconds, device, self.model_instance_count, self.inference_stage, ) config_path = os.path.join(self.base_path, ""config.pbtxt"") with open(config_path, ""w"") as f: formatted_config = remove_empty_lines(self.config.get_model_config()) f.write(formatted_config) config_artifact = TritonArtifact( model_name=self.full_model_name, model_version=self.model_version, platform=""pytorch_libtorch"", path=config_path, content_type=""text/x-protobuf"", content_length=os.path.getsize(config_path), ) return config_artifact @dataclass ``` ###Assistant : Save the Triton config. Return the appropriate artifact. " 2368,"def test_unknown_device(self) -> None: url = ""/_synapse/admin/v2/users/%s/devices/unknown_device"" % urllib.parse.quote( self.other_user ) channel = self.make_request( ""GET"", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body[""errcode""]) channel = self.make_request( ""PUT"", url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) channel = self.make_request( ""DELETE"", url, access_token=self.admin_user_tok, ) # Delete unknown device returns status 200 self.assertEqual(200, channel.code, msg=channel.json_body) "," Tests that a lookup for a device that does not exist returns either 404 or 200. ",16,50,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_unknown_device(self) -> None: url = ""/_synapse/admin/v2/users/%s/devices/unknown_device"" % urllib.parse.quote( self.other_user ) channel = self.make_request( ""GET"", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body[""errcode""]) channel = self.make_request( ""PUT"", url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) channel = self.make_request( ""DELETE"", url, access_token=self.admin_user_tok, ) # Delete unknown device returns status 200 self.assertEqual(200, channel.code, msg=channel.json_body) ``` ###Assistant : Tests that a lookup for a device that does not exist returns either 404 or 200. " 2369,"def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs): gen_token_dict = defaultdict(list) # dict of list of generated tokens for step, batch in tqdm(enumerate(dataloader)): with torch.no_grad(): gen_kwargs[""stopping_criteria""][0].start_length = batch[""ids""].shape[-1] generated_tokens = accelerator.unwrap_model(model).generate( input_ids=batch[""ids""][:, : batch[""input_len""]], num_return_sequences=batch_size, **gen_kwargs ) # each task is generated batch_size times generated_tasks = batch[""task_id""].repeat(batch_size) generated_tokens = accelerator.pad_across_processes( generated_tokens, dim=1, pad_index=tokenizer.pad_token_id ) generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks)) generated_tokens = generated_tokens.cpu().numpy() generated_tasks = generated_tasks.cpu().numpy() for task, generated_tokens in zip(generated_tasks, generated_tokens): gen_token_dict[task].append(generated_tokens) code_gens = [[] for _ in range(n_tasks)] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True) code_gens[task].append(remove_last_block(gen_code)) return code_gens ","Generate multiple codes for each task in the dataset. This function leverage accelerator to distribute the processing to multiple GPUs. dataloader, a wrapper around a TokenizeDataset objectm is supposed to send all the prompts from the evalution dataset to the modelm as the following: [p_0_0, p_0_1, ..., p_0_nc-1, p_1_0, ..., p_nt-1_nc-1] where nc is the number of copies of the prompt, and nt is the number of tasks. nc is such that num_sample = nc * batch_size Parameters ---------- accelerator: Accelerator model: transformers.PreTrainedModel Code generation model. AutoTokenizer.from_pretrained(model_ckpt), ex model_ckpt = ""lvwerra/codeparrot"" tokenizer: transformers.AutoTokenizer The tokenizer used to train model dataloader: DataLoader The dataloader is a wrapper around a TokenizeDataset object. It is designed to be used with multiple GPUs. n_tasks: int The number of tasks in the dataset. It is used to determine the length of the output. Should be aligned with the number of tasks in the TokenizeDataset. batch_size: int num_return_sequences per copy of the prompt such that num_sample = batch_size * n_copies gen_kwargs: dict Keyword arguments for the generation function of the model. Returns ------- code_gens: list of list of str, of length n_tasks List of generated codes for each task. 
Each element is a list of generated codes for each task, with length num_samples ",207,96,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs): gen_token_dict = defaultdict(list) # dict of list of generated tokens for step, batch in tqdm(enumerate(dataloader)): with torch.no_grad(): gen_kwargs[""stopping_criteria""][0].start_length = batch[""ids""].shape[-1] generated_tokens = accelerator.unwrap_model(model).generate( input_ids=batch[""ids""][:, : batch[""input_len""]], num_return_sequences=batch_size, **gen_kwargs ) # each task is generated batch_size times generated_tasks = batch[""task_id""].repeat(batch_size) generated_tokens = accelerator.pad_across_processes( generated_tokens, dim=1, pad_index=tokenizer.pad_token_id ) generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks)) generated_tokens = generated_tokens.cpu().numpy() generated_tasks = generated_tasks.cpu().numpy() for task, generated_tokens in zip(generated_tasks, generated_tokens): gen_token_dict[task].append(generated_tokens) code_gens = [[] for _ in range(n_tasks)] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True) code_gens[task].append(remove_last_block(gen_code)) return code_gens ``` ###Assistant : Generate multiple codes for each task in the dataset. This function leverage accelerator to distribute the processing to multiple GPUs. dataloader, a wrapper around a TokenizeDataset objectm is supposed to send all the prompts from the evalution dataset to the modelm as the following: [p_0_0, p_0_1, ..., p_0_nc-1, p_1_0, ..., p_nt-1_nc-1] where nc is the number of copies of the prompt, and nt is the number of tasks. nc is such that num_sample = nc * batch_size Parameters ---------- accelerator: Accelerator model: transformers.PreTrainedModel Code generation model. AutoTokenizer.from_pretrained(model_ckpt), ex model_ckpt = ""lvwerra/codeparrot"" tokenizer: transformers.AutoTokenizer The tokenizer used to train model dataloader: DataLoader The dataloader is a wrapper around a TokenizeDataset object. It is designed to be used with multiple GPUs. n_tasks: int The number of tasks in the dataset. It is used to determine the length of the output. Should be aligned with the number of tasks in the TokenizeDataset. batch_size: int num_return_sequences per copy of the prompt such that num_sample = batch_size * n_copies gen_kwargs: dict Keyword arguments for the generation function of the model. Returns ------- code_gens: list of list of str, of length n_tasks List of generated codes for each task. Each element is a list of generated codes for each task, with length num_samples " 2370,"def get_status(start_date, end_date): if not end_date: return ""Active"" start_date = getdate(start_date) end_date = getdate(end_date) now_date = getdate(nowdate()) return ""Active"" if start_date <= now_date <= end_date else ""Inactive"" "," Get a Contract's status based on the start, current and end dates Args: start_date (str): The start date of the contract end_date (str): The end date of the contract Returns: str: 'Active' if within range, otherwise 'Inactive' ",37,27,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_status(start_date, end_date): if not end_date: return ""Active"" start_date = getdate(start_date) end_date = getdate(end_date) now_date = getdate(nowdate()) return ""Active"" if start_date <= now_date <= end_date else ""Inactive"" ``` ###Assistant : Get a Contract's status based on the start, current and end dates Args: start_date (str): The start date of the contract end_date (str): The end date of the contract Returns: str: 'Active' if within range, otherwise 'Inactive' " 2371,"def calc_position(self, s): x = self.sx.calc_position(s) y = self.sy.calc_position(s) return x, y "," calc position Parameters ---------- s : float distance from the start point. if `s` is outside the data point's range, return None. Returns ------- x : float x position for given s. y : float y position for given s. ",40,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def calc_position(self, s): x = self.sx.calc_position(s) y = self.sy.calc_position(s) return x, y ``` ###Assistant : calc position Parameters ---------- s : float distance from the start point. if `s` is outside the data point's range, return None. Returns ------- x : float x position for given s. y : float y position for given s. " 2372,"def push(self, exit): # We use an unbound method rather than a bound method to follow # the standard lookup behaviour for special methods. _cb_type = type(exit) try: exit_method = _cb_type.__exit__ except AttributeError: # Not a context manager, so assume it's a callable. self._push_exit_callback(exit) else: self._push_cm_exit(exit, exit_method) return exit # Allow use as a decorator. ","Registers a callback with the standard __exit__ method signature. Can suppress exceptions the same way __exit__ method can. Also accepts any object with an __exit__ method (registering a call to the method instead of the object itself). ",37,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def push(self, exit): # We use an unbound method rather than a bound method to follow # the standard lookup behaviour for special methods. _cb_type = type(exit) try: exit_method = _cb_type.__exit__ except AttributeError: # Not a context manager, so assume it's a callable. self._push_exit_callback(exit) else: self._push_cm_exit(exit, exit_method) return exit # Allow use as a decorator. ``` ###Assistant : Registers a callback with the standard __exit__ method signature. Can suppress exceptions the same way __exit__ method can. Also accepts any object with an __exit__ method (registering a call to the method instead of the object itself). " 2373,"async def async_media_play(self) -> None: if self._status[""state""] == ""pause"": await self._client.pause(0) else: await self._client.play() ",Service to send the MPD the command for play/pause.,9,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_media_play(self) -> None: if self._status[""state""] == ""pause"": await self._client.pause(0) else: await self._client.play() ``` ###Assistant : Service to send the MPD the command for play/pause. 
" 2374,"def download_datev_csv(filters): if isinstance(filters, str): filters = json.loads(filters) validate(filters) company = filters.get(""company"") fiscal_year = get_fiscal_year(date=filters.get(""from_date""), company=company) filters[""fiscal_year_start""] = fiscal_year[1] # set chart of accounts used coa = frappe.get_value(""Company"", company, ""chart_of_accounts"") filters[""skr""] = ""04"" if ""SKR04"" in coa else (""03"" if ""SKR03"" in coa else """") datev_settings = frappe.get_doc(""DATEV Settings"", company) filters[""account_number_length""] = datev_settings.account_number_length filters[""temporary_against_account_number""] = datev_settings.temporary_against_account_number transactions = get_transactions(filters) account_names = get_account_names(filters) customers = get_customers(filters) suppliers = get_suppliers(filters) zip_name = ""{} DATEV.zip"".format(frappe.utils.datetime.date.today()) zip_and_download( zip_name, [ { ""file_name"": ""EXTF_Buchungsstapel.csv"", ""csv_data"": get_datev_csv(transactions, filters, csv_class=Transactions), }, { ""file_name"": ""EXTF_Kontenbeschriftungen.csv"", ""csv_data"": get_datev_csv(account_names, filters, csv_class=AccountNames), }, { ""file_name"": ""EXTF_Kunden.csv"", ""csv_data"": get_datev_csv(customers, filters, csv_class=DebtorsCreditors), }, { ""file_name"": ""EXTF_Lieferanten.csv"", ""csv_data"": get_datev_csv(suppliers, filters, csv_class=DebtorsCreditors), }, ], ) "," Provide accounting entries for download in DATEV format. Validate the filters, get the data, produce the CSV file and provide it for download. Can be called like this: GET /api/method/erpnext.regional.report.datev.datev.download_datev_csv Arguments / Params: filters -- dict of filters to be passed to the sql query ",45,109,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def download_datev_csv(filters): if isinstance(filters, str): filters = json.loads(filters) validate(filters) company = filters.get(""company"") fiscal_year = get_fiscal_year(date=filters.get(""from_date""), company=company) filters[""fiscal_year_start""] = fiscal_year[1] # set chart of accounts used coa = frappe.get_value(""Company"", company, ""chart_of_accounts"") filters[""skr""] = ""04"" if ""SKR04"" in coa else (""03"" if ""SKR03"" in coa else """") datev_settings = frappe.get_doc(""DATEV Settings"", company) filters[""account_number_length""] = datev_settings.account_number_length filters[""temporary_against_account_number""] = datev_settings.temporary_against_account_number transactions = get_transactions(filters) account_names = get_account_names(filters) customers = get_customers(filters) suppliers = get_suppliers(filters) zip_name = ""{} DATEV.zip"".format(frappe.utils.datetime.date.today()) zip_and_download( zip_name, [ { ""file_name"": ""EXTF_Buchungsstapel.csv"", ""csv_data"": get_datev_csv(transactions, filters, csv_class=Transactions), }, { ""file_name"": ""EXTF_Kontenbeschriftungen.csv"", ""csv_data"": get_datev_csv(account_names, filters, csv_class=AccountNames), }, { ""file_name"": ""EXTF_Kunden.csv"", ""csv_data"": get_datev_csv(customers, filters, csv_class=DebtorsCreditors), }, { ""file_name"": ""EXTF_Lieferanten.csv"", ""csv_data"": get_datev_csv(suppliers, filters, csv_class=DebtorsCreditors), }, ], ) ``` ###Assistant : Provide accounting entries for download in DATEV format. Validate the filters, get the data, produce the CSV file and provide it for download. 
Can be called like this: GET /api/method/erpnext.regional.report.datev.datev.download_datev_csv Arguments / Params: filters -- dict of filters to be passed to the sql query " 2375,"def taggedsent_to_conll(sentence): for (i, (word, tag)) in enumerate(sentence, start=1): input_str = [str(i), word, ""_"", tag, tag, ""_"", ""0"", ""a"", ""_"", ""_""] input_str = ""\t"".join(input_str) + ""\n"" yield input_str "," A module to convert a single POS tagged sentence into CONLL format. >>> from nltk import word_tokenize, pos_tag >>> text = ""This is a foobar sentence."" >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE ... print(line, end="""") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ :param sentence: A single input sentence to parse :type sentence: list(tuple(str, str)) :rtype: iter(str) :return: a generator yielding a single sentence in CONLL format. ",121,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def taggedsent_to_conll(sentence): for (i, (word, tag)) in enumerate(sentence, start=1): input_str = [str(i), word, ""_"", tag, tag, ""_"", ""0"", ""a"", ""_"", ""_""] input_str = ""\t"".join(input_str) + ""\n"" yield input_str ``` ###Assistant : A module to convert a single POS tagged sentence into CONLL format. >>> from nltk import word_tokenize, pos_tag >>> text = ""This is a foobar sentence."" >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE ... print(line, end="""") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ :param sentence: A single input sentence to parse :type sentence: list(tuple(str, str)) :rtype: iter(str) :return: a generator yielding a single sentence in CONLL format. " 2376,"def get_system_encoding(): try: encoding = locale.getdefaultlocale()[1] or ""ascii"" codecs.lookup(encoding) except Exception: encoding = ""ascii"" return encoding DEFAULT_LOCALE_ENCODING = get_system_encoding() "," The encoding of the default system locale. Fallback to 'ascii' if the #encoding is unsupported by Python or could not be determined. See tickets #10335 and #5846. ",27,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_system_encoding(): try: encoding = locale.getdefaultlocale()[1] or ""ascii"" codecs.lookup(encoding) except Exception: encoding = ""ascii"" return encoding DEFAULT_LOCALE_ENCODING = get_system_encoding() ``` ###Assistant : The encoding of the default system locale. Fallback to 'ascii' if the #encoding is unsupported by Python or could not be determined. See tickets #10335 and #5846. " 2377,"def get_menu_item(self): if self.modeladmin_instances: submenu = Menu(items=self.get_submenu_items()) return GroupMenuItem(self, self.get_menu_order(), submenu) "," Utilised by Wagtail's 'register_menu_item' hook to create a menu for this group with a submenu linking to listing pages for any associated ModelAdmin instances ",24,11,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_menu_item(self): if self.modeladmin_instances: submenu = Menu(items=self.get_submenu_items()) return GroupMenuItem(self, self.get_menu_order(), submenu) ``` ###Assistant : Utilised by Wagtail's 'register_menu_item' hook to create a menu for this group with a submenu linking to listing pages for any associated ModelAdmin instances " 2378,"def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath): path, transform = self._get_text_path_transform( x, y, s, prop, angle, ismath) color = gc.get_rgb() gc.set_linewidth(0.0) self.draw_path(gc, path, transform, rgbFace=color) "," Draw the text by converting them to paths using `.TextToPath`. Parameters ---------- x : float The x location of the text in display coords. y : float The y location of the text baseline in display coords. s : str The text to be converted. prop : `~matplotlib.font_manager.FontProperties` The font property. angle : float Angle in degrees to render the text at. ismath : bool or ""TeX"" If True, use mathtext parser. If ""TeX"", use tex for rendering. ",78,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath): path, transform = self._get_text_path_transform( x, y, s, prop, angle, ismath) color = gc.get_rgb() gc.set_linewidth(0.0) self.draw_path(gc, path, transform, rgbFace=color) ``` ###Assistant : Draw the text by converting them to paths using `.TextToPath`. Parameters ---------- x : float The x location of the text in display coords. y : float The y location of the text baseline in display coords. s : str The text to be converted. prop : `~matplotlib.font_manager.FontProperties` The font property. angle : float Angle in degrees to render the text at. ismath : bool or ""TeX"" If True, use mathtext parser. If ""TeX"", use tex for rendering. " 2379,"def __call__(self, match_quality_matrix): assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, # can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels "," Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`). 
Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored ",69,128,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self, match_quality_matrix): assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, # can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels ``` ###Assistant : Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`). Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored " 2380,"def ismemberdescriptor(object): return isinstance(object, types.MemberDescriptorType) else: # Other implementations","Return true if the object is a member descriptor. Member descriptors are specialized descriptors defined in extension modules.",18,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ismemberdescriptor(object): return isinstance(object, types.MemberDescriptorType) else: # Other implementations ``` ###Assistant : Return true if the object is a member descriptor. Member descriptors are specialized descriptors defined in extension modules. 
" 2381,"def pandas_dtype_to_arrow_c(dtype) -> str: if isinstance(dtype, pandas.CategoricalDtype): return ArrowCTypes.INT64 elif dtype == np.dtype(""O""): return ArrowCTypes.STRING format_str = getattr(ArrowCTypes, dtype.name.upper(), None) if format_str is not None: return format_str if is_datetime64_dtype(dtype): # Selecting the first char of resolution string: # dtype.str -> ' str: if isinstance(dtype, pandas.CategoricalDtype): return ArrowCTypes.INT64 elif dtype == np.dtype(""O""): return ArrowCTypes.STRING format_str = getattr(ArrowCTypes, dtype.name.upper(), None) if format_str is not None: return format_str if is_datetime64_dtype(dtype): # Selecting the first char of resolution string: # dtype.str -> ' bool: if not (hasattr(obj, ""read"") or hasattr(obj, ""write"")): return False return bool(hasattr(obj, ""__iter__"")) "," Check if the object is a file-like object. For objects to be considered file-like, they must be an iterator AND have either a `read` and/or `write` method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` has file-like properties. Examples -------- >>> import io >>> buffer = io.StringIO(""data"") >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False ",76,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_file_like(obj) -> bool: if not (hasattr(obj, ""read"") or hasattr(obj, ""write"")): return False return bool(hasattr(obj, ""__iter__"")) ``` ###Assistant : Check if the object is a file-like object. For objects to be considered file-like, they must be an iterator AND have either a `read` and/or `write` method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` has file-like properties. Examples -------- >>> import io >>> buffer = io.StringIO(""data"") >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False " 2383,"def update(self) -> Union[SourceRead, DestinationRead, ConnectionRead]: return self._create_or_update(self._update_fn, self.update_payload) ","Public function to update the resource on the remote Airbyte instance. Returns: Union[SourceRead, DestinationRead, ConnectionRead]: The updated resource. ",18,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self) -> Union[SourceRead, DestinationRead, ConnectionRead]: return self._create_or_update(self._update_fn, self.update_payload) ``` ###Assistant : Public function to update the resource on the remote Airbyte instance. Returns: Union[SourceRead, DestinationRead, ConnectionRead]: The updated resource. 
" 2384,"def test_callback_session(self) -> None: request = Mock(spec=[""args"", ""getCookie"", ""cookies""]) # Missing cookie request.args = {} request.getCookie.return_value = None self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError(""missing_session"", ""No session cookie found"") # Missing session parameter request.args = {} request.getCookie.return_value = ""session"" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError(""invalid_request"", ""State parameter is missing"") # Invalid cookie request.args = {} request.args[b""state""] = [b""state""] request.getCookie.return_value = ""session"" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError(""invalid_session"") # Mismatching session session = self._generate_oidc_session_token( state=""state"", nonce=""nonce"", client_redirect_url=""http://client/redirect"", ) request.args = {} request.args[b""state""] = [b""mismatching state""] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError(""mismatching_session"") # Valid session request.args = {} request.args[b""state""] = [b""state""] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError(""invalid_request"") ",The callback verifies the session presence and validity,8,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_callback_session(self) -> None: request = Mock(spec=[""args"", ""getCookie"", ""cookies""]) # Missing cookie request.args = {} request.getCookie.return_value = None self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError(""missing_session"", ""No session cookie found"") # Missing session parameter request.args = {} request.getCookie.return_value = ""session"" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError(""invalid_request"", ""State parameter is missing"") # Invalid cookie request.args = {} request.args[b""state""] = [b""state""] request.getCookie.return_value = ""session"" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError(""invalid_session"") # Mismatching session session = self._generate_oidc_session_token( state=""state"", nonce=""nonce"", client_redirect_url=""http://client/redirect"", ) request.args = {} request.args[b""state""] = [b""mismatching state""] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError(""mismatching_session"") # Valid session request.args = {} request.args[b""state""] = [b""state""] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError(""invalid_request"") ``` ###Assistant : The callback verifies the session presence and validity " 2385,"def _create_repo_url(self) -> str: url_components = urllib.parse.urlparse(self.repository_url) if url_components.scheme == ""https"" and self.credentials is not None: repo_url = url_components.netloc + url_components.path updated_components = url_components._replace( netloc=f""{self.credentials.get_secret_value()}@{url_components.netloc}"" ) full_url = urllib.parse.urlunparse(updated_components) else: full_url = self.repository_url return full_url ","Format the URL provided to the `git clone` command. 
For private repos: https://@github.com//.git All other repos should be the same as `self.repository`. ",22,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_repo_url(self) -> str: url_components = urllib.parse.urlparse(self.repository_url) if url_components.scheme == ""https"" and self.credentials is not None: repo_url = url_components.netloc + url_components.path updated_components = url_components._replace( netloc=f""{self.credentials.get_secret_value()}@{url_components.netloc}"" ) full_url = urllib.parse.urlunparse(updated_components) else: full_url = self.repository_url return full_url ``` ###Assistant : Format the URL provided to the `git clone` command. For private repos: https://@github.com//.git All other repos should be the same as `self.repository`. " 2386,"def test_lookup_with_dynamic_value(self): modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site) "," Ensure SimpleListFilter can access self.value() inside the lookup. ",8,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_lookup_with_dynamic_value(self): modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site) ``` ###Assistant : Ensure SimpleListFilter can access self.value() inside the lookup. " 2387,"def test_batch_encode_dynamic_overflowing(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f""{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})""): if is_torch_available(): returned_tensor = ""pt"" elif is_tf_available(): returned_tensor = ""tf"" elif is_flax_available(): returned_tensor = ""jax"" else: return if not tokenizer.pad_token or tokenizer.pad_token_id < 0: return tokens = tokenizer.encode_plus( ""HuggingFace is solving NLP one commit at a time"", max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: ""overflow_to_sample_mapping"" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) # Mono sample tokens = tokenizer.batch_encode_plus( [""HuggingFace is solving NLP one commit at a time""], max_length=6, padding=True, truncation=""only_first"", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: ""overflow_to_sample_mapping"" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) # Multi sample tokens = tokenizer.batch_encode_plus( [""HuggingFace is solving NLP one commit at a time"", ""Very tiny input""], max_length=6, padding=True, truncation=""only_first"", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: ""overflow_to_sample_mapping"" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) "," When calling batch_encode with multiple sequence it can returns different number of overflowing encoding for each sequence: [ Sequence 1: [Encoding 1, Encoding 2], Sequence 2: [Encoding 1], Sequence 3: [Encoding 1, Encoding 2, ... Encoding N] ] This needs to be padded so that it can represented as a tensor ",51,144,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_batch_encode_dynamic_overflowing(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f""{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})""): if is_torch_available(): returned_tensor = ""pt"" elif is_tf_available(): returned_tensor = ""tf"" elif is_flax_available(): returned_tensor = ""jax"" else: return if not tokenizer.pad_token or tokenizer.pad_token_id < 0: return tokens = tokenizer.encode_plus( ""HuggingFace is solving NLP one commit at a time"", max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: ""overflow_to_sample_mapping"" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) # Mono sample tokens = tokenizer.batch_encode_plus( [""HuggingFace is solving NLP one commit at a time""], max_length=6, padding=True, truncation=""only_first"", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: ""overflow_to_sample_mapping"" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) # Multi sample tokens = tokenizer.batch_encode_plus( [""HuggingFace is solving NLP one commit at a time"", ""Very tiny input""], max_length=6, padding=True, truncation=""only_first"", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: ""overflow_to_sample_mapping"" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) ``` ###Assistant : When calling batch_encode with multiple sequence it can returns different number of overflowing encoding for each sequence: [ Sequence 1: [Encoding 1, Encoding 2], Sequence 2: [Encoding 1], Sequence 3: [Encoding 1, Encoding 2, ... Encoding N] ] This needs to be padded so that it can represented as a tensor " 2388,"def deserialize(config, custom_objects=None): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) # pylint: disable=g-import-not-at-top all_classes = { ""adadelta"": adadelta_v2.Adadelta, ""adagrad"": adagrad_v2.Adagrad, ""adam"": adam_v2.Adam, ""adamax"": adamax_v2.Adamax, ""experimentaladadelta"": adadelta_experimental.Adadelta, ""experimentaladagrad"": adagrad_experimental.Adagrad, ""experimentaladam"": adam_experimental.Adam, ""experimentalsgd"": sgd_experimental.SGD, ""nadam"": nadam_v2.Nadam, ""rmsprop"": rmsprop_v2.RMSprop, ""sgd"": gradient_descent_v2.SGD, ""ftrl"": ftrl.Ftrl, ""lossscaleoptimizer"": loss_scale_optimizer.LossScaleOptimizer, ""lossscaleoptimizerv3"": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer ""lossscaleoptimizerv1"": loss_scale_optimizer.LossScaleOptimizer, } # Make deserialization case-insensitive for built-in optimizers. if config[""class_name""].lower() in all_classes: config[""class_name""] = config[""class_name""].lower() return deserialize_keras_object( config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name=""optimizer"", ) @keras_export(""keras.optimizers.get"")","Inverse of the `serialize` function. Args: config: Optimizer configuration dictionary. 
custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras Optimizer instance. ",32,106,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deserialize(config, custom_objects=None): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) # pylint: disable=g-import-not-at-top all_classes = { ""adadelta"": adadelta_v2.Adadelta, ""adagrad"": adagrad_v2.Adagrad, ""adam"": adam_v2.Adam, ""adamax"": adamax_v2.Adamax, ""experimentaladadelta"": adadelta_experimental.Adadelta, ""experimentaladagrad"": adagrad_experimental.Adagrad, ""experimentaladam"": adam_experimental.Adam, ""experimentalsgd"": sgd_experimental.SGD, ""nadam"": nadam_v2.Nadam, ""rmsprop"": rmsprop_v2.RMSprop, ""sgd"": gradient_descent_v2.SGD, ""ftrl"": ftrl.Ftrl, ""lossscaleoptimizer"": loss_scale_optimizer.LossScaleOptimizer, ""lossscaleoptimizerv3"": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer ""lossscaleoptimizerv1"": loss_scale_optimizer.LossScaleOptimizer, } # Make deserialization case-insensitive for built-in optimizers. if config[""class_name""].lower() in all_classes: config[""class_name""] = config[""class_name""].lower() return deserialize_keras_object( config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name=""optimizer"", ) @keras_export(""keras.optimizers.get"") ``` ###Assistant : Inverse of the `serialize` function. Args: config: Optimizer configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras Optimizer instance. " 2389,"def commutes_with(self, other): a = self.array_form b = other.array_form return _af_commutes_with(a, b) "," Checks if the elements are commuting. Examples ======== >>> from sympy.combinatorics import Permutation >>> a = Permutation([1, 4, 3, 0, 2, 5]) >>> b = Permutation([0, 1, 2, 3, 4, 5]) >>> a.commutes_with(b) True >>> b = Permutation([2, 3, 5, 4, 1, 0]) >>> a.commutes_with(b) False ",46,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def commutes_with(self, other): a = self.array_form b = other.array_form return _af_commutes_with(a, b) ``` ###Assistant : Checks if the elements are commuting. Examples ======== >>> from sympy.combinatorics import Permutation >>> a = Permutation([1, 4, 3, 0, 2, 5]) >>> b = Permutation([0, 1, 2, 3, 4, 5]) >>> a.commutes_with(b) True >>> b = Permutation([2, 3, 5, 4, 1, 0]) >>> a.commutes_with(b) False " 2390,"def get_all_styles(): yield from STYLE_MAP for name, _ in find_plugin_styles(): yield name ","Return a generator for all styles by name, both builtin and plugin.",12,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_all_styles(): yield from STYLE_MAP for name, _ in find_plugin_styles(): yield name ``` ###Assistant : Return a generator for all styles by name, both builtin and plugin. 
" 2391,"def lift(cooccurrence): diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal()) with np.errstate(invalid=""ignore"", divide=""ignore""): result = cooccurrence / (diag_rows * diag_cols) return np.array(result) ","Helper method to calculate the Lift of a matrix of co-occurrences. In comparison with basic co-occurrence and Jaccard similarity, lift favours discoverability and serendipity, as opposed to co-occurrence that favours the most popular items, and Jaccard that is a compromise between the two. Args: cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. Returns: numpy.ndarray: The matrix of Lifts between any two items. ",63,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def lift(cooccurrence): diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal()) with np.errstate(invalid=""ignore"", divide=""ignore""): result = cooccurrence / (diag_rows * diag_cols) return np.array(result) ``` ###Assistant : Helper method to calculate the Lift of a matrix of co-occurrences. In comparison with basic co-occurrence and Jaccard similarity, lift favours discoverability and serendipity, as opposed to co-occurrence that favours the most popular items, and Jaccard that is a compromise between the two. Args: cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. Returns: numpy.ndarray: The matrix of Lifts between any two items. " 2392,"def update(self, props): return self._update_props( props, ""{cls.__name__!r} object has no property {prop_name!r}"") "," Update this artist's properties from the dict *props*. Parameters ---------- props : dict ",13,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self, props): return self._update_props( props, ""{cls.__name__!r} object has no property {prop_name!r}"") ``` ###Assistant : Update this artist's properties from the dict *props*. Parameters ---------- props : dict " 2393,"def new_workers_size(self): remote_resources = ray.available_resources() max_remote_workers = self._max_workers new_remote_workers = min(remote_resources.get(""CPU"", 0), max_remote_workers) if self._use_gpu: new_remote_workers = min(remote_resources.get(""GPU"", 0), new_remote_workers) return new_remote_workers ",Returns number of workers to create based on available resources.,10,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def new_workers_size(self): remote_resources = ray.available_resources() max_remote_workers = self._max_workers new_remote_workers = min(remote_resources.get(""CPU"", 0), max_remote_workers) if self._use_gpu: new_remote_workers = min(remote_resources.get(""GPU"", 0), new_remote_workers) return new_remote_workers ``` ###Assistant : Returns number of workers to create based on available resources. " 2394,"def setmonitor(self, enable=True): # type: (bool) -> bool # We must reset the monitor cache if enable: res = self.setmode('monitor') else: res = self.setmode('managed') if not res: log_runtime.error(""Npcap WlanHelper returned with an error code !"") self.cache_mode = None tmp = self.cache_mode = self.ismonitor() return tmp if enable else (not tmp) ","Alias for setmode('monitor') or setmode('managed') Only available with Npcap",9,50,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def setmonitor(self, enable=True): # type: (bool) -> bool # We must reset the monitor cache if enable: res = self.setmode('monitor') else: res = self.setmode('managed') if not res: log_runtime.error(""Npcap WlanHelper returned with an error code !"") self.cache_mode = None tmp = self.cache_mode = self.ismonitor() return tmp if enable else (not tmp) ``` ###Assistant : Alias for setmode('monitor') or setmode('managed') Only available with Npcap " 2395,"def _rotated_rect_with_max_area(h, w, angle): angle = math.radians(angle) width_is_longer = w >= h side_long, side_short = (w, h) if width_is_longer else (h, w) # since the solutions for angle, -angle and 180-angle are all the same, # it is sufficient to look at the first quadrant and the absolute values of sin,cos: sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle)) if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10: # half constrained case: two crop corners touch the longer side, # the other two corners are on the mid-line parallel to the longer line x = 0.5 * side_short wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a) else: # fully constrained case: crop touches all 4 sides cos_2a = cos_a * cos_a - sin_a * sin_a wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a return dict( x_min=max(0, int(w / 2 - wr / 2)), x_max=min(w, int(w / 2 + wr / 2)), y_min=max(0, int(h / 2 - hr / 2)), y_max=min(h, int(h / 2 + hr / 2)), ) "," Given a rectangle of size wxh that has been rotated by 'angle' (in degrees), computes the width and height of the largest possible axis-aligned rectangle (maximal area) within the rotated rectangle. Code from: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders ",34,195,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _rotated_rect_with_max_area(h, w, angle): angle = math.radians(angle) width_is_longer = w >= h side_long, side_short = (w, h) if width_is_longer else (h, w) # since the solutions for angle, -angle and 180-angle are all the same, # it is sufficient to look at the first quadrant and the absolute values of sin,cos: sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle)) if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10: # half constrained case: two crop corners touch the longer side, # the other two corners are on the mid-line parallel to the longer line x = 0.5 * side_short wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a) else: # fully constrained case: crop touches all 4 sides cos_2a = cos_a * cos_a - sin_a * sin_a wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a return dict( x_min=max(0, int(w / 2 - wr / 2)), x_max=min(w, int(w / 2 + wr / 2)), y_min=max(0, int(h / 2 - hr / 2)), y_max=min(h, int(h / 2 + hr / 2)), ) ``` ###Assistant : Given a rectangle of size wxh that has been rotated by 'angle' (in degrees), computes the width and height of the largest possible axis-aligned rectangle (maximal area) within the rotated rectangle. 
Code from: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders " 2396,"def using(_other, **kwargs): gt_kwargs = {} if 'state' in kwargs: s = kwargs.pop('state') if isinstance(s, (list, tuple)): gt_kwargs['stack'] = s else: gt_kwargs['stack'] = ('root', s) if _other is this:"," Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer, except `state` which is handled separately. `state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state. Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. ",70,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def using(_other, **kwargs): gt_kwargs = {} if 'state' in kwargs: s = kwargs.pop('state') if isinstance(s, (list, tuple)): gt_kwargs['stack'] = s else: gt_kwargs['stack'] = ('root', s) if _other is this: ``` ###Assistant : Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer, except `state` which is handled separately. `state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state. Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. " 2397,"def test_unset_document_storage_path(self): self.assertEqual(Document.objects.filter(storage_path=None).count(), 5) bulk_edit.set_storage_path( [self.doc1.id], self.sp1.id, ) self.assertEqual(Document.objects.filter(storage_path=None).count(), 4) bulk_edit.set_storage_path( [self.doc1.id], None, ) self.assertEqual(Document.objects.filter(storage_path=None).count(), 5) self.async_task.assert_called() args, kwargs = self.async_task.call_args self.assertCountEqual(kwargs[""document_ids""], [self.doc1.id]) "," GIVEN: - 4 documents without defined storage path - 1 document with a defined storage WHEN: - Bulk edit called to remove storage path from 1 document THEN: - Single document storage path removed ",34,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_unset_document_storage_path(self): self.assertEqual(Document.objects.filter(storage_path=None).count(), 5) bulk_edit.set_storage_path( [self.doc1.id], self.sp1.id, ) self.assertEqual(Document.objects.filter(storage_path=None).count(), 4) bulk_edit.set_storage_path( [self.doc1.id], None, ) self.assertEqual(Document.objects.filter(storage_path=None).count(), 5) self.async_task.assert_called() args, kwargs = self.async_task.call_args self.assertCountEqual(kwargs[""document_ids""], [self.doc1.id]) ``` ###Assistant : GIVEN: - 4 documents without defined storage path - 1 document with a defined storage WHEN: - Bulk edit called to remove storage path from 1 document THEN: - Single document storage path removed " 2398,"def test_with_spinner(self): function_with_spinner() self.assertFalse(self.forward_msg_queue.is_empty()) ","If the show_spinner flag is set, there should be one element in the report queue. ",15,4,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_with_spinner(self): function_with_spinner() self.assertFalse(self.forward_msg_queue.is_empty()) ``` ###Assistant : If the show_spinner flag is set, there should be one element in the report queue. " 2399,"def test_update_device_too_long_display_name(self) -> None: # Set initial display name. update = {""display_name"": ""new display""} self.get_success( self.handler.update_device( self.other_user, self.other_user_device_id, update ) ) # Request to update a device display name with a new value that is longer than allowed. update = { ""display_name"": ""a"" * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1) } channel = self.make_request( ""PUT"", self.url, access_token=self.admin_user_tok, content=update, ) self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.TOO_LARGE, channel.json_body[""errcode""]) # Ensure the display name was not updated. channel = self.make_request( ""GET"", self.url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(""new display"", channel.json_body[""display_name""]) "," Update a device with a display name that is invalid (too long). ",12,82,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_update_device_too_long_display_name(self) -> None: # Set initial display name. update = {""display_name"": ""new display""} self.get_success( self.handler.update_device( self.other_user, self.other_user_device_id, update ) ) # Request to update a device display name with a new value that is longer than allowed. update = { ""display_name"": ""a"" * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1) } channel = self.make_request( ""PUT"", self.url, access_token=self.admin_user_tok, content=update, ) self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.TOO_LARGE, channel.json_body[""errcode""]) # Ensure the display name was not updated. channel = self.make_request( ""GET"", self.url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(""new display"", channel.json_body[""display_name""]) ``` ###Assistant : Update a device with a display name that is invalid (too long). " 2400,"def xatom(self, name, *args): name = name.upper() #if not name in self.capabilities: # Let the server decide! # raise self.error('unknown extension command: %s' % name) if not name in Commands: Commands[name] = (self.state,) return self._simple_command(name, *args) # Private methods ","Allow simple extension commands notified by server in CAPABILITY response. Assumes command is legal in current state. (typ, [data]) = .xatom(name, arg, ...) Returns response appropriate to extension command `name'. ",30,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def xatom(self, name, *args): name = name.upper() #if not name in self.capabilities: # Let the server decide! # raise self.error('unknown extension command: %s' % name) if not name in Commands: Commands[name] = (self.state,) return self._simple_command(name, *args) # Private methods ``` ###Assistant : Allow simple extension commands notified by server in CAPABILITY response. Assumes command is legal in current state. (typ, [data]) = .xatom(name, arg, ...) Returns response appropriate to extension command `name'. 
" 2401,"def forward(self, outputs, targets): outputs = outputs.clip(self.epsilon, 1 - self.epsilon) log_loss = targets * dp_log(outputs) + ((targets * -1) + 1) * dp_log((outputs * -1) + 1) log_loss = log_loss.sum(axis=1) * -1 return log_loss.mean() ","Forward pass. .. math:: L = -t \\log(p) - (1 - t) \\log(1 - p) Parameters ---------- outputs : numpy.array Predictions in (0, 1), such as sigmoidal output of a neural network. targets : numpy.array Targets in [0, 1], such as ground truth labels. ",44,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, outputs, targets): outputs = outputs.clip(self.epsilon, 1 - self.epsilon) log_loss = targets * dp_log(outputs) + ((targets * -1) + 1) * dp_log((outputs * -1) + 1) log_loss = log_loss.sum(axis=1) * -1 return log_loss.mean() ``` ###Assistant : Forward pass. .. math:: L = -t \\log(p) - (1 - t) \\log(1 - p) Parameters ---------- outputs : numpy.array Predictions in (0, 1), such as sigmoidal output of a neural network. targets : numpy.array Targets in [0, 1], such as ground truth labels. " 2402,"def numeric_assortativity_coefficient(G, attribute, nodes=None): if nodes is None: nodes = G.nodes vals = {G.nodes[n][attribute] for n in nodes} mapping = {d: i for i, d, in enumerate(vals)} M = attribute_mixing_matrix(G, attribute, nodes, mapping) return _numeric_ac(M, mapping) ","Compute assortativity for numerical node attributes. Assortativity measures the similarity of connections in the graph with respect to the given numeric attribute. Parameters ---------- G : NetworkX graph attribute : string Node attribute key. nodes: list or iterable (optional) Compute numeric assortativity only for attributes of nodes in container. The default is all nodes. Returns ------- r: float Assortativity of graph for given attribute Examples -------- >>> G = nx.Graph() >>> G.add_nodes_from([0, 1], size=2) >>> G.add_nodes_from([2, 3], size=3) >>> G.add_edges_from([(0, 1), (2, 3)]) >>> print(nx.numeric_assortativity_coefficient(G, ""size"")) 1.0 Notes ----- This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation coefficient of the specified (scalar valued) attribute across edges. References ---------- .. [1] M. E. J. Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003 ",129,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def numeric_assortativity_coefficient(G, attribute, nodes=None): if nodes is None: nodes = G.nodes vals = {G.nodes[n][attribute] for n in nodes} mapping = {d: i for i, d, in enumerate(vals)} M = attribute_mixing_matrix(G, attribute, nodes, mapping) return _numeric_ac(M, mapping) ``` ###Assistant : Compute assortativity for numerical node attributes. Assortativity measures the similarity of connections in the graph with respect to the given numeric attribute. Parameters ---------- G : NetworkX graph attribute : string Node attribute key. nodes: list or iterable (optional) Compute numeric assortativity only for attributes of nodes in container. The default is all nodes. Returns ------- r: float Assortativity of graph for given attribute Examples -------- >>> G = nx.Graph() >>> G.add_nodes_from([0, 1], size=2) >>> G.add_nodes_from([2, 3], size=3) >>> G.add_edges_from([(0, 1), (2, 3)]) >>> print(nx.numeric_assortativity_coefficient(G, ""size"")) 1.0 Notes ----- This computes Eq. (21) in Ref. 
[1]_ , which is the Pearson correlation coefficient of the specified (scalar valued) attribute across edges. References ---------- .. [1] M. E. J. Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003 " 2403,"def write_filepath(filepath, strategy): dirpath = os.path.dirname(filepath) base = os.path.basename(filepath) return os.path.join(write_dirpath(dirpath, strategy), base) ","Returns the writing file path to be used to save file distributedly. Directory to contain `filepath` would be created if it doesn't exist. Args: filepath: Original filepath that would be used without distribution. strategy: The tf.distribute strategy object currently used. Returns: The writing filepath that should be used to save file with distribution. ",53,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def write_filepath(filepath, strategy): dirpath = os.path.dirname(filepath) base = os.path.basename(filepath) return os.path.join(write_dirpath(dirpath, strategy), base) ``` ###Assistant : Returns the writing file path to be used to save file distributedly. Directory to contain `filepath` would be created if it doesn't exist. Args: filepath: Original filepath that would be used without distribution. strategy: The tf.distribute strategy object currently used. Returns: The writing filepath that should be used to save file with distribution. " 2404,"def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) CONVNEXT_START_DOCSTRING = r CONVNEXT_INPUTS_DOCSTRING = r @add_start_docstrings( ""The bare ConvNext model outputting raw features without any specific head on top."", CONVNEXT_START_DOCSTRING, )"," Method used for serving the model. Args: inputs (`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. Parameters: config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. Args: pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`ConvNextFeatureExtractor`]. See [`ConvNextFeatureExtractor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. ",298,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) CONVNEXT_START_DOCSTRING = r CONVNEXT_INPUTS_DOCSTRING = r @add_start_docstrings( ""The bare ConvNext model outputting raw features without any specific head on top."", CONVNEXT_START_DOCSTRING, ) ``` ###Assistant : Method used for serving the model. Args: inputs (`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. Parameters: config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. Args: pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`ConvNextFeatureExtractor`]. See [`ConvNextFeatureExtractor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. " 2405,"def getclasstree(classes, unique=False): children = {} roots = [] for c in classes: if c.__bases__: for parent in c.__bases__: if parent not in children: children[parent] = [] if c not in children[parent]: children[parent].append(c) if unique and parent in classes: break elif c not in roots: roots.append(c) for parent in children: if parent not in classes: roots.append(parent) return walktree(roots, children, None) # ------------------------------------------------ argument list extraction Arguments = namedtuple('Arguments', 'args, varargs, varkw') ","Arrange the given list of classes into a hierarchy of nested lists. 
Where a nested list appears, it contains classes derived from the class whose entry immediately precedes the list. Each entry is a 2-tuple containing a class and a tuple of its base classes. If the 'unique' argument is true, exactly one entry appears in the returned structure for each class in the given list. Otherwise, classes using multiple inheritance and their descendants will appear multiple times.",78,71,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getclasstree(classes, unique=False): children = {} roots = [] for c in classes: if c.__bases__: for parent in c.__bases__: if parent not in children: children[parent] = [] if c not in children[parent]: children[parent].append(c) if unique and parent in classes: break elif c not in roots: roots.append(c) for parent in children: if parent not in classes: roots.append(parent) return walktree(roots, children, None) # ------------------------------------------------ argument list extraction Arguments = namedtuple('Arguments', 'args, varargs, varkw') ``` ###Assistant : Arrange the given list of classes into a hierarchy of nested lists. Where a nested list appears, it contains classes derived from the class whose entry immediately precedes the list. Each entry is a 2-tuple containing a class and a tuple of its base classes. If the 'unique' argument is true, exactly one entry appears in the returned structure for each class in the given list. Otherwise, classes using multiple inheritance and their descendants will appear multiple times. " 2406,"def get_revision(cls, location): # type: (str) -> str raise NotImplementedError "," Return the current commit id of the files at the given location. ",12,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_revision(cls, location): # type: (str) -> str raise NotImplementedError ``` ###Assistant : Return the current commit id of the files at the given location. " 2407,"def get_current_settings() -> Settings: from prefect.context import ProfileContext profile = ProfileContext.get() if profile is not None: return profile.settings return get_settings_from_env() "," Returns a settings object populated with values from the current profile or, if no profile is active, the environment. ",19,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_current_settings() -> Settings: from prefect.context import ProfileContext profile = ProfileContext.get() if profile is not None: return profile.settings return get_settings_from_env() ``` ###Assistant : Returns a settings object populated with values from the current profile or, if no profile is active, the environment. " 2408,"def test_compare_key_greater(self): self.not_at_dest_sync_strategy.determine_should_sync.return_value = False # Try when the sync strategy says to sync the file. 
self.not_at_src_sync_strategy.determine_should_sync.return_value = True src_files = [] dest_files = [] ref_list = [] result_list = [] time = datetime.datetime.now() src_file = FileStat(src='', dest='', compare_key='domparator_test.py', size=10, last_update=time, src_type='local', dest_type='s3', operation_name='upload') dest_file = FileStat(src='', dest='', compare_key='comparator_test.py', size=10, last_update=time, src_type='s3', dest_type='local', operation_name='') src_files.append(src_file) dest_files.append(dest_file) ref_list.append(dest_file) files = self.comparator.call(iter(src_files), iter(dest_files)) for filename in files: result_list.append(filename) self.assertEqual(result_list, ref_list) # Now try when the sync strategy says not to sync the file. self.not_at_src_sync_strategy.determine_should_sync.return_value = False result_list = [] ref_list = [] files = self.comparator.call(iter(src_files), iter(dest_files)) for filename in files: result_list.append(filename) self.assertEqual(result_list, ref_list) "," Confirm the appropriate action is taken when the source compare key is greater than the destination compare key. ",18,101,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_compare_key_greater(self): self.not_at_dest_sync_strategy.determine_should_sync.return_value = False # Try when the sync strategy says to sync the file. self.not_at_src_sync_strategy.determine_should_sync.return_value = True src_files = [] dest_files = [] ref_list = [] result_list = [] time = datetime.datetime.now() src_file = FileStat(src='', dest='', compare_key='domparator_test.py', size=10, last_update=time, src_type='local', dest_type='s3', operation_name='upload') dest_file = FileStat(src='', dest='', compare_key='comparator_test.py', size=10, last_update=time, src_type='s3', dest_type='local', operation_name='') src_files.append(src_file) dest_files.append(dest_file) ref_list.append(dest_file) files = self.comparator.call(iter(src_files), iter(dest_files)) for filename in files: result_list.append(filename) self.assertEqual(result_list, ref_list) # Now try when the sync strategy says not to sync the file. self.not_at_src_sync_strategy.determine_should_sync.return_value = False result_list = [] ref_list = [] files = self.comparator.call(iter(src_files), iter(dest_files)) for filename in files: result_list.append(filename) self.assertEqual(result_list, ref_list) ``` ###Assistant : Confirm the appropriate action is taken when the source compare key is greater than the destination compare key. " 2409,"def test_custom_exact_lookup_none_rhs(self): field = Author._meta.get_field(""birthdate"") OldExactLookup = field.get_lookup(""exact"") author = Author.objects.create(name=""author"", birthdate=None) try: field.register_lookup(Exactly, ""exact"") self.assertEqual(Author.objects.get(birthdate__exact=None), author) finally: field.register_lookup(OldExactLookup, ""exact"") "," __exact=None is transformed to __isnull=True if a custom lookup class with lookup_name != 'exact' is registered as the `exact` lookup. ",20,20,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_custom_exact_lookup_none_rhs(self): field = Author._meta.get_field(""birthdate"") OldExactLookup = field.get_lookup(""exact"") author = Author.objects.create(name=""author"", birthdate=None) try: field.register_lookup(Exactly, ""exact"") self.assertEqual(Author.objects.get(birthdate__exact=None), author) finally: field.register_lookup(OldExactLookup, ""exact"") ``` ###Assistant : __exact=None is transformed to __isnull=True if a custom lookup class with lookup_name != 'exact' is registered as the `exact` lookup. " 2410,"def batch_pairwise_distances(U, V): with tf.variable_scope('pairwise_dist_block'): # Squared norms of each row in U and V. norm_u = tf.reduce_sum(tf.square(U), 1) norm_v = tf.reduce_sum(tf.square(V), 1) # norm_u as a row and norm_v as a column vectors. norm_u = tf.reshape(norm_u, [-1, 1]) norm_v = tf.reshape(norm_v, [1, -1]) # Pairwise squared Euclidean distances. D = tf.maximum(norm_u - 2*tf.matmul(U, V, False, True) + norm_v, 0.0) return D #---------------------------------------------------------------------------- ", Compute pairwise distances between two batches of feature vectors.,9,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def batch_pairwise_distances(U, V): with tf.variable_scope('pairwise_dist_block'): # Squared norms of each row in U and V. norm_u = tf.reduce_sum(tf.square(U), 1) norm_v = tf.reduce_sum(tf.square(V), 1) # norm_u as a row and norm_v as a column vectors. norm_u = tf.reshape(norm_u, [-1, 1]) norm_v = tf.reshape(norm_v, [1, -1]) # Pairwise squared Euclidean distances. D = tf.maximum(norm_u - 2*tf.matmul(U, V, False, True) + norm_v, 0.0) return D #---------------------------------------------------------------------------- ``` ###Assistant : Compute pairwise distances between two batches of feature vectors. " 2411,"def expand_dims(self, image): self._ensure_format_supported(image) # Do nothing if PIL image if isinstance(image, PIL.Image.Image): return image if is_torch_tensor(image): image = image.unsqueeze(0) else: image = np.expand_dims(image, axis=0) return image "," Expands 2-dimensional `image` to 3 dimensions. Args: image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): The image to expand. ",17,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def expand_dims(self, image): self._ensure_format_supported(image) # Do nothing if PIL image if isinstance(image, PIL.Image.Image): return image if is_torch_tensor(image): image = image.unsqueeze(0) else: image = np.expand_dims(image, axis=0) return image ``` ###Assistant : Expands 2-dimensional `image` to 3 dimensions. Args: image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): The image to expand. 
" 2412,"def test_find_executable_task_instances_order_execution_date(self, dag_maker): dag_id_1 = 'SchedulerJobTest.test_find_executable_task_instances_order_execution_date-a' dag_id_2 = 'SchedulerJobTest.test_find_executable_task_instances_order_execution_date-b' task_id = 'task-a' session = settings.Session() with dag_maker(dag_id=dag_id_1, max_active_tasks=16, session=session): EmptyOperator(task_id=task_id) dr1 = dag_maker.create_dagrun(execution_date=DEFAULT_DATE + timedelta(hours=1)) with dag_maker(dag_id=dag_id_2, max_active_tasks=16, session=session): EmptyOperator(task_id=task_id) dr2 = dag_maker.create_dagrun() dr1 = session.merge(dr1, load=False) self.scheduler_job = SchedulerJob(subdir=os.devnull) tis = dr1.task_instances + dr2.task_instances for ti in tis: ti.state = State.SCHEDULED session.merge(ti) session.flush() res = self.scheduler_job._executable_task_instances_to_queued(max_tis=1, session=session) session.flush() assert [ti.key for ti in res] == [tis[1].key] session.rollback() "," Test that task instances follow execution_date order priority. If two dagruns with different execution dates are scheduled, tasks with earliest dagrun execution date will first be executed ",27,68,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_find_executable_task_instances_order_execution_date(self, dag_maker): dag_id_1 = 'SchedulerJobTest.test_find_executable_task_instances_order_execution_date-a' dag_id_2 = 'SchedulerJobTest.test_find_executable_task_instances_order_execution_date-b' task_id = 'task-a' session = settings.Session() with dag_maker(dag_id=dag_id_1, max_active_tasks=16, session=session): EmptyOperator(task_id=task_id) dr1 = dag_maker.create_dagrun(execution_date=DEFAULT_DATE + timedelta(hours=1)) with dag_maker(dag_id=dag_id_2, max_active_tasks=16, session=session): EmptyOperator(task_id=task_id) dr2 = dag_maker.create_dagrun() dr1 = session.merge(dr1, load=False) self.scheduler_job = SchedulerJob(subdir=os.devnull) tis = dr1.task_instances + dr2.task_instances for ti in tis: ti.state = State.SCHEDULED session.merge(ti) session.flush() res = self.scheduler_job._executable_task_instances_to_queued(max_tis=1, session=session) session.flush() assert [ti.key for ti in res] == [tis[1].key] session.rollback() ``` ###Assistant : Test that task instances follow execution_date order priority. 
If two dagruns with different execution dates are scheduled, tasks with earliest dagrun execution date will first be executed " 2413,"def dispatch(self, request, *args, **kwargs): self.determine_active_organization(request, kwargs.get(""organization_slug"", None)) if self.csrf_protect: if hasattr(self.dispatch.__func__, ""csrf_exempt""): delattr(self.dispatch.__func__, ""csrf_exempt"") response = self.test_csrf(request) if response: return response if self.is_auth_required(request, *args, **kwargs): return self.handle_auth_required(request, *args, **kwargs) if self.is_sudo_required(request, *args, **kwargs): return self.handle_sudo_required(request, *args, **kwargs) args, kwargs = self.convert_args(request, *args, **kwargs) request.access = self.get_access(request, *args, **kwargs) if not self.has_permission(request, *args, **kwargs): return self.handle_permission_required(request, *args, **kwargs) if ""organization"" in kwargs: org = kwargs[""organization""] if self.is_member_disabled_from_limit(request, org): return self.handle_disabled_member(org) if self.is_not_2fa_compliant(request, org): return self.handle_not_2fa_compliant(request, *args, **kwargs) self.request = request self.default_context = self.get_context_data(request, *args, **kwargs) return self.handle(request, *args, **kwargs) "," A note on the CSRF protection process. Because the CSRF decorators don't work well with view subclasses, we allow them to control whether a CSRF check is done by setting self.csrf_protect. This has a couple of implications: 1. We need to mark this method as @csrf_exempt so that when the CSRF middleware checks it as part of the regular middleware sequence, it always passes. 2. If self.csrf_protect is set, we will re-run the CSRF check ourselves using CsrfViewMiddleware().process_view() 3. But first we must remove the csrf_exempt attribute that was set by the decorator so that the middleware doesn't shortcut and pass the check unconditionally again. ",105,89,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dispatch(self, request, *args, **kwargs): self.determine_active_organization(request, kwargs.get(""organization_slug"", None)) if self.csrf_protect: if hasattr(self.dispatch.__func__, ""csrf_exempt""): delattr(self.dispatch.__func__, ""csrf_exempt"") response = self.test_csrf(request) if response: return response if self.is_auth_required(request, *args, **kwargs): return self.handle_auth_required(request, *args, **kwargs) if self.is_sudo_required(request, *args, **kwargs): return self.handle_sudo_required(request, *args, **kwargs) args, kwargs = self.convert_args(request, *args, **kwargs) request.access = self.get_access(request, *args, **kwargs) if not self.has_permission(request, *args, **kwargs): return self.handle_permission_required(request, *args, **kwargs) if ""organization"" in kwargs: org = kwargs[""organization""] if self.is_member_disabled_from_limit(request, org): return self.handle_disabled_member(org) if self.is_not_2fa_compliant(request, org): return self.handle_not_2fa_compliant(request, *args, **kwargs) self.request = request self.default_context = self.get_context_data(request, *args, **kwargs) return self.handle(request, *args, **kwargs) ``` ###Assistant : A note on the CSRF protection process. Because the CSRF decorators don't work well with view subclasses, we allow them to control whether a CSRF check is done by setting self.csrf_protect. This has a couple of implications: 1. 
We need to mark this method as @csrf_exempt so that when the CSRF middleware checks it as part of the regular middleware sequence, it always passes. 2. If self.csrf_protect is set, we will re-run the CSRF check ourselves using CsrfViewMiddleware().process_view() 3. But first we must remove the csrf_exempt attribute that was set by the decorator so that the middleware doesn't shortcut and pass the check unconditionally again. " 2414,"def interpolator(self) -> int: assert self._interpolator is not None return self._interpolator ", int: The cv2 interpolator required to transpose the mask to a full frame. ,13,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def interpolator(self) -> int: assert self._interpolator is not None return self._interpolator ``` ###Assistant : int: The cv2 interpolator required to transpose the mask to a full frame. " 2415,"def test_cluster_interrupt(start_connected_cluster, tmpdir): cluster = start_connected_cluster dirpath = str(tmpdir) # Needs to be in scope for pytest","Tests run_experiment on cluster shutdown with actual interrupt. This is an end-to-end test. ",13,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_cluster_interrupt(start_connected_cluster, tmpdir): cluster = start_connected_cluster dirpath = str(tmpdir) # Needs to be in scope for pytest ``` ###Assistant : Tests run_experiment on cluster shutdown with actual interrupt. This is an end-to-end test. " 2416,"def test_version(self) -> None: for version in (""1.0"", 1.0, 1): result = self.parse_response({""version"": version, ""type"": ""link""}) # An empty Open Graph response is an error, ensure the URL is included. self.assertIn(""og:url"", result.open_graph_result) # A missing version should be treated as 1.0. result = self.parse_response({""type"": ""link""}) self.assertIn(""og:url"", result.open_graph_result) # Invalid versions should be rejected. for version in (""2.0"", ""1"", 1.1, 0, None, {}, []): result = self.parse_response({""version"": version, ""type"": ""link""}) # An empty Open Graph response is an error, ensure the URL is included. self.assertEqual({}, result.open_graph_result) ",Accept versions that are similar to 1.0 as a string or int (or missing).,14,85,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_version(self) -> None: for version in (""1.0"", 1.0, 1): result = self.parse_response({""version"": version, ""type"": ""link""}) # An empty Open Graph response is an error, ensure the URL is included. self.assertIn(""og:url"", result.open_graph_result) # A missing version should be treated as 1.0. result = self.parse_response({""type"": ""link""}) self.assertIn(""og:url"", result.open_graph_result) # Invalid versions should be rejected. for version in (""2.0"", ""1"", 1.1, 0, None, {}, []): result = self.parse_response({""version"": version, ""type"": ""link""}) # An empty Open Graph response is an error, ensure the URL is included. self.assertEqual({}, result.open_graph_result) ``` ###Assistant : Accept versions that are similar to 1.0 as a string or int (or missing). 
" 2417,"def test_page_allowing_subpages(self): response = self.client.get( reverse(""wagtailadmin_userbar_frontend"", args=(self.event_index.id,)) ) # page allows subpages, so the 'add page' button should show expected_url = reverse( ""wagtailadmin_pages:add_subpage"", args=(self.event_index.id,) ) needle = f self.assertTagInHTML(needle, str(response.content)) "," Add a child page ",18,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_page_allowing_subpages(self): response = self.client.get( reverse(""wagtailadmin_userbar_frontend"", args=(self.event_index.id,)) ) # page allows subpages, so the 'add page' button should show expected_url = reverse( ""wagtailadmin_pages:add_subpage"", args=(self.event_index.id,) ) needle = f self.assertTagInHTML(needle, str(response.content)) ``` ###Assistant : Add a child page " 2418,"def render_output_ui(self, streamlit_app, input) -> None: # type: ignore src, result = self.__root__ streamlit_app.subheader(""Synthesized Audio"") streamlit_app.audio(result.content, format=""audio/wav"") fig, ax = plt.subplots() ax.imshow(src.mel, aspect=""equal"", interpolation=""none"") ax.set_title(""mel spectrogram(Source Audio)"") streamlit_app.pyplot(fig) fig, ax = plt.subplots() ax.imshow(result.mel, aspect=""equal"", interpolation=""none"") ax.set_title(""mel spectrogram(Result Audio)"") streamlit_app.pyplot(fig) ","Custom output UI. If this method is implmeneted, it will be used instead of the default Output UI renderer. ",19,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def render_output_ui(self, streamlit_app, input) -> None: # type: ignore src, result = self.__root__ streamlit_app.subheader(""Synthesized Audio"") streamlit_app.audio(result.content, format=""audio/wav"") fig, ax = plt.subplots() ax.imshow(src.mel, aspect=""equal"", interpolation=""none"") ax.set_title(""mel spectrogram(Source Audio)"") streamlit_app.pyplot(fig) fig, ax = plt.subplots() ax.imshow(result.mel, aspect=""equal"", interpolation=""none"") ax.set_title(""mel spectrogram(Result Audio)"") streamlit_app.pyplot(fig) ``` ###Assistant : Custom output UI. If this method is implmeneted, it will be used instead of the default Output UI renderer. " 2419,"def get_default_mesh(self): return self._default_mesh LayoutMap.get.__doc__ = LayoutMap.__getitem__.__doc__ @keras_export(""keras.dtensor.experimental.layout_map_scope"", v1=[]) @contextlib.contextmanager","Return the default `Mesh` set at instance creation. The `Mesh` can be used to create default replicated `Layout` when there isn't a match of the input string query. ",28,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_default_mesh(self): return self._default_mesh LayoutMap.get.__doc__ = LayoutMap.__getitem__.__doc__ @keras_export(""keras.dtensor.experimental.layout_map_scope"", v1=[]) @contextlib.contextmanager ``` ###Assistant : Return the default `Mesh` set at instance creation. The `Mesh` can be used to create default replicated `Layout` when there isn't a match of the input string query. " 2420,"def __setitem__(self, key, item): # pragma: no cover raise NotImplementedError(""Implemented by subclasses"") "," Assign `item` value to dataset located by `key`. Parameters ---------- key : callable or tuple The global row numbers to assign data to. 
item : modin.pandas.DataFrame, modin.pandas.Series or scalar Value that should be assigned to located dataset. See Also -------- pandas.DataFrame.iloc ",41,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __setitem__(self, key, item): # pragma: no cover raise NotImplementedError(""Implemented by subclasses"") ``` ###Assistant : Assign `item` value to dataset located by `key`. Parameters ---------- key : callable or tuple The global row numbers to assign data to. item : modin.pandas.DataFrame, modin.pandas.Series or scalar Value that should be assigned to located dataset. See Also -------- pandas.DataFrame.iloc " 2421,"def test_bad_origin_cannot_be_parsed(self): req = self._get_POST_request_with_token() req.META[""HTTP_HOST""] = ""www.example.com"" req.META[""HTTP_ORIGIN""] = ""https://["" mw = CsrfViewMiddleware(post_form_view) self._check_referer_rejects(mw, req) self.assertIs(mw._origin_verified(req), False) with self.assertLogs(""django.security.csrf"", ""WARNING"") as cm: response = mw.process_view(req, post_form_view, (), {}) self.assertEqual(response.status_code, 403) msg = REASON_BAD_ORIGIN % req.META[""HTTP_ORIGIN""] self.assertEqual(cm.records[0].getMessage(), ""Forbidden (%s): "" % msg) "," A POST request with an origin that can't be parsed by urlparse() is rejected. ",14,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_bad_origin_cannot_be_parsed(self): req = self._get_POST_request_with_token() req.META[""HTTP_HOST""] = ""www.example.com"" req.META[""HTTP_ORIGIN""] = ""https://["" mw = CsrfViewMiddleware(post_form_view) self._check_referer_rejects(mw, req) self.assertIs(mw._origin_verified(req), False) with self.assertLogs(""django.security.csrf"", ""WARNING"") as cm: response = mw.process_view(req, post_form_view, (), {}) self.assertEqual(response.status_code, 403) msg = REASON_BAD_ORIGIN % req.META[""HTTP_ORIGIN""] self.assertEqual(cm.records[0].getMessage(), ""Forbidden (%s): "" % msg) ``` ###Assistant : A POST request with an origin that can't be parsed by urlparse() is rejected. " 2422,"def move_from_center(coord, centers, deltas, axmask=(True, True, True)): return _move_from_center(coord, centers, deltas, axmask=axmask) "," For each coordinate where *axmask* is True, move *coord* away from *centers* by *deltas*. ",14,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def move_from_center(coord, centers, deltas, axmask=(True, True, True)): return _move_from_center(coord, centers, deltas, axmask=axmask) ``` ###Assistant : For each coordinate where *axmask* is True, move *coord* away from *centers* by *deltas*. " 2423,"def get_execution_info(self, job_id, function_descriptor): function_id = function_descriptor.function_id # If the function has already been loaded, # There's no need to load again if function_id in self._function_execution_info: return self._function_execution_info[function_id] if self._worker.load_code_from_local: # Load function from local code. if not function_descriptor.is_actor_method(): # If the function is not able to be loaded, # try to load it from GCS, # even if load_code_from_local is set True if self._load_function_from_local(function_descriptor) is True: return self._function_execution_info[function_id] # Load function from GCS. # Wait until the function to be executed has actually been # registered on this worker. 
We will push warnings to the user if # we spend too long in this loop. # The driver function may not be found in sys.path. Try to load # the function from GCS. with profiling.profile(""wait_for_function""): self._wait_for_function(function_descriptor, job_id) try: function_id = function_descriptor.function_id info = self._function_execution_info[function_id] except KeyError as e: message = ( ""Error occurs in get_execution_info: "" ""job_id: %s, function_descriptor: %s. Message: %s"" % (job_id, function_descriptor, e) ) raise KeyError(message) return info ","Get the FunctionExecutionInfo of a remote function. Args: job_id: ID of the job that the function belongs to. function_descriptor: The FunctionDescriptor of the function to get. Returns: A FunctionExecutionInfo object. ",30,162,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_execution_info(self, job_id, function_descriptor): function_id = function_descriptor.function_id # If the function has already been loaded, # There's no need to load again if function_id in self._function_execution_info: return self._function_execution_info[function_id] if self._worker.load_code_from_local: # Load function from local code. if not function_descriptor.is_actor_method(): # If the function is not able to be loaded, # try to load it from GCS, # even if load_code_from_local is set True if self._load_function_from_local(function_descriptor) is True: return self._function_execution_info[function_id] # Load function from GCS. # Wait until the function to be executed has actually been # registered on this worker. We will push warnings to the user if # we spend too long in this loop. # The driver function may not be found in sys.path. Try to load # the function from GCS. with profiling.profile(""wait_for_function""): self._wait_for_function(function_descriptor, job_id) try: function_id = function_descriptor.function_id info = self._function_execution_info[function_id] except KeyError as e: message = ( ""Error occurs in get_execution_info: "" ""job_id: %s, function_descriptor: %s. Message: %s"" % (job_id, function_descriptor, e) ) raise KeyError(message) return info ``` ###Assistant : Get the FunctionExecutionInfo of a remote function. Args: job_id: ID of the job that the function belongs to. function_descriptor: The FunctionDescriptor of the function to get. Returns: A FunctionExecutionInfo object. " 2424,"def exclude_all_devices(self) -> bool: return all(idx in _EXCLUDE_DEVICES for idx in range(self._device_count)) ", bool: ``True`` if all GPU devices have been explicitly disabled otherwise ``False`` ,12,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def exclude_all_devices(self) -> bool: return all(idx in _EXCLUDE_DEVICES for idx in range(self._device_count)) ``` ###Assistant : bool: ``True`` if all GPU devices have been explicitly disabled otherwise ``False`` " 2425,"def serialize_labels(self, resources): labels = [] for label in resources: if label in AlexaGlobalCatalog.__dict__.values(): label = {""@type"": ""asset"", ""value"": {""assetId"": label}} else: label = {""@type"": ""text"", ""value"": {""text"": label, ""locale"": ""en-US""}} labels.append(label) return {""friendlyNames"": labels} ",Return resource label objects for friendlyNames serialized for an API response.,11,35,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def serialize_labels(self, resources): labels = [] for label in resources: if label in AlexaGlobalCatalog.__dict__.values(): label = {""@type"": ""asset"", ""value"": {""assetId"": label}} else: label = {""@type"": ""text"", ""value"": {""text"": label, ""locale"": ""en-US""}} labels.append(label) return {""friendlyNames"": labels} ``` ###Assistant : Return resource label objects for friendlyNames serialized for an API response. " 2426,"def _decode_bitonal(self): data = bytearray() total_bytes = self.state.xsize * self.state.ysize comment_spans = False while len(data) != total_bytes: block = self._read_block() # read next block if not block: # eof break while block and comment_spans: comment_end = self._find_comment_end(block) if comment_end != -1: # comment ends in this block block = block[comment_end + 1 :] # delete tail of previous comment break else: # comment spans whole block block = self._read_block() block, comment_spans = self._ignore_comments(block) tokens = b"""".join(block.split()) for token in tokens: if token not in (48, 49): raise ValueError(f""Invalid token for this mode: {bytes([token])}"") data = (data + tokens)[:total_bytes] invert = bytes.maketrans(b""01"", b""\xFF\x00"") return data.translate(invert) "," This is a separate method because in the plain PBM format, all data tokens are exactly one byte, so the inter-token whitespace is optional. ",24,104,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _decode_bitonal(self): data = bytearray() total_bytes = self.state.xsize * self.state.ysize comment_spans = False while len(data) != total_bytes: block = self._read_block() # read next block if not block: # eof break while block and comment_spans: comment_end = self._find_comment_end(block) if comment_end != -1: # comment ends in this block block = block[comment_end + 1 :] # delete tail of previous comment break else: # comment spans whole block block = self._read_block() block, comment_spans = self._ignore_comments(block) tokens = b"""".join(block.split()) for token in tokens: if token not in (48, 49): raise ValueError(f""Invalid token for this mode: {bytes([token])}"") data = (data + tokens)[:total_bytes] invert = bytes.maketrans(b""01"", b""\xFF\x00"") return data.translate(invert) ``` ###Assistant : This is a separate method because in the plain PBM format, all data tokens are exactly one byte, so the inter-token whitespace is optional. " 2427,"def recast_to_symbols(eqs, symbols): if not iterable(eqs) and iterable(symbols): raise ValueError('Both eqs and symbols must be iterable') orig = list(symbols) symbols = list(ordered(symbols)) swap_sym = {} i = 0 for j, s in enumerate(symbols): if not isinstance(s, Symbol) and s not in swap_sym: swap_sym[s] = Dummy('X%d' % i) i += 1 new_f = [] for i in eqs: isubs = getattr(i, 'subs', None) if isubs is not None: new_f.append(isubs(swap_sym)) else: new_f.append(i) restore = {v: k for k, v in swap_sym.items()} return new_f, [swap_sym.get(i, i) for i in orig], restore "," Return (e, s, d) where e and s are versions of *eqs* and *symbols* in which any non-Symbol objects in *symbols* have been replaced with generic Dummy symbols and d is a dictionary that can be used to restore the original expressions. 
Examples ======== >>> from sympy.solvers.solvers import recast_to_symbols >>> from sympy import symbols, Function >>> x, y = symbols('x y') >>> fx = Function('f')(x) >>> eqs, syms = [fx + 1, x, y], [fx, y] >>> e, s, d = recast_to_symbols(eqs, syms); (e, s, d) ([_X0 + 1, x, y], [_X0, y], {_X0: f(x)}) The original equations and symbols can be restored using d: >>> assert [i.xreplace(d) for i in eqs] == eqs >>> assert [d.get(i, i) for i in s] == syms ",124,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def recast_to_symbols(eqs, symbols): if not iterable(eqs) and iterable(symbols): raise ValueError('Both eqs and symbols must be iterable') orig = list(symbols) symbols = list(ordered(symbols)) swap_sym = {} i = 0 for j, s in enumerate(symbols): if not isinstance(s, Symbol) and s not in swap_sym: swap_sym[s] = Dummy('X%d' % i) i += 1 new_f = [] for i in eqs: isubs = getattr(i, 'subs', None) if isubs is not None: new_f.append(isubs(swap_sym)) else: new_f.append(i) restore = {v: k for k, v in swap_sym.items()} return new_f, [swap_sym.get(i, i) for i in orig], restore ``` ###Assistant : Return (e, s, d) where e and s are versions of *eqs* and *symbols* in which any non-Symbol objects in *symbols* have been replaced with generic Dummy symbols and d is a dictionary that can be used to restore the original expressions. Examples ======== >>> from sympy.solvers.solvers import recast_to_symbols >>> from sympy import symbols, Function >>> x, y = symbols('x y') >>> fx = Function('f')(x) >>> eqs, syms = [fx + 1, x, y], [fx, y] >>> e, s, d = recast_to_symbols(eqs, syms); (e, s, d) ([_X0 + 1, x, y], [_X0, y], {_X0: f(x)}) The original equations and symbols can be restored using d: >>> assert [i.xreplace(d) for i in eqs] == eqs >>> assert [d.get(i, i) for i in s] == syms " 2428,"def model_is_indexable(cls, model, allow_child_models=False): if getattr(model, ""wagtail_reference_index_ignore"", False): return False # Don't check any models that have a parental key, references from these will be collected from the parent if not allow_child_models and any( [isinstance(field, ParentalKey) for field in model._meta.get_fields()] ): return False for field in model._meta.get_fields(): if field.is_relation and field.many_to_one: if getattr(field, ""wagtail_reference_index_ignore"", False): continue if getattr( field.related_model, ""wagtail_reference_index_ignore"", False ): continue if isinstance(field, (ParentalKey, GenericRel)): continue return True if hasattr(field, ""extract_references""): return True if issubclass(model, ClusterableModel): for child_relation in get_all_child_relations(model): if cls.model_is_indexable( child_relation.related_model, allow_child_models=True, ): return True return False "," Returns True if the given model may have outbound references that we would be interested in recording in the index. Args: model (type): a Django model class allow_child_models (boolean): Child models are not indexable on their own. If you are looking at a child model from the perspective of indexing it through its parent, set this to True to disable checking for this. Default False. ",65,91,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def model_is_indexable(cls, model, allow_child_models=False): if getattr(model, ""wagtail_reference_index_ignore"", False): return False # Don't check any models that have a parental key, references from these will be collected from the parent if not allow_child_models and any( [isinstance(field, ParentalKey) for field in model._meta.get_fields()] ): return False for field in model._meta.get_fields(): if field.is_relation and field.many_to_one: if getattr(field, ""wagtail_reference_index_ignore"", False): continue if getattr( field.related_model, ""wagtail_reference_index_ignore"", False ): continue if isinstance(field, (ParentalKey, GenericRel)): continue return True if hasattr(field, ""extract_references""): return True if issubclass(model, ClusterableModel): for child_relation in get_all_child_relations(model): if cls.model_is_indexable( child_relation.related_model, allow_child_models=True, ): return True return False ``` ###Assistant : Returns True if the given model may have outbound references that we would be interested in recording in the index. Args: model (type): a Django model class allow_child_models (boolean): Child models are not indexable on their own. If you are looking at a child model from the perspective of indexing it through its parent, set this to True to disable checking for this. Default False. " 2429,"def get_encodings_from_content(content): warnings.warn( ( ""In requests 3.0, get_encodings_from_content will be removed. For "" ""more information, please see the discussion on issue #2266. (This"" "" warning should only appear once.)"" ), DeprecationWarning, ) charset_re = re.compile(r']', flags=re.I) pragma_re = re.compile(r']', flags=re.I) xml_re = re.compile(r'^<\?xml.*?encoding=[""\']*(.+?)[""\'>]') return ( charset_re.findall(content) + pragma_re.findall(content) + xml_re.findall(content) ) ","Returns encodings from given content string. :param content: bytestring to extract encodings from. ",13,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_encodings_from_content(content): warnings.warn( ( ""In requests 3.0, get_encodings_from_content will be removed. For "" ""more information, please see the discussion on issue #2266. (This"" "" warning should only appear once.)"" ), DeprecationWarning, ) charset_re = re.compile(r']', flags=re.I) pragma_re = re.compile(r']', flags=re.I) xml_re = re.compile(r'^<\?xml.*?encoding=[""\']*(.+?)[""\'>]') return ( charset_re.findall(content) + pragma_re.findall(content) + xml_re.findall(content) ) ``` ###Assistant : Returns encodings from given content string. :param content: bytestring to extract encodings from. " 2430,"def _scale_axis_limits(self, scale_x, scale_y, scale_z): # Get the axis limits and centers minx, maxx, miny, maxy, minz, maxz = self.get_w_lims() cx = (maxx + minx)/2 cy = (maxy + miny)/2 cz = (maxz + minz)/2 # Scale the data range dx = (maxx - minx)*scale_x dy = (maxy - miny)*scale_y dz = (maxz - minz)*scale_z # Set the scaled axis limits self.set_xlim3d(cx - dx/2, cx + dx/2) self.set_ylim3d(cy - dy/2, cy + dy/2) self.set_zlim3d(cz - dz/2, cz + dz/2) "," Keeping the center of the x, y, and z data axes fixed, scale their limits by scale factors. A scale factor > 1 zooms out and a scale factor < 1 zooms in. Parameters ---------- scale_x : float Scale factor for the x data axis. scale_y : float Scale factor for the y data axis. scale_z : float Scale factor for the z data axis. 
",65,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _scale_axis_limits(self, scale_x, scale_y, scale_z): # Get the axis limits and centers minx, maxx, miny, maxy, minz, maxz = self.get_w_lims() cx = (maxx + minx)/2 cy = (maxy + miny)/2 cz = (maxz + minz)/2 # Scale the data range dx = (maxx - minx)*scale_x dy = (maxy - miny)*scale_y dz = (maxz - minz)*scale_z # Set the scaled axis limits self.set_xlim3d(cx - dx/2, cx + dx/2) self.set_ylim3d(cy - dy/2, cy + dy/2) self.set_zlim3d(cz - dz/2, cz + dz/2) ``` ###Assistant : Keeping the center of the x, y, and z data axes fixed, scale their limits by scale factors. A scale factor > 1 zooms out and a scale factor < 1 zooms in. Parameters ---------- scale_x : float Scale factor for the x data axis. scale_y : float Scale factor for the y data axis. scale_z : float Scale factor for the z data axis. " 2431,"def layers(self) -> tuple[str, ...]: for node in self.ancestors: if not isinstance(node, Widget): break if node.styles.has_rule(""layers""): return node.styles.layers return (""default"",) ","Layers of from parent. Returns: tuple[str, ...]: Tuple of layer names. ",11,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def layers(self) -> tuple[str, ...]: for node in self.ancestors: if not isinstance(node, Widget): break if node.styles.has_rule(""layers""): return node.styles.layers return (""default"",) ``` ###Assistant : Layers of from parent. Returns: tuple[str, ...]: Tuple of layer names. " 2432,"def get_roi_head_cfg(fname): config = _get_config_module(fname) model = copy.deepcopy(config.model) roi_head = model.roi_head train_cfg = None if model.train_cfg is None else model.train_cfg.rcnn test_cfg = None if model.test_cfg is None else model.test_cfg.rcnn roi_head.update(dict(train_cfg=train_cfg, test_cfg=test_cfg)) return roi_head ","Grab configs necessary to create a roi_head. These are deep copied to allow for safe modification of parameters without influencing other tests. ",22,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_roi_head_cfg(fname): config = _get_config_module(fname) model = copy.deepcopy(config.model) roi_head = model.roi_head train_cfg = None if model.train_cfg is None else model.train_cfg.rcnn test_cfg = None if model.test_cfg is None else model.test_cfg.rcnn roi_head.update(dict(train_cfg=train_cfg, test_cfg=test_cfg)) return roi_head ``` ###Assistant : Grab configs necessary to create a roi_head. These are deep copied to allow for safe modification of parameters without influencing other tests. " 2433,"def print_as_log(*args, **kwargs): from prefect.context import FlowRunContext, TaskRunContext context = TaskRunContext.get() or FlowRunContext.get() if not context or not context.log_prints: return print(*args, **kwargs) logger = get_run_logger() # Print to an in-memory buffer; so we do not need to implement `print` buffer = io.StringIO() kwargs[""file""] = buffer print(*args, **kwargs) # Remove trailing whitespace to prevent duplicates logger.info(buffer.getvalue().rstrip()) @contextmanager"," A patch for `print` to send printed messages to the Prefect run logger. If no run is active, `print` will behave as if it were not patched. ",27,56,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def print_as_log(*args, **kwargs): from prefect.context import FlowRunContext, TaskRunContext context = TaskRunContext.get() or FlowRunContext.get() if not context or not context.log_prints: return print(*args, **kwargs) logger = get_run_logger() # Print to an in-memory buffer; so we do not need to implement `print` buffer = io.StringIO() kwargs[""file""] = buffer print(*args, **kwargs) # Remove trailing whitespace to prevent duplicates logger.info(buffer.getvalue().rstrip()) @contextmanager ``` ###Assistant : A patch for `print` to send printed messages to the Prefect run logger. If no run is active, `print` will behave as if it were not patched. " 2434,"def test_image(): # Test fails for matplotlib 1.5+ because the size of the image # generated by matplotlib has changed. if Version(matplotlib.__version__) == Version(""3.4.1""): image_size = 432 else: pytest.skip(""Test fails for older matplotlib"") np.random.seed(0) # image size depends on the seed fig, ax = plt.subplots(figsize=(2, 2)) ax.imshow(np.random.random((10, 10)), cmap=plt.cm.jet, interpolation=""nearest"") _assert_output_equal( fake_renderer_output(fig, FakeRenderer), f, ) "," opening figure opening axes draw image of size {image_size} closing axes closing figure ",13,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_image(): # Test fails for matplotlib 1.5+ because the size of the image # generated by matplotlib has changed. if Version(matplotlib.__version__) == Version(""3.4.1""): image_size = 432 else: pytest.skip(""Test fails for older matplotlib"") np.random.seed(0) # image size depends on the seed fig, ax = plt.subplots(figsize=(2, 2)) ax.imshow(np.random.random((10, 10)), cmap=plt.cm.jet, interpolation=""nearest"") _assert_output_equal( fake_renderer_output(fig, FakeRenderer), f, ) ``` ###Assistant : opening figure opening axes draw image of size {image_size} closing axes closing figure " 2435,"def ignore_cidr(vm_, ip): from ipaddress import ip_address, ip_network cidrs = config.get_cloud_config_value( ""ignore_cidr"", vm_, __opts__, default=[], search_global=False ) if cidrs and isinstance(cidrs, str): cidrs = [cidrs] for cidr in cidrs or []: if ip_address(ip) in ip_network(cidr): log.warning(""IP %r found within %r; ignoring it."", ip, cidr) return True return False "," Return True if we are to ignore the specified IP. ",10,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ignore_cidr(vm_, ip): from ipaddress import ip_address, ip_network cidrs = config.get_cloud_config_value( ""ignore_cidr"", vm_, __opts__, default=[], search_global=False ) if cidrs and isinstance(cidrs, str): cidrs = [cidrs] for cidr in cidrs or []: if ip_address(ip) in ip_network(cidr): log.warning(""IP %r found within %r; ignoring it."", ip, cidr) return True return False ``` ###Assistant : Return True if we are to ignore the specified IP. 
" 2436,"def encode_nested_example(schema, obj): # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return {k: encode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)} elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if encode_nested_example(sub_schema, first_elmt) != first_elmt: return [encode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): # dict of list to fill list_dict = {} if isinstance(obj, (list, tuple)): # obj is a list of dict for k, dict_tuples in zip_dict(schema.feature, *obj): list_dict[k] = [encode_nested_example(dict_tuples[0], o) for o in dict_tuples[1:]] return list_dict else: # obj is a single dict for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj): list_dict[k] = [encode_nested_example(sub_schema, o) for o in sub_objs] return list_dict # schema.feature is not a dict if isinstance(obj, str): # don't interpret a string as a list raise ValueError(f""Got a string but expected a list instead: '{obj}'"") if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, schema.feature): break # be careful when comparing tensors here if not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt) != first_elmt: return [encode_nested_example(schema.feature, o) for o in obj] return list(obj) # Object with special encoding: # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)): return schema.encode_example(obj) if obj is not None else None # Other object should be directly convertible to a native Arrow type (like Translation and Translation) return obj ","Encode a nested example. This is used since some features (in particular ClassLabel) have some logic during encoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded. If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same. ",71,270,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def encode_nested_example(schema, obj): # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return {k: encode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)} elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if encode_nested_example(sub_schema, first_elmt) != first_elmt: return [encode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): # dict of list to fill list_dict = {} if isinstance(obj, (list, tuple)): # obj is a list of dict for k, dict_tuples in zip_dict(schema.feature, *obj): list_dict[k] = [encode_nested_example(dict_tuples[0], o) for o in dict_tuples[1:]] return list_dict else: # obj is a single dict for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj): list_dict[k] = [encode_nested_example(sub_schema, o) for o in sub_objs] return list_dict # schema.feature is not a dict if isinstance(obj, str): # don't interpret a string as a list raise ValueError(f""Got a string but expected a list instead: '{obj}'"") if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, schema.feature): break # be careful when comparing tensors here if not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt) != first_elmt: return [encode_nested_example(schema.feature, o) for o in obj] return list(obj) # Object with special encoding: # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)): return schema.encode_example(obj) if obj is not None else None # Other object should be directly convertible to a native Arrow type (like Translation and Translation) return obj ``` ###Assistant : Encode a nested example. This is used since some features (in particular ClassLabel) have some logic during encoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded. If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same. " 2437,"def _configure_matplotlib(cls): rcParams[""keymap.fullscreen""] = [k for k in rcParams[""keymap.fullscreen""] if k != ""f""] rcParams[""keymap.save""] = [k for k in rcParams[""keymap.save""] if k != ""s""] rcParams[""keymap.home""] = [k for k in rcParams[""keymap.home""] if k != ""r""] rcParams[""figure.raise_window""] = False "," Remove `F`, 'S' and 'R' from their default bindings and stop Matplotlib from stealing focus ",15,38,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _configure_matplotlib(cls): rcParams[""keymap.fullscreen""] = [k for k in rcParams[""keymap.fullscreen""] if k != ""f""] rcParams[""keymap.save""] = [k for k in rcParams[""keymap.save""] if k != ""s""] rcParams[""keymap.home""] = [k for k in rcParams[""keymap.home""] if k != ""r""] rcParams[""figure.raise_window""] = False ``` ###Assistant : Remove `F`, 'S' and 'R' from their default bindings and stop Matplotlib from stealing focus " 2438,"def set_permission_cache(user, key, value): from django.core.cache import cache # store this key, so we can clean it when required cache_key = get_cache_key(user, key) cache.set(cache_key, value, get_cms_setting('CACHE_DURATIONS')['permissions'], version=get_cache_permission_version()) "," Helper method for storing values in cache. Stores used keys so all of them can be cleaned when clean_permission_cache gets called. ",21,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_permission_cache(user, key, value): from django.core.cache import cache # store this key, so we can clean it when required cache_key = get_cache_key(user, key) cache.set(cache_key, value, get_cms_setting('CACHE_DURATIONS')['permissions'], version=get_cache_permission_version()) ``` ###Assistant : Helper method for storing values in cache. Stores used keys so all of them can be cleaned when clean_permission_cache gets called. " 2439,"def cookies(self) -> multidict.MultiDictView[str, tuple[str, multidict.MultiDict[str, Optional[str]]]]: return multidict.MultiDictView( self._get_cookies, self._set_cookies ) "," The response cookies. A possibly empty `MultiDictView`, where the keys are cookie name strings, and values are `(cookie value, attributes)` tuples. Within attributes, unary attributes (e.g. `HTTPOnly`) are indicated by a `None` value. Modifications to the MultiDictView update `Response.headers`, and vice versa. *Warning:* Changes to `attributes` will not be picked up unless you also reassign the `(cookie value, attributes)` tuple directly in the `MultiDictView`. ",64,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cookies(self) -> multidict.MultiDictView[str, tuple[str, multidict.MultiDict[str, Optional[str]]]]: return multidict.MultiDictView( self._get_cookies, self._set_cookies ) ``` ###Assistant : The response cookies. A possibly empty `MultiDictView`, where the keys are cookie name strings, and values are `(cookie value, attributes)` tuples. Within attributes, unary attributes (e.g. `HTTPOnly`) are indicated by a `None` value. Modifications to the MultiDictView update `Response.headers`, and vice versa. *Warning:* Changes to `attributes` will not be picked up unless you also reassign the `(cookie value, attributes)` tuple directly in the `MultiDictView`. " 2440,"def _normalize_entries(entries, separators=None): norm_files = {} for entry in entries: norm_files[normalize_file(entry.path, separators=separators)] = entry return norm_files "," Normalizes the entry paths to use the POSIX path separator. *entries* (:class:`~collections.abc.Iterable` of :class:`.TreeEntry`) contains the entries to be normalized. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. See :func:`normalize_file` for more information. 
Returns a :class:`dict` mapping the each normalized file path (:class:`str`) to the entry (:class:`.TreeEntry`) ",52,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _normalize_entries(entries, separators=None): norm_files = {} for entry in entries: norm_files[normalize_file(entry.path, separators=separators)] = entry return norm_files ``` ###Assistant : Normalizes the entry paths to use the POSIX path separator. *entries* (:class:`~collections.abc.Iterable` of :class:`.TreeEntry`) contains the entries to be normalized. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. See :func:`normalize_file` for more information. Returns a :class:`dict` mapping the each normalized file path (:class:`str`) to the entry (:class:`.TreeEntry`) " 2441,"def assertXMLNotEqual(self, xml1, xml2, msg=None): try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = ""First or second argument is not valid XML\n%s"" % e self.fail(self._formatMessage(msg, standardMsg)) else: if result: standardMsg = ""%s == %s"" % ( safe_repr(xml1, True), safe_repr(xml2, True), ) self.fail(self._formatMessage(msg, standardMsg)) "," Assert that two XML snippets are not semantically equivalent. Whitespace in most cases is ignored and attribute ordering is not significant. The arguments must be valid XML. ",27,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def assertXMLNotEqual(self, xml1, xml2, msg=None): try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = ""First or second argument is not valid XML\n%s"" % e self.fail(self._formatMessage(msg, standardMsg)) else: if result: standardMsg = ""%s == %s"" % ( safe_repr(xml1, True), safe_repr(xml2, True), ) self.fail(self._formatMessage(msg, standardMsg)) ``` ###Assistant : Assert that two XML snippets are not semantically equivalent. Whitespace in most cases is ignored and attribute ordering is not significant. The arguments must be valid XML. " 2442,"def token_kwargs(bits, parser, support_legacy=False): if not bits: return {} match = kwarg_re.match(bits[0]) kwarg_format = match and match[1] if not kwarg_format: if not support_legacy: return {} if len(bits) < 3 or bits[1] != ""as"": return {} kwargs = {} while bits: if kwarg_format: match = kwarg_re.match(bits[0]) if not match or not match[1]: return kwargs key, value = match.groups() del bits[:1] else: if len(bits) < 3 or bits[1] != ""as"": return kwargs key, value = bits[2], bits[0] del bits[:3] kwargs[key] = parser.compile_filter(value) if bits and not kwarg_format: if bits[0] != ""and"": return kwargs del bits[:1] return kwargs "," Parse token keyword arguments and return a dictionary of the arguments retrieved from the ``bits`` token list. `bits` is a list containing the remainder of the token (split by spaces) that is to be checked for arguments. Valid arguments are removed from this list. `support_legacy` - if True, the legacy format ``1 as foo`` is accepted. Otherwise, only the standard ``foo=1`` format is allowed. There is no requirement for all remaining token ``bits`` to be keyword arguments, so return the dictionary as soon as an invalid argument format is reached. ",90,95,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def token_kwargs(bits, parser, support_legacy=False): if not bits: return {} match = kwarg_re.match(bits[0]) kwarg_format = match and match[1] if not kwarg_format: if not support_legacy: return {} if len(bits) < 3 or bits[1] != ""as"": return {} kwargs = {} while bits: if kwarg_format: match = kwarg_re.match(bits[0]) if not match or not match[1]: return kwargs key, value = match.groups() del bits[:1] else: if len(bits) < 3 or bits[1] != ""as"": return kwargs key, value = bits[2], bits[0] del bits[:3] kwargs[key] = parser.compile_filter(value) if bits and not kwarg_format: if bits[0] != ""and"": return kwargs del bits[:1] return kwargs ``` ###Assistant : Parse token keyword arguments and return a dictionary of the arguments retrieved from the ``bits`` token list. `bits` is a list containing the remainder of the token (split by spaces) that is to be checked for arguments. Valid arguments are removed from this list. `support_legacy` - if True, the legacy format ``1 as foo`` is accepted. Otherwise, only the standard ``foo=1`` format is allowed. There is no requirement for all remaining token ``bits`` to be keyword arguments, so return the dictionary as soon as an invalid argument format is reached. " 2443,"def get_cost_of_delayed_shipments(scorecard): return get_total_cost_of_shipments(scorecard) - get_cost_of_on_time_shipments(scorecard) ",Gets the total cost of all delayed shipments in the period (based on Purchase Receipts - POs),17,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_cost_of_delayed_shipments(scorecard): return get_total_cost_of_shipments(scorecard) - get_cost_of_on_time_shipments(scorecard) ``` ###Assistant : Gets the total cost of all delayed shipments in the period (based on Purchase Receipts - POs) " 2444,"def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): # if weight is specified, apply element-wise weight if weight is not None: loss = loss * weight # if avg_factor is not specified, just reduce the loss if avg_factor is None: loss = reduce_loss(loss, reduction) else: # if reduction is mean, then average the loss by avg_factor if reduction == 'mean': # Avoid causing ZeroDivisionError when avg_factor is 0.0, # i.e., all labels of an image belong to ignore index. eps = torch.finfo(torch.float32).eps loss = loss.sum() / (avg_factor + eps) # if reduction is 'none', then do nothing, otherwise raise an error elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction=""sum""') return loss ","Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. ",38,112,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): # if weight is specified, apply element-wise weight if weight is not None: loss = loss * weight # if avg_factor is not specified, just reduce the loss if avg_factor is None: loss = reduce_loss(loss, reduction) else: # if reduction is mean, then average the loss by avg_factor if reduction == 'mean': # Avoid causing ZeroDivisionError when avg_factor is 0.0, # i.e., all labels of an image belong to ignore index. 
eps = torch.finfo(torch.float32).eps loss = loss.sum() / (avg_factor + eps) # if reduction is 'none', then do nothing, otherwise raise an error elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction=""sum""') return loss ``` ###Assistant : Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. " 2445,"def test_failure_to_run_iterations(): rnd = np.random.RandomState(0) X = rnd.standard_normal((100, 10)) A = X @ X.T Q = rnd.standard_normal((X.shape[0], 4)) with pytest.warns(UserWarning, match=""Exited at iteration""): eigenvalues, _ = lobpcg(A, Q, maxiter=20) assert(np.max(eigenvalues) > 0) @pytest.mark.filterwarnings(""ignore:The problem size"")","Check that the code exists gracefully without breaking. Issue #10974. ",10,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_failure_to_run_iterations(): rnd = np.random.RandomState(0) X = rnd.standard_normal((100, 10)) A = X @ X.T Q = rnd.standard_normal((X.shape[0], 4)) with pytest.warns(UserWarning, match=""Exited at iteration""): eigenvalues, _ = lobpcg(A, Q, maxiter=20) assert(np.max(eigenvalues) > 0) @pytest.mark.filterwarnings(""ignore:The problem size"") ``` ###Assistant : Check that the code exists gracefully without breaking. Issue #10974. " 2446,"def test_predictor_tableau_header(self, mock_handler): df = pd.DataFrame([ {'a': 1, 'b': 'one'}, {'a': 2, 'b': 'two'}, {'a': 1, 'b': 'three'}, ]) self.set_handler(mock_handler, name='pg', tables={'tasks': df}) # --- use predictor --- predicted_value = 5 predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical }, 'predicted_value': predicted_value } self.set_predictor(predictor) ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb')) # second column is having last value of 'b' # 3: count rows, 4: sum of 'a', 5 max of prediction assert ret.data[0] == [3, 4, 5] "," SELECT SUM(1) AS `cnt__0B4A4E8BD11C48FFB4730D4D2C32191A_ok`, sum(`Custom SQL Query`.`a`) AS `sum_height_ok`, max(`Custom SQL Query`.`p`) AS `sum_length1_ok` FROM ( SELECT res.a, res.p FROM pg.tasks as source JOIN mindsdb.task_model as res ) `Custom SQL Query` HAVING (COUNT(1) > 0) ",35,82,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_predictor_tableau_header(self, mock_handler): df = pd.DataFrame([ {'a': 1, 'b': 'one'}, {'a': 2, 'b': 'two'}, {'a': 1, 'b': 'three'}, ]) self.set_handler(mock_handler, name='pg', tables={'tasks': df}) # --- use predictor --- predicted_value = 5 predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical }, 'predicted_value': predicted_value } self.set_predictor(predictor) ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb')) # second column is having last value of 'b' # 3: count rows, 4: sum of 'a', 5 max of prediction assert ret.data[0] == [3, 4, 5] ``` ###Assistant : SELECT SUM(1) AS `cnt__0B4A4E8BD11C48FFB4730D4D2C32191A_ok`, sum(`Custom SQL Query`.`a`) AS `sum_height_ok`, max(`Custom SQL Query`.`p`) AS `sum_length1_ok` FROM ( SELECT res.a, res.p FROM pg.tasks as source JOIN mindsdb.task_model as res ) `Custom SQL Query` HAVING (COUNT(1) > 0) " 2447,"def test_conflicting_specified_basename_different_models(self): self.router.register(r'notes', NoteViewSet) with pytest.raises(ImproperlyConfigured): self.router.register(r'notes_basename', BasenameViewSet, basename='routertestmodel') "," Ensure 2 routers with different models, and a conflicting basename specified throws an exception ",14,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_conflicting_specified_basename_different_models(self): self.router.register(r'notes', NoteViewSet) with pytest.raises(ImproperlyConfigured): self.router.register(r'notes_basename', BasenameViewSet, basename='routertestmodel') ``` ###Assistant : Ensure 2 routers with different models, and a conflicting basename specified throws an exception " 2448,"def _get_boosted_releases(self) -> BoostedReleases: boosted_releases = BoostedReleases() for boosted_release_cache_key, timestamp in self.redis_client.hgetall( self._generate_cache_key_for_boosted_releases_hash() ).items(): extracted_data = self._extract_data_from_cache_key(boosted_release_cache_key) if extracted_data: release_id, environment = extracted_data boosted_releases.add_release( cache_key=boosted_release_cache_key, id=release_id, timestamp=float(timestamp), environment=environment, ) return boosted_releases "," Returns all the boosted releases and parses them based on key and value data. This method should not be called directly as the boosted releases are not extended, thus they contain only a subset of information. ",36,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_boosted_releases(self) -> BoostedReleases: boosted_releases = BoostedReleases() for boosted_release_cache_key, timestamp in self.redis_client.hgetall( self._generate_cache_key_for_boosted_releases_hash() ).items(): extracted_data = self._extract_data_from_cache_key(boosted_release_cache_key) if extracted_data: release_id, environment = extracted_data boosted_releases.add_release( cache_key=boosted_release_cache_key, id=release_id, timestamp=float(timestamp), environment=environment, ) return boosted_releases ``` ###Assistant : Returns all the boosted releases and parses them based on key and value data. This method should not be called directly as the boosted releases are not extended, thus they contain only a subset of information. 
" 2449,"def store_stats_summary(reply): store_summary = ""--- Aggregate object store stats across all nodes ---\n"" # TODO(ekl) it would be nice if we could provide a full memory usage # breakdown by type (e.g., pinned by worker, primary, etc.) store_summary += ( ""Plasma memory usage {} MiB, {} objects, {}% full, {}% "" ""needed\n"".format( int(reply.store_stats.object_store_bytes_used / (1024 * 1024)), reply.store_stats.num_local_objects, round( 100 * reply.store_stats.object_store_bytes_used / reply.store_stats.object_store_bytes_avail, 2, ), round( 100 * reply.store_stats.object_store_bytes_primary_copy / reply.store_stats.object_store_bytes_avail, 2, ), ) ) if reply.store_stats.object_store_bytes_fallback > 0: store_summary += ""Plasma filesystem mmap usage: {} MiB\n"".format( int(reply.store_stats.object_store_bytes_fallback / (1024 * 1024)) ) if reply.store_stats.spill_time_total_s > 0: store_summary += ( ""Spilled {} MiB, {} objects, avg write throughput {} MiB/s\n"".format( int(reply.store_stats.spilled_bytes_total / (1024 * 1024)), reply.store_stats.spilled_objects_total, int( reply.store_stats.spilled_bytes_total / (1024 * 1024) / reply.store_stats.spill_time_total_s ), ) ) if reply.store_stats.restore_time_total_s > 0: store_summary += ( ""Restored {} MiB, {} objects, avg read throughput {} MiB/s\n"".format( int(reply.store_stats.restored_bytes_total / (1024 * 1024)), reply.store_stats.restored_objects_total, int( reply.store_stats.restored_bytes_total / (1024 * 1024) / reply.store_stats.restore_time_total_s ), ) ) if reply.store_stats.consumed_bytes > 0: store_summary += ""Objects consumed by Ray tasks: {} MiB.\n"".format( int(reply.store_stats.consumed_bytes / (1024 * 1024)) ) if reply.store_stats.object_pulls_queued: store_summary += ""Object fetches queued, waiting for available memory."" return store_summary ",Returns formatted string describing object store stats in all nodes.,10,194,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def store_stats_summary(reply): store_summary = ""--- Aggregate object store stats across all nodes ---\n"" # TODO(ekl) it would be nice if we could provide a full memory usage # breakdown by type (e.g., pinned by worker, primary, etc.) 
store_summary += ( ""Plasma memory usage {} MiB, {} objects, {}% full, {}% "" ""needed\n"".format( int(reply.store_stats.object_store_bytes_used / (1024 * 1024)), reply.store_stats.num_local_objects, round( 100 * reply.store_stats.object_store_bytes_used / reply.store_stats.object_store_bytes_avail, 2, ), round( 100 * reply.store_stats.object_store_bytes_primary_copy / reply.store_stats.object_store_bytes_avail, 2, ), ) ) if reply.store_stats.object_store_bytes_fallback > 0: store_summary += ""Plasma filesystem mmap usage: {} MiB\n"".format( int(reply.store_stats.object_store_bytes_fallback / (1024 * 1024)) ) if reply.store_stats.spill_time_total_s > 0: store_summary += ( ""Spilled {} MiB, {} objects, avg write throughput {} MiB/s\n"".format( int(reply.store_stats.spilled_bytes_total / (1024 * 1024)), reply.store_stats.spilled_objects_total, int( reply.store_stats.spilled_bytes_total / (1024 * 1024) / reply.store_stats.spill_time_total_s ), ) ) if reply.store_stats.restore_time_total_s > 0: store_summary += ( ""Restored {} MiB, {} objects, avg read throughput {} MiB/s\n"".format( int(reply.store_stats.restored_bytes_total / (1024 * 1024)), reply.store_stats.restored_objects_total, int( reply.store_stats.restored_bytes_total / (1024 * 1024) / reply.store_stats.restore_time_total_s ), ) ) if reply.store_stats.consumed_bytes > 0: store_summary += ""Objects consumed by Ray tasks: {} MiB.\n"".format( int(reply.store_stats.consumed_bytes / (1024 * 1024)) ) if reply.store_stats.object_pulls_queued: store_summary += ""Object fetches queued, waiting for available memory."" return store_summary ``` ###Assistant : Returns formatted string describing object store stats in all nodes. " 2450,"def test_spam_checker_deny(self) -> None: self.get_failure(self.handler.register_user(localpart=""user""), SynapseError) ","A spam checker can deny registration, which results in an error.",11,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_spam_checker_deny(self) -> None: self.get_failure(self.handler.register_user(localpart=""user""), SynapseError) ``` ###Assistant : A spam checker can deny registration, which results in an error. " 2451,"def _get_pitch_yaw_roll(self) -> None: proj_matrix = np.zeros((3, 4), dtype=""float32"") proj_matrix[:3, :3] = cv2.Rodrigues(self._rotation)[0] euler = cv2.decomposeProjectionMatrix(proj_matrix)[-1] self._pitch_yaw_roll = cast(Tuple[float, float, float], tuple(euler.squeeze())) logger.trace(""yaw_pitch: %s"", self._pitch_yaw_roll) # type: ignore "," Obtain the yaw, roll and pitch from the :attr:`_rotation` in eular angles. ",12,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_pitch_yaw_roll(self) -> None: proj_matrix = np.zeros((3, 4), dtype=""float32"") proj_matrix[:3, :3] = cv2.Rodrigues(self._rotation)[0] euler = cv2.decomposeProjectionMatrix(proj_matrix)[-1] self._pitch_yaw_roll = cast(Tuple[float, float, float], tuple(euler.squeeze())) logger.trace(""yaw_pitch: %s"", self._pitch_yaw_roll) # type: ignore ``` ###Assistant : Obtain the yaw, roll and pitch from the :attr:`_rotation` in eular angles. " 2452,"def aiohttp_notify_servers_mock() -> Iterable[Mock]: with patch( ""homeassistant.components.dlna_dmr.data.AiohttpNotifyServer"" ) as mock_constructor: servers = [] ","Construct mock AiohttpNotifyServer on demand, eliminating network use. This fixture provides a list of the constructed servers. 
",17,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def aiohttp_notify_servers_mock() -> Iterable[Mock]: with patch( ""homeassistant.components.dlna_dmr.data.AiohttpNotifyServer"" ) as mock_constructor: servers = [] ``` ###Assistant : Construct mock AiohttpNotifyServer on demand, eliminating network use. This fixture provides a list of the constructed servers. " 2453,"def line_collection_2d_to_3d(col, zs=0, zdir='z'): segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir) col.__class__ = Line3DCollection col.set_segments(segments3d) ",Convert a `.LineCollection` to a `.Line3DCollection` object.,7,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def line_collection_2d_to_3d(col, zs=0, zdir='z'): segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir) col.__class__ = Line3DCollection col.set_segments(segments3d) ``` ###Assistant : Convert a `.LineCollection` to a `.Line3DCollection` object. " 2454,"def execute(): frappe.reload_doc(""e_commerce"", ""web_template"", ""item_card_group"") blocks = frappe.db.get_all( ""Web Page Block"", filters={""web_template"": ""Item Card Group""}, fields=[""parent"", ""web_template_values"", ""name""] ) fields = generate_fields_to_edit() for block in blocks: web_template_value = json.loads(block.get('web_template_values')) for field in fields: item = web_template_value.get(field) if not item: continue if frappe.db.exists(""Website Item"", {""item_code"": item}): website_item = frappe.db.get_value(""Website Item"", {""item_code"": item}) else: website_item = make_new_website_item(item, web_template_value, field) continue if website_item: web_template_value[field] = website_item frappe.db.set_value(""Web Page Block"", block.name, ""web_template_values"", json.dumps(web_template_value)) "," Convert all Item links to Website Item link values in exisitng 'Item Card Group' Web Page Block data. ",18,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute(): frappe.reload_doc(""e_commerce"", ""web_template"", ""item_card_group"") blocks = frappe.db.get_all( ""Web Page Block"", filters={""web_template"": ""Item Card Group""}, fields=[""parent"", ""web_template_values"", ""name""] ) fields = generate_fields_to_edit() for block in blocks: web_template_value = json.loads(block.get('web_template_values')) for field in fields: item = web_template_value.get(field) if not item: continue if frappe.db.exists(""Website Item"", {""item_code"": item}): website_item = frappe.db.get_value(""Website Item"", {""item_code"": item}) else: website_item = make_new_website_item(item, web_template_value, field) continue if website_item: web_template_value[field] = website_item frappe.db.set_value(""Web Page Block"", block.name, ""web_template_values"", json.dumps(web_template_value)) ``` ###Assistant : Convert all Item links to Website Item link values in exisitng 'Item Card Group' Web Page Block data. " 2455,"def set_pickradius(self, pickradius): if not isinstance(pickradius, Number) or pickradius < 0: raise ValueError(""pick radius should be a distance"") self._pickradius = pickradius pickradius = property(get_pickradius, set_pickradius) "," Set the pick radius used for containment tests. See `.contains` for more details. Parameters ---------- pickradius : float Pick radius, in points. ",22,25,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def set_pickradius(self, pickradius): if not isinstance(pickradius, Number) or pickradius < 0: raise ValueError(""pick radius should be a distance"") self._pickradius = pickradius pickradius = property(get_pickradius, set_pickradius) ``` ###Assistant : Set the pick radius used for containment tests. See `.contains` for more details. Parameters ---------- pickradius : float Pick radius, in points. " 2456,"def _predict(self): with self._lock: self._predicted_images = [] for frame in self._input_images: self._predictor.in_queue.put(frame) idx = 0 while idx < self._sample_size: logger.debug(""Predicting face %s of %s"", idx + 1, self._sample_size) items = self._predictor.out_queue.get() if items == ""EOF"": logger.debug(""Received EOF"") break for item in items: self._predicted_images.append(item) logger.debug(""Predicted face %s of %s"", idx + 1, self._sample_size) idx += 1 logger.debug(""Predicted faces"") "," Predict from the loaded frames. With a threading lock (to prevent stacking), run the selected faces through the Faceswap model predict function and add the output to :attr:`predicted` ",28,57,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _predict(self): with self._lock: self._predicted_images = [] for frame in self._input_images: self._predictor.in_queue.put(frame) idx = 0 while idx < self._sample_size: logger.debug(""Predicting face %s of %s"", idx + 1, self._sample_size) items = self._predictor.out_queue.get() if items == ""EOF"": logger.debug(""Received EOF"") break for item in items: self._predicted_images.append(item) logger.debug(""Predicted face %s of %s"", idx + 1, self._sample_size) idx += 1 logger.debug(""Predicted faces"") ``` ###Assistant : Predict from the loaded frames. With a threading lock (to prevent stacking), run the selected faces through the Faceswap model predict function and add the output to :attr:`predicted` " 2457,"def exclude(f): J, new = f.rep.exclude() gens = [gen for j, gen in enumerate(f.gens) if j not in J] return f.per(new, gens=gens) "," Remove unnecessary generators from ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import a, b, c, d, x >>> Poly(a + x, a, b, c, d, x).exclude() Poly(a + x, a, x, domain='ZZ') ",36,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def exclude(f): J, new = f.rep.exclude() gens = [gen for j, gen in enumerate(f.gens) if j not in J] return f.per(new, gens=gens) ``` ###Assistant : Remove unnecessary generators from ``f``. Examples ======== >>> from sympy import Poly >>> from sympy.abc import a, b, c, d, x >>> Poly(a + x, a, b, c, d, x).exclude() Poly(a + x, a, x, domain='ZZ') " 2458,"def set_ticks(self, ticks, labels=None, *, minor=False, **kwargs): result = self._set_tick_locations(ticks, minor=minor) if labels is not None: self.set_ticklabels(labels, minor=minor, **kwargs) return result "," Set this Axis' tick locations and optionally labels. If necessary, the view limits of the Axis are expanded so that all given ticks are visible. Parameters ---------- ticks : list of floats List of tick locations. The axis `.Locator` is replaced by a `~.ticker.FixedLocator`. Some tick formatters will not label arbitrary tick positions; e.g. log formatters only label decade ticks by default. 
In such a case you can set a formatter explicitly on the axis using `.Axis.set_major_formatter` or provide formatted *labels* yourself. labels : list of str, optional List of tick labels. If not set, the labels are generated with the axis tick `.Formatter`. minor : bool, default: False If ``False``, set the major ticks; if ``True``, the minor ticks. **kwargs `.Text` properties for the labels. These take effect only if you pass *labels*. In other cases, please use `~.Axes.tick_params`. Notes ----- The mandatory expansion of the view limits is an intentional design choice to prevent the surprise of a non-visible tick. If you need other limits, you should set the limits explicitly after setting the ticks. ",177,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_ticks(self, ticks, labels=None, *, minor=False, **kwargs): result = self._set_tick_locations(ticks, minor=minor) if labels is not None: self.set_ticklabels(labels, minor=minor, **kwargs) return result ``` ###Assistant : Set this Axis' tick locations and optionally labels. If necessary, the view limits of the Axis are expanded so that all given ticks are visible. Parameters ---------- ticks : list of floats List of tick locations. The axis `.Locator` is replaced by a `~.ticker.FixedLocator`. Some tick formatters will not label arbitrary tick positions; e.g. log formatters only label decade ticks by default. In such a case you can set a formatter explicitly on the axis using `.Axis.set_major_formatter` or provide formatted *labels* yourself. labels : list of str, optional List of tick labels. If not set, the labels are generated with the axis tick `.Formatter`. minor : bool, default: False If ``False``, set the major ticks; if ``True``, the minor ticks. **kwargs `.Text` properties for the labels. These take effect only if you pass *labels*. In other cases, please use `~.Axes.tick_params`. Notes ----- The mandatory expansion of the view limits is an intentional design choice to prevent the surprise of a non-visible tick. If you need other limits, you should set the limits explicitly after setting the ticks. " 2459,"def get_all_customers(date_range, company, field, limit=None): if field == ""outstanding_amount"": filters = [[""docstatus"", ""="", ""1""], [""company"", ""="", company]] if date_range: date_range = frappe.parse_json(date_range) filters.append([""posting_date"", "">="", ""between"", [date_range[0], date_range[1]]]) return frappe.db.get_all( ""Sales Invoice"", fields=[""customer as name"", ""sum(outstanding_amount) as value""], filters=filters, group_by=""customer"", order_by=""value desc"", limit=limit, ) else: if field == ""total_sales_amount"": select_field = ""sum(so_item.base_net_amount)"" elif field == ""total_qty_sold"": select_field = ""sum(so_item.stock_qty)"" date_condition = get_date_condition(date_range, ""so.transaction_date"") return frappe.db.sql( .format( select_field, date_condition ), (company, cint(limit)), as_dict=1, ) @frappe.whitelist()"," select so.customer as name, {0} as value FROM `tabSales Order` as so JOIN `tabSales Order Item` as so_item ON so.name = so_item.parent where so.docstatus = 1 {1} and so.company = %s group by so.customer order by value DESC limit %s ",40,73,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_all_customers(date_range, company, field, limit=None): if field == ""outstanding_amount"": filters = [[""docstatus"", ""="", ""1""], [""company"", ""="", company]] if date_range: date_range = frappe.parse_json(date_range) filters.append([""posting_date"", "">="", ""between"", [date_range[0], date_range[1]]]) return frappe.db.get_all( ""Sales Invoice"", fields=[""customer as name"", ""sum(outstanding_amount) as value""], filters=filters, group_by=""customer"", order_by=""value desc"", limit=limit, ) else: if field == ""total_sales_amount"": select_field = ""sum(so_item.base_net_amount)"" elif field == ""total_qty_sold"": select_field = ""sum(so_item.stock_qty)"" date_condition = get_date_condition(date_range, ""so.transaction_date"") return frappe.db.sql( .format( select_field, date_condition ), (company, cint(limit)), as_dict=1, ) @frappe.whitelist() ``` ###Assistant : select so.customer as name, {0} as value FROM `tabSales Order` as so JOIN `tabSales Order Item` as so_item ON so.name = so_item.parent where so.docstatus = 1 {1} and so.company = %s group by so.customer order by value DESC limit %s " 2460,"async def _async_create_radio_entity(self) -> FlowResult: assert self._title is not None assert self._radio_type is not None assert self._device_path is not None assert self._device_settings is not None device_settings = self._device_settings.copy() device_settings[CONF_DEVICE_PATH] = await self.hass.async_add_executor_job( usb.get_serial_by_id, self._device_path ) return self.async_create_entry( title=self._title, data={ CONF_DEVICE: device_settings, CONF_RADIO_TYPE: self._radio_type.name, }, ) ",Create a config entity with the current flow state.,9,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def _async_create_radio_entity(self) -> FlowResult: assert self._title is not None assert self._radio_type is not None assert self._device_path is not None assert self._device_settings is not None device_settings = self._device_settings.copy() device_settings[CONF_DEVICE_PATH] = await self.hass.async_add_executor_job( usb.get_serial_by_id, self._device_path ) return self.async_create_entry( title=self._title, data={ CONF_DEVICE: device_settings, CONF_RADIO_TYPE: self._radio_type.name, }, ) ``` ###Assistant : Create a config entity with the current flow state. " 2461,"def publish_daemon(self, publish_payload, *args, **kwargs): context = zmq.Context(1) ioloop = salt.ext.tornado.ioloop.IOLoop() ioloop.make_current() # Set up the context"," Bind to the interface specified in the configuration file ",9,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def publish_daemon(self, publish_payload, *args, **kwargs): context = zmq.Context(1) ioloop = salt.ext.tornado.ioloop.IOLoop() ioloop.make_current() # Set up the context ``` ###Assistant : Bind to the interface specified in the configuration file " 2462,"def _print_Pow(self, expr, rational=False): PREC = precedence(expr) if expr.exp is S.Half and not rational: return ""sqrt(%s)"" % self._print(expr.base) if expr.is_commutative: if -expr.exp is S.Half and not rational: # Note: Don't test ""expr.exp == -S.Half"" here, because that will # match -0.5, which we don't want. return ""%s/sqrt(%s)"" % tuple(map(lambda arg: self._print(arg), (S.One, expr.base))) if expr.exp is -S.One: # Similarly to the S.Half case, don't test with ""=="" here. 
return '%s/%s' % (self._print(S.One), self.parenthesize(expr.base, PREC, strict=False)) e = self.parenthesize(expr.exp, PREC, strict=False) if self.printmethod == '_sympyrepr' and expr.exp.is_Rational and expr.exp.q != 1: # the parenthesized exp should be '(Rational(a, b))' so strip parens, # but just check to be sure. if e.startswith('(Rational'): return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e[1:-1]) return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e) ","Printing helper function for ``Pow`` Parameters ========== rational : bool, optional If ``True``, it will not attempt printing ``sqrt(x)`` or ``x**S.Half`` as ``sqrt``, and will use ``x**(1/2)`` instead. See examples for additional details Examples ======== >>> from sympy import sqrt, StrPrinter >>> from sympy.abc import x How ``rational`` keyword works with ``sqrt``: >>> printer = StrPrinter() >>> printer._print_Pow(sqrt(x), rational=True) 'x**(1/2)' >>> printer._print_Pow(sqrt(x), rational=False) 'sqrt(x)' >>> printer._print_Pow(1/sqrt(x), rational=True) 'x**(-1/2)' >>> printer._print_Pow(1/sqrt(x), rational=False) '1/sqrt(x)' Notes ===== ``sqrt(x)`` is canonicalized as ``Pow(x, S.Half)`` in SymPy, so there is no need of defining a separate printer for ``sqrt``. Instead, it should be handled here as well. ",102,124,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _print_Pow(self, expr, rational=False): PREC = precedence(expr) if expr.exp is S.Half and not rational: return ""sqrt(%s)"" % self._print(expr.base) if expr.is_commutative: if -expr.exp is S.Half and not rational: # Note: Don't test ""expr.exp == -S.Half"" here, because that will # match -0.5, which we don't want. return ""%s/sqrt(%s)"" % tuple(map(lambda arg: self._print(arg), (S.One, expr.base))) if expr.exp is -S.One: # Similarly to the S.Half case, don't test with ""=="" here. return '%s/%s' % (self._print(S.One), self.parenthesize(expr.base, PREC, strict=False)) e = self.parenthesize(expr.exp, PREC, strict=False) if self.printmethod == '_sympyrepr' and expr.exp.is_Rational and expr.exp.q != 1: # the parenthesized exp should be '(Rational(a, b))' so strip parens, # but just check to be sure. if e.startswith('(Rational'): return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e[1:-1]) return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e) ``` ###Assistant : Printing helper function for ``Pow`` Parameters ========== rational : bool, optional If ``True``, it will not attempt printing ``sqrt(x)`` or ``x**S.Half`` as ``sqrt``, and will use ``x**(1/2)`` instead. See examples for additional details Examples ======== >>> from sympy import sqrt, StrPrinter >>> from sympy.abc import x How ``rational`` keyword works with ``sqrt``: >>> printer = StrPrinter() >>> printer._print_Pow(sqrt(x), rational=True) 'x**(1/2)' >>> printer._print_Pow(sqrt(x), rational=False) 'sqrt(x)' >>> printer._print_Pow(1/sqrt(x), rational=True) 'x**(-1/2)' >>> printer._print_Pow(1/sqrt(x), rational=False) '1/sqrt(x)' Notes ===== ``sqrt(x)`` is canonicalized as ``Pow(x, S.Half)`` in SymPy, so there is no need of defining a separate printer for ``sqrt``. Instead, it should be handled here as well. 
" 2463,"def _save_tab(self, tab, active, minimal=False): data: _JsonType = {'history': []} if active: data['active'] = True if minimal: history = [tab.history.current_item()] else: history = tab.history for idx, item in enumerate(history): qtutils.ensure_valid(item) item_data = self._save_tab_item(tab, idx, item) if item.url().scheme() == 'qute' and item.url().host() == 'back': # don't add qute://back to the session file if item_data.get('active', False) and data['history']: # mark entry before qute://back as active data['history'][-1]['active'] = True else: data['history'].append(item_data) return data ","Get a dict with data for a single tab. Args: tab: The WebView to save. active: Whether the tab is currently active. ",22,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _save_tab(self, tab, active, minimal=False): data: _JsonType = {'history': []} if active: data['active'] = True if minimal: history = [tab.history.current_item()] else: history = tab.history for idx, item in enumerate(history): qtutils.ensure_valid(item) item_data = self._save_tab_item(tab, idx, item) if item.url().scheme() == 'qute' and item.url().host() == 'back': # don't add qute://back to the session file if item_data.get('active', False) and data['history']: # mark entry before qute://back as active data['history'][-1]['active'] = True else: data['history'].append(item_data) return data ``` ###Assistant : Get a dict with data for a single tab. Args: tab: The WebView to save. active: Whether the tab is currently active. " 2464,"def testBestCheckpoints(self): keep_checkpoints_num = 4 checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num) checkpoints = [ Checkpoint(Checkpoint.PERSISTENT, i, self.mock_result(i)) for i in range(16) ] random.shuffle(checkpoints) for checkpoint in checkpoints: checkpoint_manager.on_checkpoint(checkpoint) best_checkpoints = checkpoint_manager.best_checkpoints() self.assertEqual(len(best_checkpoints), keep_checkpoints_num) for i in range(len(best_checkpoints)): self.assertEqual(best_checkpoints[i].value, i + 12) "," Tests that the best checkpoints are tracked and ordered correctly. ",10,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def testBestCheckpoints(self): keep_checkpoints_num = 4 checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num) checkpoints = [ Checkpoint(Checkpoint.PERSISTENT, i, self.mock_result(i)) for i in range(16) ] random.shuffle(checkpoints) for checkpoint in checkpoints: checkpoint_manager.on_checkpoint(checkpoint) best_checkpoints = checkpoint_manager.best_checkpoints() self.assertEqual(len(best_checkpoints), keep_checkpoints_num) for i in range(len(best_checkpoints)): self.assertEqual(best_checkpoints[i].value, i + 12) ``` ###Assistant : Tests that the best checkpoints are tracked and ordered correctly. " 2465,"def save(self): s = self._read_from_storage() # type: _Settings for k, v in self.__dict__.items(): if k[0] == '_': continue if hasattr(s, k): setattr(s, k, v) log.debug(""_ConfigSQL updating storage"") self._session.merge(s) try: self._session.commit() except OperationalError as e: log.error('Database error: %s', e) self._session.rollback() self.load() ",Apply all configuration values to the underlying storage.,8,40,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def save(self): s = self._read_from_storage() # type: _Settings for k, v in self.__dict__.items(): if k[0] == '_': continue if hasattr(s, k): setattr(s, k, v) log.debug(""_ConfigSQL updating storage"") self._session.merge(s) try: self._session.commit() except OperationalError as e: log.error('Database error: %s', e) self._session.rollback() self.load() ``` ###Assistant : Apply all configuration values to the underlying storage. " 2466,"def test_transactions(self): prev_hour = timezone.now() - timedelta(hours=1) event = self.transaction_data.copy() event.update( { ""start_timestamp"": iso_format(prev_hour - timedelta(minutes=1)), ""timestamp"": iso_format(prev_hour), ""tags"": {""foo"": ""bar""}, ""transaction"": ""this is where a transaction's 'message' is stored"", } ) transaction = self.store_event(project_id=self.project.id, data=event) perf_issue = transaction.groups[0] perf_issue.update(first_seen=prev_hour) Activity.objects.create( project=self.project, group=perf_issue, type=ActivityType.SET_REGRESSION.value, datetime=prev_hour, data={""event_id"": transaction.event_id}, ) conditions = [{""id"": ""sentry.rules.conditions.regression_event.RegressionEventCondition""}] filters = [ { ""id"": ""sentry.rules.filters.tagged_event.TaggedEventFilter"", ""key"": ""foo"", ""match"": ""eq"", ""value"": ""bar"", } ] result = preview(self.project, conditions, filters, ""all"", ""all"", 0) assert perf_issue.id in result filters[0][""value""] = ""baz"" result = preview(self.project, conditions, filters, ""all"", ""all"", 0) assert perf_issue.id not in result filters = [ { ""id"": ""sentry.rules.filters.event_attribute.EventAttributeFilter"", ""attribute"": ""message"", ""match"": ""eq"", ""value"": ""this is where a transaction's 'message' is stored"", } ] result = preview(self.project, conditions, filters, ""all"", ""all"", 0) assert perf_issue.id in result filters[0][""value""] = ""wrong message"" result = preview(self.project, conditions, filters, ""all"", ""all"", 0) assert perf_issue.id not in result # this can be tested when SNS-1891 is fixed "," conditions = [{""id"": ""sentry.rules.conditions.first_seen_event.FirstSeenEventCondition""}] filters = [{ ""id"": ""sentry.rules.filters.tagged_event.TaggedEventFilter"", ""key"": ""foo"", ""match"": ""eq"", ""value"": ""bar"", }] result = preview(self.project, conditions, filters, ""all"", ""all"", 0) assert perf_issue.id in result ",28,153,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_transactions(self): prev_hour = timezone.now() - timedelta(hours=1) event = self.transaction_data.copy() event.update( { ""start_timestamp"": iso_format(prev_hour - timedelta(minutes=1)), ""timestamp"": iso_format(prev_hour), ""tags"": {""foo"": ""bar""}, ""transaction"": ""this is where a transaction's 'message' is stored"", } ) transaction = self.store_event(project_id=self.project.id, data=event) perf_issue = transaction.groups[0] perf_issue.update(first_seen=prev_hour) Activity.objects.create( project=self.project, group=perf_issue, type=ActivityType.SET_REGRESSION.value, datetime=prev_hour, data={""event_id"": transaction.event_id}, ) conditions = [{""id"": ""sentry.rules.conditions.regression_event.RegressionEventCondition""}] filters = [ { ""id"": ""sentry.rules.filters.tagged_event.TaggedEventFilter"", ""key"": ""foo"", ""match"": ""eq"", ""value"": ""bar"", } ] result = preview(self.project, conditions, filters, ""all"", ""all"", 0) assert perf_issue.id in result filters[0][""value""] = ""baz"" result = preview(self.project, conditions, filters, ""all"", ""all"", 0) assert perf_issue.id not in result filters = [ { ""id"": ""sentry.rules.filters.event_attribute.EventAttributeFilter"", ""attribute"": ""message"", ""match"": ""eq"", ""value"": ""this is where a transaction's 'message' is stored"", } ] result = preview(self.project, conditions, filters, ""all"", ""all"", 0) assert perf_issue.id in result filters[0][""value""] = ""wrong message"" result = preview(self.project, conditions, filters, ""all"", ""all"", 0) assert perf_issue.id not in result # this can be tested when SNS-1891 is fixed ``` ###Assistant : conditions = [{""id"": ""sentry.rules.conditions.first_seen_event.FirstSeenEventCondition""}] filters = [{ ""id"": ""sentry.rules.filters.tagged_event.TaggedEventFilter"", ""key"": ""foo"", ""match"": ""eq"", ""value"": ""bar"", }] result = preview(self.project, conditions, filters, ""all"", ""all"", 0) assert perf_issue.id in result " 2467,"def test_follows_semver_all_releases_semver_and_missing_package_semver_release_version(self): assert ( follows_semver_versioning_scheme( org_id=self.org.id, project_id=self.proj_1.id, release_version=""2.0.0"" ) is False ) "," Test that ensures that even if a project is following semver, then if the release_version supplied lacks a package, then for that specific release we opt the project out of being considered a semver project ",35,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_follows_semver_all_releases_semver_and_missing_package_semver_release_version(self): assert ( follows_semver_versioning_scheme( org_id=self.org.id, project_id=self.proj_1.id, release_version=""2.0.0"" ) is False ) ``` ###Assistant : Test that ensures that even if a project is following semver, then if the release_version supplied lacks a package, then for that specific release we opt the project out of being considered a semver project " 2468,"def MultivariateT(syms, mu, sigma, v): return multivariate_rv(MultivariateTDistribution, syms, mu, sigma, v) #------------------------------------------------------------------------------- # Multivariate Normal Gamma distribution --------------------------------------- "," Creates a joint random variable with multivariate T-distribution. Parameters ========== syms : A symbol/str For identifying the random variable. 
mu : A list/matrix Representing the location vector sigma : The shape matrix for the distribution Examples ======== >>> from sympy.stats import density, MultivariateT >>> from sympy import Symbol >>> x = Symbol(""x"") >>> X = MultivariateT(""x"", [1, 1], [[1, 0], [0, 1]], 2) >>> density(X)(1, 2) 2/(9*pi) Returns ======= RandomSymbol ",70,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def MultivariateT(syms, mu, sigma, v): return multivariate_rv(MultivariateTDistribution, syms, mu, sigma, v) #------------------------------------------------------------------------------- # Multivariate Normal Gamma distribution --------------------------------------- ``` ###Assistant : Creates a joint random variable with multivariate T-distribution. Parameters ========== syms : A symbol/str For identifying the random variable. mu : A list/matrix Representing the location vector sigma : The shape matrix for the distribution Examples ======== >>> from sympy.stats import density, MultivariateT >>> from sympy import Symbol >>> x = Symbol(""x"") >>> X = MultivariateT(""x"", [1, 1], [[1, 0], [0, 1]], 2) >>> density(X)(1, 2) 2/(9*pi) Returns ======= RandomSymbol " 2469,"def _can_hold_identifiers_and_holds_name(self, name) -> bool: if self.is_object() or is_string_dtype(self.dtype) or self.is_categorical(): return name in self return False "," Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 ",41,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _can_hold_identifiers_and_holds_name(self, name) -> bool: if self.is_object() or is_string_dtype(self.dtype) or self.is_categorical(): return name in self return False ``` ###Assistant : Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 " 2470,"def test_remove_other_alias(self) -> None: # Create a second alias. other_test_alias = ""#test2:test"" other_room_alias = self._add_alias(other_test_alias) # Set the alias as the canonical alias for this room. self._set_canonical_alias( { ""alias"": self.test_alias, ""alt_aliases"": [self.test_alias, other_test_alias], } ) data = self._get_canonical_alias() self.assertEqual(data[""content""][""alias""], self.test_alias) self.assertEqual( data[""content""][""alt_aliases""], [self.test_alias, other_test_alias] ) # Delete the second alias. self.get_success( self.handler.delete_association( create_requester(self.admin_user), other_room_alias ) ) data = self._get_canonical_alias() self.assertEqual(data[""content""][""alias""], self.test_alias) self.assertEqual(data[""content""][""alt_aliases""], [self.test_alias]) ",Removing an alias listed as in alt_aliases should remove it there too.,12,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_remove_other_alias(self) -> None: # Create a second alias. 
other_test_alias = ""#test2:test"" other_room_alias = self._add_alias(other_test_alias) # Set the alias as the canonical alias for this room. self._set_canonical_alias( { ""alias"": self.test_alias, ""alt_aliases"": [self.test_alias, other_test_alias], } ) data = self._get_canonical_alias() self.assertEqual(data[""content""][""alias""], self.test_alias) self.assertEqual( data[""content""][""alt_aliases""], [self.test_alias, other_test_alias] ) # Delete the second alias. self.get_success( self.handler.delete_association( create_requester(self.admin_user), other_room_alias ) ) data = self._get_canonical_alias() self.assertEqual(data[""content""][""alias""], self.test_alias) self.assertEqual(data[""content""][""alt_aliases""], [self.test_alias]) ``` ###Assistant : Removing an alias listed as in alt_aliases should remove it there too. " 2471,"def test_get_comments_no_doc(self): response = self.client.get( ""/api/documents/500/comments/"", format=""json"", ) self.assertEqual(response.status_code, 404) "," GIVEN: - A request to get comments from a non-existent document WHEN: - API request for document comments is made THEN: - HTTP 404 is returned ",26,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_comments_no_doc(self): response = self.client.get( ""/api/documents/500/comments/"", format=""json"", ) self.assertEqual(response.status_code, 404) ``` ###Assistant : GIVEN: - A request to get comments from a non-existent document WHEN: - API request for document comments is made THEN: - HTTP 404 is returned " 2472,"def test_mapping_keypad(self, config_stub, keyparser): config_stub.val.bindings.commands = {'normal': {'a': 'nop'}} config_stub.val.bindings.key_mappings = {'1': 'a'} info = keyutils.KeyInfo(Qt.Key.Key_1, Qt.KeyboardModifier.KeypadModifier) keyparser.handle(info.to_event()) keyparser.execute.assert_called_once_with('nop', None) ",Make sure falling back to non-numpad keys works with mappings.,10,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_mapping_keypad(self, config_stub, keyparser): config_stub.val.bindings.commands = {'normal': {'a': 'nop'}} config_stub.val.bindings.key_mappings = {'1': 'a'} info = keyutils.KeyInfo(Qt.Key.Key_1, Qt.KeyboardModifier.KeypadModifier) keyparser.handle(info.to_event()) keyparser.execute.assert_called_once_with('nop', None) ``` ###Assistant : Make sure falling back to non-numpad keys works with mappings. " 2473,"def verify_liked_image(browser, logger): browser.refresh() unlike_xpath = read_xpath(like_image.__name__, ""unlike"") like_elem = browser.find_elements(By.XPATH, unlike_xpath) if len(like_elem) == 1: return True else: logger.warning(""--> Image was NOT liked! You have a BLOCK on likes!"") return False ",Check for a ban on likes using the last liked image,11,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def verify_liked_image(browser, logger): browser.refresh() unlike_xpath = read_xpath(like_image.__name__, ""unlike"") like_elem = browser.find_elements(By.XPATH, unlike_xpath) if len(like_elem) == 1: return True else: logger.warning(""--> Image was NOT liked! 
You have a BLOCK on likes!"") return False ``` ###Assistant : Check for a ban on likes using the last liked image " 2474,"def get_bboxes(self, dst_type='hbb'): from ..bbox import get_box_type _, box_type_cls = get_box_type(dst_type) return box_type_cls.from_instance_masks(self) ","Get the certain type boxes from masks. Please refer to ``mmdet.structures.bbox.box_type`` for more details of the box type. Args: dst_type: Destination box type. Returns: :obj:`BaseBoxes`: Certain type boxes. ",28,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_bboxes(self, dst_type='hbb'): from ..bbox import get_box_type _, box_type_cls = get_box_type(dst_type) return box_type_cls.from_instance_masks(self) ``` ###Assistant : Get the certain type boxes from masks. Please refer to ``mmdet.structures.bbox.box_type`` for more details of the box type. Args: dst_type: Destination box type. Returns: :obj:`BaseBoxes`: Certain type boxes. " 2475,"def deprecate_call(): sympy_deprecation_warning( , deprecated_since_version=""1.5"", active_deprecations_target=""deprecated-tensor-fun-eval"", stacklevel=4, ) "," Calling a tensor like Tensor(*indices) is deprecated. Use Tensor.substitute_indices() instead. ",10,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deprecate_call(): sympy_deprecation_warning( , deprecated_since_version=""1.5"", active_deprecations_target=""deprecated-tensor-fun-eval"", stacklevel=4, ) ``` ###Assistant : Calling a tensor like Tensor(*indices) is deprecated. Use Tensor.substitute_indices() instead. " 2476,"def assign_proto(proto, name, val): is_repeated_field = hasattr(getattr(proto, name), 'extend') if is_repeated_field and not isinstance(val, list): val = [val] if isinstance(val, list): if isinstance(val[0], dict): for item in val: proto_item = getattr(proto, name).add() for k, v in six.iteritems(item): assign_proto(proto_item, k, v) else: getattr(proto, name).extend(val) elif isinstance(val, dict): for k, v in six.iteritems(val): assign_proto(getattr(proto, name), k, v) else: setattr(proto, name, val) ","Assign a Python object to a protobuf message, based on the Python type (in recursive fashion). Lists become repeated fields/messages, dicts become messages, and other types are assigned directly. For convenience, repeated fields whose values are not lists are converted to single-element lists; e.g., `my_repeated_int_field=3` is converted to `my_repeated_int_field=[3]`.",49,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def assign_proto(proto, name, val): is_repeated_field = hasattr(getattr(proto, name), 'extend') if is_repeated_field and not isinstance(val, list): val = [val] if isinstance(val, list): if isinstance(val[0], dict): for item in val: proto_item = getattr(proto, name).add() for k, v in six.iteritems(item): assign_proto(proto_item, k, v) else: getattr(proto, name).extend(val) elif isinstance(val, dict): for k, v in six.iteritems(val): assign_proto(getattr(proto, name), k, v) else: setattr(proto, name, val) ``` ###Assistant : Assign a Python object to a protobuf message, based on the Python type (in recursive fashion). Lists become repeated fields/messages, dicts become messages, and other types are assigned directly. 
For convenience, repeated fields whose values are not lists are converted to single-element lists; e.g., `my_repeated_int_field=3` is converted to `my_repeated_int_field=[3]`. " 2477,"def __new__(cls, *args, **kwargs): sympy_deprecation_warning( , deprecated_since_version=""1.8"", active_deprecations_target='deprecated-askhandler', ) return super().__new__(cls, *args, **kwargs) "," The AskHandler system is deprecated. The AskHandler class should be replaced with the multipledispatch handler of Predicate ",17,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __new__(cls, *args, **kwargs): sympy_deprecation_warning( , deprecated_since_version=""1.8"", active_deprecations_target='deprecated-askhandler', ) return super().__new__(cls, *args, **kwargs) ``` ###Assistant : The AskHandler system is deprecated. The AskHandler class should be replaced with the multipledispatch handler of Predicate " 2478,"def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None): _raise_error_wrong_axis(axis) if not isinstance(X, (sp.csr_matrix, sp.csc_matrix)): _raise_typeerror(X) if np.size(last_n) == 1: last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype) if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)): raise ValueError(""last_mean, last_var, last_n do not have the same shapes."") if axis == 1: if np.size(last_mean) != X.shape[0]: raise ValueError( ""If axis=1, then last_mean, last_n, last_var should be of "" f""size n_samples {X.shape[0]} (Got {np.size(last_mean)})."" ) else: # axis == 0 if np.size(last_mean) != X.shape[1]: raise ValueError( ""If axis=0, then last_mean, last_n, last_var should be of "" f""size n_features {X.shape[1]} (Got {np.size(last_mean)})."" ) X = X.T if axis == 1 else X if weights is not None: weights = _check_sample_weight(weights, X, dtype=X.dtype) return _incr_mean_var_axis0( X, last_mean=last_mean, last_var=last_var, last_n=last_n, weights=weights ) ","Compute incremental mean and variance along an axis on a CSR or CSC matrix. last_mean, last_var are the statistics computed at the last step by this function. Both must be initialized to 0-arrays of the proper size, i.e. the number of features in X. last_n is the number of samples encountered until now. Parameters ---------- X : CSR or CSC sparse matrix of shape (n_samples, n_features) Input data. axis : {0, 1} Axis along which the axis should be computed. last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating Array of means to update with the new data X. Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1. last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating Array of variances to update with the new data X. Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1. last_n : float or ndarray of shape (n_features,) or (n_samples,), \ dtype=floating Sum of the weights seen so far, excluding the current weights If not float, it should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1. If float it corresponds to having same weights for all samples (or features). weights : ndarray of shape (n_samples,) or (n_features,), default=None If axis is set to 0 shape is (n_samples,) or if axis is set to 1 shape is (n_features,). If it is set to None, then samples are equally weighted. .. 
versionadded:: 0.24 Returns ------- means : ndarray of shape (n_features,) or (n_samples,), dtype=floating Updated feature-wise means if axis = 0 or sample-wise means if axis = 1. variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating Updated feature-wise variances if axis = 0 or sample-wise variances if axis = 1. n : ndarray of shape (n_features,) or (n_samples,), dtype=integral Updated number of seen samples per feature if axis=0 or number of seen features per sample if axis=1. If weights is not None, n is a sum of the weights of the seen samples or features instead of the actual number of seen samples or features. Notes ----- NaNs are ignored in the algorithm. ",344,121,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None): _raise_error_wrong_axis(axis) if not isinstance(X, (sp.csr_matrix, sp.csc_matrix)): _raise_typeerror(X) if np.size(last_n) == 1: last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype) if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)): raise ValueError(""last_mean, last_var, last_n do not have the same shapes."") if axis == 1: if np.size(last_mean) != X.shape[0]: raise ValueError( ""If axis=1, then last_mean, last_n, last_var should be of "" f""size n_samples {X.shape[0]} (Got {np.size(last_mean)})."" ) else: # axis == 0 if np.size(last_mean) != X.shape[1]: raise ValueError( ""If axis=0, then last_mean, last_n, last_var should be of "" f""size n_features {X.shape[1]} (Got {np.size(last_mean)})."" ) X = X.T if axis == 1 else X if weights is not None: weights = _check_sample_weight(weights, X, dtype=X.dtype) return _incr_mean_var_axis0( X, last_mean=last_mean, last_var=last_var, last_n=last_n, weights=weights ) ``` ###Assistant : Compute incremental mean and variance along an axis on a CSR or CSC matrix. last_mean, last_var are the statistics computed at the last step by this function. Both must be initialized to 0-arrays of the proper size, i.e. the number of features in X. last_n is the number of samples encountered until now. Parameters ---------- X : CSR or CSC sparse matrix of shape (n_samples, n_features) Input data. axis : {0, 1} Axis along which the axis should be computed. last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating Array of means to update with the new data X. Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1. last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating Array of variances to update with the new data X. Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1. last_n : float or ndarray of shape (n_features,) or (n_samples,), \ dtype=floating Sum of the weights seen so far, excluding the current weights If not float, it should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1. If float it corresponds to having same weights for all samples (or features). weights : ndarray of shape (n_samples,) or (n_features,), default=None If axis is set to 0 shape is (n_samples,) or if axis is set to 1 shape is (n_features,). If it is set to None, then samples are equally weighted. .. versionadded:: 0.24 Returns ------- means : ndarray of shape (n_features,) or (n_samples,), dtype=floating Updated feature-wise means if axis = 0 or sample-wise means if axis = 1. 
variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating Updated feature-wise variances if axis = 0 or sample-wise variances if axis = 1. n : ndarray of shape (n_features,) or (n_samples,), dtype=integral Updated number of seen samples per feature if axis=0 or number of seen features per sample if axis=1. If weights is not None, n is a sum of the weights of the seen samples or features instead of the actual number of seen samples or features. Notes ----- NaNs are ignored in the algorithm. " 2479,"async def test_thermostat_with_no_off_after_recheck(hass, hk_driver, events): entity_id = ""climate.test"" # support_auto = True hass.states.async_set( entity_id, HVACMode.COOL, { ATTR_SUPPORTED_FEATURES: SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_RANGE, ATTR_HVAC_MODES: [], }, ) await hass.async_block_till_done() acc = Thermostat(hass, hk_driver, ""Climate"", entity_id, 1, None) hk_driver.add_accessory(acc) await acc.run() await hass.async_block_till_done() assert acc.char_cooling_thresh_temp.value == 23.0 assert acc.char_heating_thresh_temp.value == 19.0 assert acc.char_cooling_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP assert acc.char_cooling_thresh_temp.properties[PROP_MIN_VALUE] == 7.0 assert acc.char_cooling_thresh_temp.properties[PROP_MIN_STEP] == 0.1 assert acc.char_heating_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP assert acc.char_heating_thresh_temp.properties[PROP_MIN_VALUE] == 7.0 assert acc.char_heating_thresh_temp.properties[PROP_MIN_STEP] == 0.1 assert acc.char_target_heat_cool.value == 2 hass.states.async_set( entity_id, HVACMode.HEAT_COOL, { ATTR_TARGET_TEMP_HIGH: 22.0, ATTR_TARGET_TEMP_LOW: 20.0, ATTR_CURRENT_TEMPERATURE: 18.0, ATTR_HVAC_ACTION: HVACAction.HEATING, ATTR_HVAC_MODES: [HVACMode.HEAT_COOL, HVACMode.AUTO], }, ) await hass.async_block_till_done() assert acc.char_heating_thresh_temp.value == 20.0 assert acc.char_cooling_thresh_temp.value == 22.0 assert acc.char_current_heat_cool.value == 1 assert acc.char_target_heat_cool.value == 3 assert acc.char_current_temp.value == 18.0 assert acc.char_display_units.value == 0 ",Test if a thermostat that is not ready when we first see it that actually does not have off.,19,118,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_thermostat_with_no_off_after_recheck(hass, hk_driver, events): entity_id = ""climate.test"" # support_auto = True hass.states.async_set( entity_id, HVACMode.COOL, { ATTR_SUPPORTED_FEATURES: SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_RANGE, ATTR_HVAC_MODES: [], }, ) await hass.async_block_till_done() acc = Thermostat(hass, hk_driver, ""Climate"", entity_id, 1, None) hk_driver.add_accessory(acc) await acc.run() await hass.async_block_till_done() assert acc.char_cooling_thresh_temp.value == 23.0 assert acc.char_heating_thresh_temp.value == 19.0 assert acc.char_cooling_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP assert acc.char_cooling_thresh_temp.properties[PROP_MIN_VALUE] == 7.0 assert acc.char_cooling_thresh_temp.properties[PROP_MIN_STEP] == 0.1 assert acc.char_heating_thresh_temp.properties[PROP_MAX_VALUE] == DEFAULT_MAX_TEMP assert acc.char_heating_thresh_temp.properties[PROP_MIN_VALUE] == 7.0 assert acc.char_heating_thresh_temp.properties[PROP_MIN_STEP] == 0.1 assert acc.char_target_heat_cool.value == 2 hass.states.async_set( entity_id, HVACMode.HEAT_COOL, { ATTR_TARGET_TEMP_HIGH: 22.0, ATTR_TARGET_TEMP_LOW: 20.0, ATTR_CURRENT_TEMPERATURE: 18.0, ATTR_HVAC_ACTION: HVACAction.HEATING, ATTR_HVAC_MODES: [HVACMode.HEAT_COOL, HVACMode.AUTO], }, ) await hass.async_block_till_done() assert acc.char_heating_thresh_temp.value == 20.0 assert acc.char_cooling_thresh_temp.value == 22.0 assert acc.char_current_heat_cool.value == 1 assert acc.char_target_heat_cool.value == 3 assert acc.char_current_temp.value == 18.0 assert acc.char_display_units.value == 0 ``` ###Assistant : Test if a thermostat that is not ready when we first see it that actually does not have off. " 2480,"def save(self, *args, **kwargs): is_new = self.pk is None if is_new: clean_name = get_field_clean_name(self.label) self.clean_name = clean_name super().save(*args, **kwargs) "," When new fields are created, generate a template safe ascii name to use as the JSON storage reference for this field. Previously created fields will be updated to use the legacy unidecode method via checks & _migrate_legacy_clean_name. We do not want to update the clean name on any subsequent changes to the label as this would invalidate any previously submitted data. ",61,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save(self, *args, **kwargs): is_new = self.pk is None if is_new: clean_name = get_field_clean_name(self.label) self.clean_name = clean_name super().save(*args, **kwargs) ``` ###Assistant : When new fields are created, generate a template safe ascii name to use as the JSON storage reference for this field. Previously created fields will be updated to use the legacy unidecode method via checks & _migrate_legacy_clean_name. We do not want to update the clean name on any subsequent changes to the label as this would invalidate any previously submitted data. " 2481,"def dask_task_wrapper(func, repack, key, ray_pretask_cbs, ray_posttask_cbs, *args): if ray_pretask_cbs is not None: pre_states = [ cb(key, args) if cb is not None else None for cb in ray_pretask_cbs ] repacked_args, repacked_deps = repack(args) # Recursively execute Dask-inlined tasks. actual_args = [_execute_task(a, repacked_deps) for a in repacked_args] # Execute the actual underlying Dask task. 
result = func(*actual_args) if ray_posttask_cbs is not None: for cb, pre_state in zip(ray_posttask_cbs, pre_states): if cb is not None: cb(key, result, pre_state) return result "," A Ray remote function acting as a Dask task wrapper. This function will repackage the given flat `args` into its original data structures using `repack`, execute any Dask subtasks within the repackaged arguments (inlined by Dask's optimization pass), and then pass the concrete task arguments to the provided Dask task function, `func`. Args: func (callable): The Dask task function to execute. repack (callable): A function that repackages the provided args into the original (possibly nested) Python objects. key (str): The Dask key for this task. ray_pretask_cbs (callable): Pre-task execution callbacks. ray_posttask_cbs (callable): Post-task execution callback. *args (ObjectRef): Ray object references representing the Dask task's arguments. Returns: The output of the Dask task. In the context of Ray, a dask_task_wrapper.remote() invocation will return a Ray object reference representing the Ray task's result. ",131,77,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dask_task_wrapper(func, repack, key, ray_pretask_cbs, ray_posttask_cbs, *args): if ray_pretask_cbs is not None: pre_states = [ cb(key, args) if cb is not None else None for cb in ray_pretask_cbs ] repacked_args, repacked_deps = repack(args) # Recursively execute Dask-inlined tasks. actual_args = [_execute_task(a, repacked_deps) for a in repacked_args] # Execute the actual underlying Dask task. result = func(*actual_args) if ray_posttask_cbs is not None: for cb, pre_state in zip(ray_posttask_cbs, pre_states): if cb is not None: cb(key, result, pre_state) return result ``` ###Assistant : A Ray remote function acting as a Dask task wrapper. This function will repackage the given flat `args` into its original data structures using `repack`, execute any Dask subtasks within the repackaged arguments (inlined by Dask's optimization pass), and then pass the concrete task arguments to the provided Dask task function, `func`. Args: func (callable): The Dask task function to execute. repack (callable): A function that repackages the provided args into the original (possibly nested) Python objects. key (str): The Dask key for this task. ray_pretask_cbs (callable): Pre-task execution callbacks. ray_posttask_cbs (callable): Post-task execution callback. *args (ObjectRef): Ray object references representing the Dask task's arguments. Returns: The output of the Dask task. In the context of Ray, a dask_task_wrapper.remote() invocation will return a Ray object reference representing the Ray task's result. " 2482,"def words(count, common=True): word_list = list(COMMON_WORDS) if common else [] c = len(word_list) if count > c: count -= c while count > 0: c = min(count, len(WORDS)) count -= c word_list += random.sample(WORDS, c) else: word_list = word_list[:count] return "" "".join(word_list) "," Return a string of `count` lorem ipsum words separated by a single space. If `common` is True, then the first 19 words will be the standard 'lorem ipsum' words. Otherwise, all words will be selected randomly. ",36,42,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def words(count, common=True): word_list = list(COMMON_WORDS) if common else [] c = len(word_list) if count > c: count -= c while count > 0: c = min(count, len(WORDS)) count -= c word_list += random.sample(WORDS, c) else: word_list = word_list[:count] return "" "".join(word_list) ``` ###Assistant : Return a string of `count` lorem ipsum words separated by a single space. If `common` is True, then the first 19 words will be the standard 'lorem ipsum' words. Otherwise, all words will be selected randomly. " 2483,"def jumpTo(self, bytes): try: self._position = self.index(bytes, self.position) + len(bytes) - 1 except ValueError: raise StopIteration return True ","Look for the next sequence of bytes matching a given sequence. If a match is found advance the position to the last byte of the match",26,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def jumpTo(self, bytes): try: self._position = self.index(bytes, self.position) + len(bytes) - 1 except ValueError: raise StopIteration return True ``` ###Assistant : Look for the next sequence of bytes matching a given sequence. If a match is found advance the position to the last byte of the match " 2484,"def available(self) -> bool: expire_after: int | None = self._config.get(CONF_EXPIRE_AFTER) # mypy doesn't know about fget: https://github.com/python/mypy/issues/6185 return MqttAvailability.available.fget(self) and ( # type: ignore[attr-defined] expire_after is None or not self._expired ) ",Return true if the device is available and value has not expired.,12,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def available(self) -> bool: expire_after: int | None = self._config.get(CONF_EXPIRE_AFTER) # mypy doesn't know about fget: https://github.com/python/mypy/issues/6185 return MqttAvailability.available.fget(self) and ( # type: ignore[attr-defined] expire_after is None or not self._expired ) ``` ###Assistant : Return true if the device is available and value has not expired. " 2485,"def Logistic(name, mu, s): r return rv(name, LogisticDistribution, (mu, s)) #------------------------------------------------------------------------------- # Log-logistic distribution -------------------------------------------------------- "," Create a continuous random variable with a logistic distribution. Explanation =========== The density of the logistic distribution is given by .. math:: f(x) := \frac{e^{-(x-\mu)/s}} {s\left(1+e^{-(x-\mu)/s}\right)^2} Parameters ========== mu : Real number, the location (mean) s : Real number, `s > 0`, a scale Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Logistic, density, cdf >>> from sympy import Symbol >>> mu = Symbol(""mu"", real=True) >>> s = Symbol(""s"", positive=True) >>> z = Symbol(""z"") >>> X = Logistic(""x"", mu, s) >>> density(X)(z) exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2) >>> cdf(X)(z) 1/(exp((mu - z)/s) + 1) References ========== .. [1] https://en.wikipedia.org/wiki/Logistic_distribution .. [2] http://mathworld.wolfram.com/LogisticDistribution.html ",105,15,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def Logistic(name, mu, s): r return rv(name, LogisticDistribution, (mu, s)) #------------------------------------------------------------------------------- # Log-logistic distribution -------------------------------------------------------- ``` ###Assistant : Create a continuous random variable with a logistic distribution. Explanation =========== The density of the logistic distribution is given by .. math:: f(x) := \frac{e^{-(x-\mu)/s}} {s\left(1+e^{-(x-\mu)/s}\right)^2} Parameters ========== mu : Real number, the location (mean) s : Real number, `s > 0`, a scale Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Logistic, density, cdf >>> from sympy import Symbol >>> mu = Symbol(""mu"", real=True) >>> s = Symbol(""s"", positive=True) >>> z = Symbol(""z"") >>> X = Logistic(""x"", mu, s) >>> density(X)(z) exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2) >>> cdf(X)(z) 1/(exp((mu - z)/s) + 1) References ========== .. [1] https://en.wikipedia.org/wiki/Logistic_distribution .. [2] http://mathworld.wolfram.com/LogisticDistribution.html " 2486,"def __call__(self, feat_maps, comp_attribs): assert isinstance(feat_maps, paddle.Tensor) assert comp_attribs.ndim == 3 assert comp_attribs.shape[2] == 8 sorted_dist_inds_batch = [] local_graph_batch = [] knn_batch = [] node_feat_batch = [] node_label_batch = [] for batch_ind in range(comp_attribs.shape[0]): num_comps = int(comp_attribs[batch_ind, 0, 0]) comp_geo_attribs = comp_attribs[batch_ind, :num_comps, 1:7] node_labels = comp_attribs[batch_ind, :num_comps, 7].astype( np.int32) comp_centers = comp_geo_attribs[:, 0:2] distance_matrix = euclidean_distance_matrix(comp_centers, comp_centers) batch_id = np.zeros( (comp_geo_attribs.shape[0], 1), dtype=np.float32) * batch_ind comp_geo_attribs[:, -2] = np.clip(comp_geo_attribs[:, -2], -1, 1) angle = np.arccos(comp_geo_attribs[:, -2]) * np.sign( comp_geo_attribs[:, -1]) angle = angle.reshape((-1, 1)) rotated_rois = np.hstack( [batch_id, comp_geo_attribs[:, :-2], angle]) rois = paddle.to_tensor(rotated_rois) content_feats = self.pooling(feat_maps[batch_ind].unsqueeze(0), rois) content_feats = content_feats.reshape([content_feats.shape[0], -1]) geo_feats = feature_embedding(comp_geo_attribs, self.node_geo_feat_dim) geo_feats = paddle.to_tensor(geo_feats) node_feats = paddle.concat([content_feats, geo_feats], axis=-1) sorted_dist_inds = np.argsort(distance_matrix, axis=1) pivot_local_graphs, pivot_knns = self.generate_local_graphs( sorted_dist_inds, node_labels) node_feat_batch.append(node_feats) node_label_batch.append(node_labels) local_graph_batch.append(pivot_local_graphs) knn_batch.append(pivot_knns) sorted_dist_inds_batch.append(sorted_dist_inds) (node_feats, adjacent_matrices, knn_inds, gt_linkage) = \ self.generate_gcn_input(node_feat_batch, node_label_batch, local_graph_batch, knn_batch, sorted_dist_inds_batch) return node_feats, adjacent_matrices, knn_inds, gt_linkage ","Generate local graphs as GCN input. Args: feat_maps (Tensor): The feature maps to extract the content features of text components. comp_attribs (ndarray): The text component attributes. Returns: local_graphs_node_feat (Tensor): The node features of graph. adjacent_matrices (Tensor): The adjacent matrices of local graphs. pivots_knn_inds (Tensor): The k-nearest neighbor indices in local graph. gt_linkage (Tensor): The surpervision signal of GCN for linkage prediction. ",61,146,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def __call__(self, feat_maps, comp_attribs): assert isinstance(feat_maps, paddle.Tensor) assert comp_attribs.ndim == 3 assert comp_attribs.shape[2] == 8 sorted_dist_inds_batch = [] local_graph_batch = [] knn_batch = [] node_feat_batch = [] node_label_batch = [] for batch_ind in range(comp_attribs.shape[0]): num_comps = int(comp_attribs[batch_ind, 0, 0]) comp_geo_attribs = comp_attribs[batch_ind, :num_comps, 1:7] node_labels = comp_attribs[batch_ind, :num_comps, 7].astype( np.int32) comp_centers = comp_geo_attribs[:, 0:2] distance_matrix = euclidean_distance_matrix(comp_centers, comp_centers) batch_id = np.zeros( (comp_geo_attribs.shape[0], 1), dtype=np.float32) * batch_ind comp_geo_attribs[:, -2] = np.clip(comp_geo_attribs[:, -2], -1, 1) angle = np.arccos(comp_geo_attribs[:, -2]) * np.sign( comp_geo_attribs[:, -1]) angle = angle.reshape((-1, 1)) rotated_rois = np.hstack( [batch_id, comp_geo_attribs[:, :-2], angle]) rois = paddle.to_tensor(rotated_rois) content_feats = self.pooling(feat_maps[batch_ind].unsqueeze(0), rois) content_feats = content_feats.reshape([content_feats.shape[0], -1]) geo_feats = feature_embedding(comp_geo_attribs, self.node_geo_feat_dim) geo_feats = paddle.to_tensor(geo_feats) node_feats = paddle.concat([content_feats, geo_feats], axis=-1) sorted_dist_inds = np.argsort(distance_matrix, axis=1) pivot_local_graphs, pivot_knns = self.generate_local_graphs( sorted_dist_inds, node_labels) node_feat_batch.append(node_feats) node_label_batch.append(node_labels) local_graph_batch.append(pivot_local_graphs) knn_batch.append(pivot_knns) sorted_dist_inds_batch.append(sorted_dist_inds) (node_feats, adjacent_matrices, knn_inds, gt_linkage) = \ self.generate_gcn_input(node_feat_batch, node_label_batch, local_graph_batch, knn_batch, sorted_dist_inds_batch) return node_feats, adjacent_matrices, knn_inds, gt_linkage ``` ###Assistant : Generate local graphs as GCN input. Args: feat_maps (Tensor): The feature maps to extract the content features of text components. comp_attribs (ndarray): The text component attributes. Returns: local_graphs_node_feat (Tensor): The node features of graph. adjacent_matrices (Tensor): The adjacent matrices of local graphs. pivots_knn_inds (Tensor): The k-nearest neighbor indices in local graph. gt_linkage (Tensor): The surpervision signal of GCN for linkage prediction. " 2487,"def _is_zero_copy_arrow_op(cls, op) -> bool: is_zero_copy_op = False if isinstance(op, (FrameNode, TransformNode, UnionNode)): # - FrameNode: already materialized PyArrow table # - TransformNode: select certain columns of the table, implemented zero-copy (``df._arrow_select``) # - UnionNode: concatenate PyArrow tables, implemented zero-copy (``df._arrow_concat``) is_zero_copy_op = True elif isinstance(op, MaskNode) and ( isinstance(op.row_positions, slice) or is_range_like(op.row_positions) ): # Can select rows zero-copy if indexer is a slice-like (``df._arrow_row_slice``) is_zero_copy_op = True return is_zero_copy_op and all( # Walk the computation tree cls._is_zero_copy_arrow_op(_op) for _op in getattr(op, ""inputs"", []) ) "," Check whether the passed node of the delayed computation tree could be executed zero-copy via PyArrow execution. Parameters ---------- op : DFAlgNode Returns ------- bool ",25,85,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _is_zero_copy_arrow_op(cls, op) -> bool: is_zero_copy_op = False if isinstance(op, (FrameNode, TransformNode, UnionNode)): # - FrameNode: already materialized PyArrow table # - TransformNode: select certain columns of the table, implemented zero-copy (``df._arrow_select``) # - UnionNode: concatenate PyArrow tables, implemented zero-copy (``df._arrow_concat``) is_zero_copy_op = True elif isinstance(op, MaskNode) and ( isinstance(op.row_positions, slice) or is_range_like(op.row_positions) ): # Can select rows zero-copy if indexer is a slice-like (``df._arrow_row_slice``) is_zero_copy_op = True return is_zero_copy_op and all( # Walk the computation tree cls._is_zero_copy_arrow_op(_op) for _op in getattr(op, ""inputs"", []) ) ``` ###Assistant : Check whether the passed node of the delayed computation tree could be executed zero-copy via PyArrow execution. Parameters ---------- op : DFAlgNode Returns ------- bool " 2488,"def batch_p_dist(x, y, p=2): x = x.unsqueeze(1) diff = x - y return paddle.norm(diff, p=p, axis=list(range(2, diff.dim()))) @register"," calculate pairwise p_dist, the first index of x and y are batch return [x.shape[0], y.shape[0]] ",15,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def batch_p_dist(x, y, p=2): x = x.unsqueeze(1) diff = x - y return paddle.norm(diff, p=p, axis=list(range(2, diff.dim()))) @register ``` ###Assistant : calculate pairwise p_dist, the first index of x and y are batch return [x.shape[0], y.shape[0]] " 2489,"def get_employee_shift(employee, for_timestamp=None, consider_default_shift=False, next_shift_direction=None): if for_timestamp is None: for_timestamp = now_datetime() shift_details = get_shift_for_timestamp(employee, for_timestamp) # if shift assignment is not found, consider default shift default_shift = frappe.db.get_value('Employee', employee, 'default_shift') if not shift_details and consider_default_shift: shift_details = get_shift_details(default_shift, for_timestamp.date()) # if its a holiday, reset if shift_details and is_holiday_date(employee, shift_details): shift_details = None # if no shift is found, find next or prev shift based on direction if not shift_details and next_shift_direction: shift_details = get_prev_or_next_shift(employee, for_timestamp, consider_default_shift, default_shift, next_shift_direction) return shift_details ","Returns a Shift Type for the given employee on the given date. (excluding the holidays) :param employee: Employee for which shift is required. :param for_timestamp: DateTime on which shift is required :param consider_default_shift: If set to true, default shift is taken when no shift assignment is found. :param next_shift_direction: One of: None, 'forward', 'reverse'. Direction to look for next shift if shift not found on given date. ",67,82,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_employee_shift(employee, for_timestamp=None, consider_default_shift=False, next_shift_direction=None): if for_timestamp is None: for_timestamp = now_datetime() shift_details = get_shift_for_timestamp(employee, for_timestamp) # if shift assignment is not found, consider default shift default_shift = frappe.db.get_value('Employee', employee, 'default_shift') if not shift_details and consider_default_shift: shift_details = get_shift_details(default_shift, for_timestamp.date()) # if its a holiday, reset if shift_details and is_holiday_date(employee, shift_details): shift_details = None # if no shift is found, find next or prev shift based on direction if not shift_details and next_shift_direction: shift_details = get_prev_or_next_shift(employee, for_timestamp, consider_default_shift, default_shift, next_shift_direction) return shift_details ``` ###Assistant : Returns a Shift Type for the given employee on the given date. (excluding the holidays) :param employee: Employee for which shift is required. :param for_timestamp: DateTime on which shift is required :param consider_default_shift: If set to true, default shift is taken when no shift assignment is found. :param next_shift_direction: One of: None, 'forward', 'reverse'. Direction to look for next shift if shift not found on given date. " 2490,"def get_create_form_class(self): self.create_model = self.get_create_model() if self.create_model: return get_task_form_class(self.create_model) else: return None "," To be called after dispatch(); returns the form class for creating a new task ",14,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_create_form_class(self): self.create_model = self.get_create_model() if self.create_model: return get_task_form_class(self.create_model) else: return None ``` ###Assistant : To be called after dispatch(); returns the form class for creating a new task " 2491,"def retrieve_image(self): image = self.storage.open(self.image_path, ""rb"") image_format = self.get_image_metadata_from_file(image) return (Image.open(image), image_format) ",Return a PIL Image instance stored at `image_path`.,8,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def retrieve_image(self): image = self.storage.open(self.image_path, ""rb"") image_format = self.get_image_metadata_from_file(image) return (Image.open(image), image_format) ``` ###Assistant : Return a PIL Image instance stored at `image_path`. " 2492,"def chebval(x, c, tensor=True): c = np.array(c, ndmin=1, copy=True) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: c = c.reshape(c.shape + (1,)*x.ndim) if len(c) == 1: c0 = c[0] c1 = 0 elif len(c) == 2: c0 = c[0] c1 = c[1] else: x2 = 2*x c0 = c[-2] c1 = c[-1] for i in range(3, len(c) + 1): tmp = c0 c0 = c[-i] - c1 c1 = tmp + c1*x2 return c0 + c1*x "," Evaluate a Chebyshev series at points x. If `c` is of length `n + 1`, this function returns the value: .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) The parameter `x` is converted to an array only if it is a tuple or a list, otherwise it is treated as a scalar. In either case, either `x` or its elements must support multiplication and addition both with themselves and with the elements of `c`. 
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If `c` is multidimensional, then the shape of the result depends on the value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that scalars have shape (,). Trailing zeros in the coefficients will be used in the evaluation, so they should be avoided if efficiency is a concern. Parameters ---------- x : array_like, compatible object If `x` is a list or tuple, it is converted to an ndarray, otherwise it is left unchanged and treated as a scalar. In either case, `x` or its elements must support addition and multiplication with themselves and with the elements of `c`. c : array_like Array of coefficients ordered so that the coefficients for terms of degree n are contained in c[n]. If `c` is multidimensional the remaining indices enumerate multiple polynomials. In the two dimensional case the coefficients may be thought of as stored in the columns of `c`. tensor : boolean, optional If True, the shape of the coefficient array is extended with ones on the right, one for each dimension of `x`. Scalars have dimension 0 for this action. The result is that every column of coefficients in `c` is evaluated for every element of `x`. If False, `x` is broadcast over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. .. versionadded:: 1.7.0 Returns ------- values : ndarray, algebra_like The shape of the return value is described above. See Also -------- chebval2d, chebgrid2d, chebval3d, chebgrid3d Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division. ",369,87,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def chebval(x, c, tensor=True): c = np.array(c, ndmin=1, copy=True) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) if isinstance(x, (tuple, list)): x = np.asarray(x) if isinstance(x, np.ndarray) and tensor: c = c.reshape(c.shape + (1,)*x.ndim) if len(c) == 1: c0 = c[0] c1 = 0 elif len(c) == 2: c0 = c[0] c1 = c[1] else: x2 = 2*x c0 = c[-2] c1 = c[-1] for i in range(3, len(c) + 1): tmp = c0 c0 = c[-i] - c1 c1 = tmp + c1*x2 return c0 + c1*x ``` ###Assistant : Evaluate a Chebyshev series at points x. If `c` is of length `n + 1`, this function returns the value: .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) The parameter `x` is converted to an array only if it is a tuple or a list, otherwise it is treated as a scalar. In either case, either `x` or its elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If `c` is multidimensional, then the shape of the result depends on the value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that scalars have shape (,). Trailing zeros in the coefficients will be used in the evaluation, so they should be avoided if efficiency is a concern. Parameters ---------- x : array_like, compatible object If `x` is a list or tuple, it is converted to an ndarray, otherwise it is left unchanged and treated as a scalar. In either case, `x` or its elements must support addition and multiplication with themselves and with the elements of `c`. c : array_like Array of coefficients ordered so that the coefficients for terms of degree n are contained in c[n]. 
If `c` is multidimensional the remaining indices enumerate multiple polynomials. In the two dimensional case the coefficients may be thought of as stored in the columns of `c`. tensor : boolean, optional If True, the shape of the coefficient array is extended with ones on the right, one for each dimension of `x`. Scalars have dimension 0 for this action. The result is that every column of coefficients in `c` is evaluated for every element of `x`. If False, `x` is broadcast over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. .. versionadded:: 1.7.0 Returns ------- values : ndarray, algebra_like The shape of the return value is described above. See Also -------- chebval2d, chebgrid2d, chebval3d, chebgrid3d Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division. " 2493,"def dagrun_queued(self): dag_id = request.form.get('dag_id') dag_run_id = request.form.get('dag_run_id') confirmed = request.form.get('confirmed') == 'true' origin = get_safe_url(request.form.get('origin')) return self._mark_dagrun_state_as_queued(dag_id, dag_run_id, confirmed, origin) ",Queue DagRun so tasks that haven't run yet can be started.,11,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dagrun_queued(self): dag_id = request.form.get('dag_id') dag_run_id = request.form.get('dag_run_id') confirmed = request.form.get('confirmed') == 'true' origin = get_safe_url(request.form.get('origin')) return self._mark_dagrun_state_as_queued(dag_id, dag_run_id, confirmed, origin) ``` ###Assistant : Queue DagRun so tasks that haven't run yet can be started. " 2494,"def data_to_internal(data): internal = data.copy() if 'extra_vars' in data: internal['extra_vars'] = json.loads(data['extra_vars']) if 'credentials' in data: internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials']) if 'inventory' in data: internal['inventory'] = Inventory.objects.get(pk=data['inventory']) if 'execution_environment' in data: internal['execution_environment'] = ExecutionEnvironment.objects.get(pk=data['execution_environment']) if 'labels' in data: internal['labels'] = [Label.objects.get(pk=_id) for _id in data['labels']] if 'instance_groups' in data: internal['instance_groups'] = [InstanceGroup.objects.get(pk=_id) for _id in data['instance_groups']] return internal # End of setup, tests start here @pytest.mark.django_db @pytest.mark.job_runtime_vars"," returns internal representation, model objects, dictionaries, etc as opposed to integer primary keys and JSON strings ",16,70,Python," ###User : Below is a Python method which does a task. 
 Create a documentation for the below code : ```Python def data_to_internal(data): internal = data.copy() if 'extra_vars' in data: internal['extra_vars'] = json.loads(data['extra_vars']) if 'credentials' in data: internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials']) if 'inventory' in data: internal['inventory'] = Inventory.objects.get(pk=data['inventory']) if 'execution_environment' in data: internal['execution_environment'] = ExecutionEnvironment.objects.get(pk=data['execution_environment']) if 'labels' in data: internal['labels'] = [Label.objects.get(pk=_id) for _id in data['labels']] if 'instance_groups' in data: internal['instance_groups'] = [InstanceGroup.objects.get(pk=_id) for _id in data['instance_groups']] return internal # End of setup, tests start here @pytest.mark.django_db @pytest.mark.job_runtime_vars ``` ###Assistant : returns internal representation, model objects, dictionaries, etc as opposed to integer primary keys and JSON strings " 2495,"def _generate_sys_info_str(*sys_info) -> str: if sys_info: return ""<br>"".join(sys_info).replace(""\n"", ""<br>"") return """" ","Format system info into a string. *sys_info: System info strings to be included. Returns: Formatted string containing system information. ",19,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _generate_sys_info_str(*sys_info) -> str: if sys_info: return ""<br>"".join(sys_info).replace(""\n"", ""<br>"") return """" ``` ###Assistant : Format system info into a string. *sys_info: System info strings to be included. Returns: Formatted string containing system information. " 2496,"def default(self, obj): if isinstance(obj, tf.TensorShape): items = obj.as_list() if obj.rank is not None else None return {""class_name"": ""TensorShape"", ""items"": items} return get_json_type(obj) ","Encodes objects for types that aren't handled by the default encoder.",11,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def default(self, obj): if isinstance(obj, tf.TensorShape): items = obj.as_list() if obj.rank is not None else None return {""class_name"": ""TensorShape"", ""items"": items} return get_json_type(obj) ``` ###Assistant : Encodes objects for types that aren't handled by the default encoder. " 2497,"async def _pause_and_wait_for_callback(self): self._pause_requested = True await self.async_media_pause() try:",Send pause and wait for the pause callback to be received.,11,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def _pause_and_wait_for_callback(self): self._pause_requested = True await self.async_media_pause() try: ``` ###Assistant : Send pause and wait for the pause callback to be received. " 2498,"def delete_checkpoint(self, checkpoint_path): # Ensure TrialCheckpoints are converted if isinstance(checkpoint_path, TrialCheckpoint): checkpoint_path = checkpoint_path.local_path try: checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path) except FileNotFoundError: # The checkpoint won't exist locally if the # trial was rescheduled to another worker. logger.debug( f""Local checkpoint not found during garbage collection: "" f""{self.trial_id} - {checkpoint_path}"" ) return else: if self.uses_cloud_checkpointing: self.storage_client.delete(self._storage_path(checkpoint_dir)) if os.path.exists(checkpoint_dir): shutil.rmtree(checkpoint_dir) ","Deletes local copy of checkpoint. Args: checkpoint_path (str): Path to checkpoint. ",11,56,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delete_checkpoint(self, checkpoint_path): # Ensure TrialCheckpoints are converted if isinstance(checkpoint_path, TrialCheckpoint): checkpoint_path = checkpoint_path.local_path try: checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path) except FileNotFoundError: # The checkpoint won't exist locally if the # trial was rescheduled to another worker. logger.debug( f""Local checkpoint not found during garbage collection: "" f""{self.trial_id} - {checkpoint_path}"" ) return else: if self.uses_cloud_checkpointing: self.storage_client.delete(self._storage_path(checkpoint_dir)) if os.path.exists(checkpoint_dir): shutil.rmtree(checkpoint_dir) ``` ###Assistant : Deletes local copy of checkpoint. Args: checkpoint_path (str): Path to checkpoint. " 2499,"def _resize_worker_group(self, state_dict, max_retries=10): old_workers = self.worker_group.num_workers self.worker_group.reset() time.sleep(1) for i in range(max_retries): new_workers = self.worker_group.new_workers_size() if new_workers: self._last_resize = time.time() startup_success = self._start_workers(int(new_workers)) if not startup_success: logger.info( f""Worker startup failed. 
Retrying "" f""{max_retries-i-1} more times."" ) self.worker_group.reset() continue self.load_state_dict(state_dict, blocking=True) if self.use_local and new_workers == 1 and old_workers > 1: # Major hack. If we go from LocalDistributedRunner to a # standard TorchRunner we have to manually reset the # dummy actor handle global vars. # TODO(amog): Refactor LocalDistributedTorchRunner to # not use global variables for resource reservation. ray.util.sgd.torch.distributed_torch_runner._dummy_cuda_actor = None ray.util.sgd.torch.distributed_torch_runner._dummy_cpu_actor = None return else: delay = 2 ** i logger.warning(""No new workers found. Retrying in %d sec."" % delay) time.sleep(delay) raise RuntimeError(""Exceeded max_retries for relaunching workers."") ","Resizes the number of remote workers based on available resources. Total number of workers will never exceed `num_workers` amount. Args: state_dict (dict): The state dict to load to all workers. max_retries (int): How many times to attempt to resize workers before failing. ",42,119,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _resize_worker_group(self, state_dict, max_retries=10): old_workers = self.worker_group.num_workers self.worker_group.reset() time.sleep(1) for i in range(max_retries): new_workers = self.worker_group.new_workers_size() if new_workers: self._last_resize = time.time() startup_success = self._start_workers(int(new_workers)) if not startup_success: logger.info( f""Worker startup failed. Retrying "" f""{max_retries-i-1} more times."" ) self.worker_group.reset() continue self.load_state_dict(state_dict, blocking=True) if self.use_local and new_workers == 1 and old_workers > 1: # Major hack. If we go from LocalDistributedRunner to a # standard TorchRunner we have to manually reset the # dummy actor handle global vars. # TODO(amog): Refactor LocalDistributedTorchRunner to # not use global variables for resource reservation. ray.util.sgd.torch.distributed_torch_runner._dummy_cuda_actor = None ray.util.sgd.torch.distributed_torch_runner._dummy_cpu_actor = None return else: delay = 2 ** i logger.warning(""No new workers found. Retrying in %d sec."" % delay) time.sleep(delay) raise RuntimeError(""Exceeded max_retries for relaunching workers."") ``` ###Assistant : Resizes the number of remote workers based on available resources. Total number of workers will never exceed `num_workers` amount. Args: state_dict (dict): The state dict to load to all workers. max_retries (int): How many times to attempt to resize workers before failing. " 2500,"def check_result_same(results, pipeline_results, check_keys): for key in check_keys: if results.get(key, None) is None: continue if isinstance(results[key], (BitmapMasks, PolygonMasks)): assert_allclose(pipeline_results[key].to_ndarray(), results[key].to_ndarray()) else: assert_allclose(pipeline_results[key], results[key]) ","Check whether the ``pipeline_results`` is the same with the predefined ``results``. Args: results (dict): Predefined results which should be the standard output of the transform pipeline. pipeline_results (dict): Results processed by the transform pipeline. check_keys (tuple): Keys that need to be checked between results and pipeline_results. ",46,23,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def check_result_same(results, pipeline_results, check_keys): for key in check_keys: if results.get(key, None) is None: continue if isinstance(results[key], (BitmapMasks, PolygonMasks)): assert_allclose(pipeline_results[key].to_ndarray(), results[key].to_ndarray()) else: assert_allclose(pipeline_results[key], results[key]) ``` ###Assistant : Check whether the ``pipeline_results`` is the same with the predefined ``results``. Args: results (dict): Predefined results which should be the standard output of the transform pipeline. pipeline_results (dict): Results processed by the transform pipeline. check_keys (tuple): Keys that need to be checked between results and pipeline_results. " 2501,"def get_rasa_sdk_version() -> Text: dependencies_filename = ""pyproject.toml"" toml_data = toml.load(project_root() / dependencies_filename) try: sdk_version = toml_data[""tool""][""poetry""][""dependencies""][""rasa-sdk""] return sdk_version[1:].strip() except AttributeError: raise Exception(f""Failed to find Rasa SDK version in {dependencies_filename}"") ",Find out what the referenced version of the Rasa SDK is.,11,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_rasa_sdk_version() -> Text: dependencies_filename = ""pyproject.toml"" toml_data = toml.load(project_root() / dependencies_filename) try: sdk_version = toml_data[""tool""][""poetry""][""dependencies""][""rasa-sdk""] return sdk_version[1:].strip() except AttributeError: raise Exception(f""Failed to find Rasa SDK version in {dependencies_filename}"") ``` ###Assistant : Find out what the referenced version of the Rasa SDK is. " 2502,"def test_update_omitted_version(self) -> None: version = self.get_success( self.handler.create_version( self.local_user, { ""algorithm"": ""m.megolm_backup.v1"", ""auth_data"": ""first_version_auth_data"", }, ) ) self.assertEqual(version, ""1"") self.get_success( self.handler.update_version( self.local_user, version, { ""algorithm"": ""m.megolm_backup.v1"", ""auth_data"": ""revised_first_version_auth_data"", }, ) ) # check we can retrieve it as the current version res = self.get_success(self.handler.get_version_info(self.local_user)) del res[""etag""] # etag is opaque, so don't test its contents self.assertDictEqual( res, { ""algorithm"": ""m.megolm_backup.v1"", ""auth_data"": ""revised_first_version_auth_data"", ""version"": version, ""count"": 0, }, ) ",Check that the update succeeds if the version is missing from the body,13,68,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_update_omitted_version(self) -> None: version = self.get_success( self.handler.create_version( self.local_user, { ""algorithm"": ""m.megolm_backup.v1"", ""auth_data"": ""first_version_auth_data"", }, ) ) self.assertEqual(version, ""1"") self.get_success( self.handler.update_version( self.local_user, version, { ""algorithm"": ""m.megolm_backup.v1"", ""auth_data"": ""revised_first_version_auth_data"", }, ) ) # check we can retrieve it as the current version res = self.get_success(self.handler.get_version_info(self.local_user)) del res[""etag""] # etag is opaque, so don't test its contents self.assertDictEqual( res, { ""algorithm"": ""m.megolm_backup.v1"", ""auth_data"": ""revised_first_version_auth_data"", ""version"": version, ""count"": 0, }, ) ``` ###Assistant : Check that the update succeeds if the version is missing from the body " 2503,"def parameter_value(self, other, u, v=None): from sympy.geometry.point import Point if not isinstance(other, GeometryEntity): other = Point(other, dim=self.ambient_dimension) if not isinstance(other, Point): raise ValueError(""other must be a point"") if other == self.p1: return other if isinstance(u, Symbol) and v is None: delta = self.arbitrary_point(u) - self.p1 eq = delta - (other - self.p1).unit sol = solve(eq, u, dict=True) elif isinstance(u, Symbol) and isinstance(v, Symbol): pt = self.arbitrary_point(u, v) sol = solve(pt - other, (u, v), dict=True) else: raise ValueError('expecting 1 or 2 symbols') if not sol: raise ValueError(""Given point is not on %s"" % func_name(self)) return sol[0] # {t: tval} or {u: uval, v: vval} ","Return the parameter(s) corresponding to the given point. Examples ======== >>> from sympy import pi, Plane >>> from sympy.abc import t, u, v >>> p = Plane((2, 0, 0), (0, 0, 1), (0, 1, 0)) By default, the parameter value returned defines a point that is a distance of 1 from the Plane's p1 value and in line with the given point: >>> on_circle = p.arbitrary_point(t).subs(t, pi/4) >>> on_circle.distance(p.p1) 1 >>> p.parameter_value(on_circle, t) {t: pi/4} Moving the point twice as far from p1 does not change the parameter value: >>> off_circle = p.p1 + (on_circle - p.p1)*2 >>> off_circle.distance(p.p1) 2 >>> p.parameter_value(off_circle, t) {t: pi/4} If the 2-value parameter is desired, supply the two parameter symbols and a replacement dictionary will be returned: >>> p.parameter_value(on_circle, u, v) {u: sqrt(10)/10, v: sqrt(10)/30} >>> p.parameter_value(off_circle, u, v) {u: sqrt(10)/5, v: sqrt(10)/15} ",139,104,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parameter_value(self, other, u, v=None): from sympy.geometry.point import Point if not isinstance(other, GeometryEntity): other = Point(other, dim=self.ambient_dimension) if not isinstance(other, Point): raise ValueError(""other must be a point"") if other == self.p1: return other if isinstance(u, Symbol) and v is None: delta = self.arbitrary_point(u) - self.p1 eq = delta - (other - self.p1).unit sol = solve(eq, u, dict=True) elif isinstance(u, Symbol) and isinstance(v, Symbol): pt = self.arbitrary_point(u, v) sol = solve(pt - other, (u, v), dict=True) else: raise ValueError('expecting 1 or 2 symbols') if not sol: raise ValueError(""Given point is not on %s"" % func_name(self)) return sol[0] # {t: tval} or {u: uval, v: vval} ``` ###Assistant : Return the parameter(s) corresponding to the given point. 
Examples ======== >>> from sympy import pi, Plane >>> from sympy.abc import t, u, v >>> p = Plane((2, 0, 0), (0, 0, 1), (0, 1, 0)) By default, the parameter value returned defines a point that is a distance of 1 from the Plane's p1 value and in line with the given point: >>> on_circle = p.arbitrary_point(t).subs(t, pi/4) >>> on_circle.distance(p.p1) 1 >>> p.parameter_value(on_circle, t) {t: pi/4} Moving the point twice as far from p1 does not change the parameter value: >>> off_circle = p.p1 + (on_circle - p.p1)*2 >>> off_circle.distance(p.p1) 2 >>> p.parameter_value(off_circle, t) {t: pi/4} If the 2-value parameter is desired, supply the two parameter symbols and a replacement dictionary will be returned: >>> p.parameter_value(on_circle, u, v) {u: sqrt(10)/10, v: sqrt(10)/30} >>> p.parameter_value(off_circle, u, v) {u: sqrt(10)/5, v: sqrt(10)/15} " 2504,"def orthographic_projection(X, camera): camera = camera.reshape((-1, 1, 3)) X_trans = X[:, :, :2] + camera[:, :, 1:] shape = paddle.shape(X_trans) X_2d = (camera[:, :, 0] * X_trans.reshape((shape[0], -1))).reshape(shape) return X_2d @register","Perform orthographic projection of 3D points X using the camera parameters Args: X: size = [B, N, 3] camera: size = [B, 3] Returns: Projected 2D points -- size = [B, N, 2] ",33,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def orthographic_projection(X, camera): camera = camera.reshape((-1, 1, 3)) X_trans = X[:, :, :2] + camera[:, :, 1:] shape = paddle.shape(X_trans) X_2d = (camera[:, :, 0] * X_trans.reshape((shape[0], -1))).reshape(shape) return X_2d @register ``` ###Assistant : Perform orthographic projection of 3D points X using the camera parameters Args: X: size = [B, N, 3] camera: size = [B, 3] Returns: Projected 2D points -- size = [B, N, 2] " 2505,"def _load_state_id(self, state_id): remote_calls = [ worker.load_state_stream.remote(state_id) for worker in self.remote_workers ] return remote_calls ",Loads the object with id `state_id` to all workers.,9,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _load_state_id(self, state_id): remote_calls = [ worker.load_state_stream.remote(state_id) for worker in self.remote_workers ] return remote_calls ``` ###Assistant : Loads the object with id `state_id` to all workers. " 2506,"def __new__(cls, p1, pt=None, angle=None, **kwargs): p1 = Point(p1, dim=2) if pt is not None and angle is None: try: p2 = Point(pt, dim=2) except (NotImplementedError, TypeError, ValueError): raise ValueError(filldedent()) if p1 == p2: raise ValueError('A Ray requires two distinct points.') elif angle is not None and pt is None: # we need to know if the angle is an odd multiple of pi/2 angle = sympify(angle) c = _pi_coeff(angle) p2 = None if c is not None: if c.is_Rational: if c.q == 2: if c.p == 1: p2 = p1 + Point(0, 1) elif c.p == 3: p2 = p1 + Point(0, -1) elif c.q == 1: if c.p == 0: p2 = p1 + Point(1, 0) elif c.p == 1: p2 = p1 + Point(-1, 0) if p2 is None: c *= S.Pi else: c = angle % (2*S.Pi) if not p2: m = 2*c/S.Pi left = And(1 < m, m < 3) # is it in quadrant 2 or 3? 
x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True)) y = Piecewise((-tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)), (tan(c), True)), True)) p2 = p1 + Point(x, y) else: raise ValueError('A 2nd point or keyword ""angle"" must be used.') return LinearEntity2D.__new__(cls, p1, p2, **kwargs) "," The 2nd argument was not a valid Point; if it was meant to be an angle it should be given with keyword ""angle"".",23,210,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __new__(cls, p1, pt=None, angle=None, **kwargs): p1 = Point(p1, dim=2) if pt is not None and angle is None: try: p2 = Point(pt, dim=2) except (NotImplementedError, TypeError, ValueError): raise ValueError(filldedent()) if p1 == p2: raise ValueError('A Ray requires two distinct points.') elif angle is not None and pt is None: # we need to know if the angle is an odd multiple of pi/2 angle = sympify(angle) c = _pi_coeff(angle) p2 = None if c is not None: if c.is_Rational: if c.q == 2: if c.p == 1: p2 = p1 + Point(0, 1) elif c.p == 3: p2 = p1 + Point(0, -1) elif c.q == 1: if c.p == 0: p2 = p1 + Point(1, 0) elif c.p == 1: p2 = p1 + Point(-1, 0) if p2 is None: c *= S.Pi else: c = angle % (2*S.Pi) if not p2: m = 2*c/S.Pi left = And(1 < m, m < 3) # is it in quadrant 2 or 3? x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True)) y = Piecewise((-tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)), (tan(c), True)), True)) p2 = p1 + Point(x, y) else: raise ValueError('A 2nd point or keyword ""angle"" must be used.') return LinearEntity2D.__new__(cls, p1, p2, **kwargs) ``` ###Assistant : The 2nd argument was not a valid Point; if it was meant to be an angle it should be given with keyword ""angle"". " 2507,"def delegate_command(args, host_state, exclude, require): # type: (EnvironmentConfig, HostState, t.List[str], t.List[str]) -> None con = host_state.controller_profile.get_origin_controller_connection() working_directory = host_state.controller_profile.get_working_directory() host_delegation = not isinstance(args.controller, OriginConfig) if host_delegation: if data_context().content.collection: content_root = os.path.join(working_directory, data_context().content.collection.directory) else: content_root = os.path.join(working_directory, 'ansible') ansible_bin_path = os.path.join(working_directory, 'ansible', 'bin') with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as payload_file: create_payload(args, payload_file.name) con.extract_archive(chdir=working_directory, src=payload_file) else: content_root = working_directory ansible_bin_path = ANSIBLE_BIN_PATH command = generate_command(args, host_state.controller_profile.python, ansible_bin_path, content_root, exclude, require) if isinstance(con, SshConnection): ssh = con.settings else: ssh = None options = [] if isinstance(args, IntegrationConfig) and args.controller.is_managed and all(target.is_managed for target in args.targets): if not args.allow_destructive: options.append('--allow-destructive') with support_container_context(args, ssh) as containers: # type: t.Optional[ContainerDatabase] if containers: options.extend(['--containers', json.dumps(containers.to_dict())]) # Run unit tests unprivileged to prevent stray writes to the source tree. # Also disconnect from the network once requirements have been installed. 
if isinstance(args, UnitsConfig) and isinstance(con, DockerConnection): pytest_user = 'pytest' writable_dirs = [ os.path.join(content_root, ResultType.JUNIT.relative_path), os.path.join(content_root, ResultType.COVERAGE.relative_path), ] con.run(['mkdir', '-p'] + writable_dirs) con.run(['chmod', '777'] + writable_dirs) con.run(['chmod', '755', working_directory]) con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)]) con.run(['useradd', pytest_user, '--create-home']) con.run(insert_options(command, options + ['--requirements-mode', 'only'])) container = con.inspect() networks = container.get_network_names() if networks is not None: for network in networks: con.disconnect_network(network) else: display.warning('Network disconnection is not supported (this is normal under podman). ' 'Tests will not be isolated from the network. Network-related tests may misbehave.') options.extend(['--requirements-mode', 'skip']) con.user = pytest_user success = False try: con.run(insert_options(command, options)) success = True finally: if host_delegation: download_results(args, con, content_root, success) ",Delegate execution based on the provided host state.,8,231,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delegate_command(args, host_state, exclude, require): # type: (EnvironmentConfig, HostState, t.List[str], t.List[str]) -> None con = host_state.controller_profile.get_origin_controller_connection() working_directory = host_state.controller_profile.get_working_directory() host_delegation = not isinstance(args.controller, OriginConfig) if host_delegation: if data_context().content.collection: content_root = os.path.join(working_directory, data_context().content.collection.directory) else: content_root = os.path.join(working_directory, 'ansible') ansible_bin_path = os.path.join(working_directory, 'ansible', 'bin') with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as payload_file: create_payload(args, payload_file.name) con.extract_archive(chdir=working_directory, src=payload_file) else: content_root = working_directory ansible_bin_path = ANSIBLE_BIN_PATH command = generate_command(args, host_state.controller_profile.python, ansible_bin_path, content_root, exclude, require) if isinstance(con, SshConnection): ssh = con.settings else: ssh = None options = [] if isinstance(args, IntegrationConfig) and args.controller.is_managed and all(target.is_managed for target in args.targets): if not args.allow_destructive: options.append('--allow-destructive') with support_container_context(args, ssh) as containers: # type: t.Optional[ContainerDatabase] if containers: options.extend(['--containers', json.dumps(containers.to_dict())]) # Run unit tests unprivileged to prevent stray writes to the source tree. # Also disconnect from the network once requirements have been installed. 
if isinstance(args, UnitsConfig) and isinstance(con, DockerConnection): pytest_user = 'pytest' writable_dirs = [ os.path.join(content_root, ResultType.JUNIT.relative_path), os.path.join(content_root, ResultType.COVERAGE.relative_path), ] con.run(['mkdir', '-p'] + writable_dirs) con.run(['chmod', '777'] + writable_dirs) con.run(['chmod', '755', working_directory]) con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)]) con.run(['useradd', pytest_user, '--create-home']) con.run(insert_options(command, options + ['--requirements-mode', 'only'])) container = con.inspect() networks = container.get_network_names() if networks is not None: for network in networks: con.disconnect_network(network) else: display.warning('Network disconnection is not supported (this is normal under podman). ' 'Tests will not be isolated from the network. Network-related tests may misbehave.') options.extend(['--requirements-mode', 'skip']) con.user = pytest_user success = False try: con.run(insert_options(command, options)) success = True finally: if host_delegation: download_results(args, con, content_root, success) ``` ###Assistant : Delegate execution based on the provided host state. " 2508,"def test_in_predicate_requires_an_iterable(tmp_path, engine, filter_value): path = tmp_path / ""gh_8720_pandas.parquet"" df = pd.DataFrame( {""A"": [1, 2, 3, 4], ""B"": [1, 1, 2, 2]}, ) df.to_parquet(path, engine=engine) with pytest.raises(TypeError, match=""Value of 'in' filter""): dd.read_parquet(path, engine=engine, filters=filter_value) # pandas to_parquet outputs a single file, dask outputs a folder with global # metadata that changes the filtering code path ddf = dd.from_pandas(df, npartitions=2) path = tmp_path / ""gh_8720_dask.parquet"" ddf.to_parquet(path, engine=engine) with pytest.raises(TypeError, match=""Value of 'in' filter""): dd.read_parquet(path, engine=engine, filters=filter_value) ",Regression test for https://github.com/dask/dask/issues/8720,4,75,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_in_predicate_requires_an_iterable(tmp_path, engine, filter_value): path = tmp_path / ""gh_8720_pandas.parquet"" df = pd.DataFrame( {""A"": [1, 2, 3, 4], ""B"": [1, 1, 2, 2]}, ) df.to_parquet(path, engine=engine) with pytest.raises(TypeError, match=""Value of 'in' filter""): dd.read_parquet(path, engine=engine, filters=filter_value) # pandas to_parquet outputs a single file, dask outputs a folder with global # metadata that changes the filtering code path ddf = dd.from_pandas(df, npartitions=2) path = tmp_path / ""gh_8720_dask.parquet"" ddf.to_parquet(path, engine=engine) with pytest.raises(TypeError, match=""Value of 'in' filter""): dd.read_parquet(path, engine=engine, filters=filter_value) ``` ###Assistant : Regression test for https://github.com/dask/dask/issues/8720 " 2509,"def only_targets(self, target_type): # type: (t.Type[THostConfig]) -> t.List[THostConfig] if not self.targets: raise Exception('There must be one or more targets.') assert type_guard(self.targets, target_type) return t.cast(t.List[THostConfig], self.targets) "," Return a list of target host configurations. Requires that there are one or more targets, all the specified type. ",19,25,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def only_targets(self, target_type): # type: (t.Type[THostConfig]) -> t.List[THostConfig] if not self.targets: raise Exception('There must be one or more targets.') assert type_guard(self.targets, target_type) return t.cast(t.List[THostConfig], self.targets) ``` ###Assistant : Return a list of target host configurations. Requires that there are one or more targets, all the specified type. " 2510,"def _get_animated_artists(self): return tuple([a for ax_ in self.ax.get_figure().get_axes() for a in ax_.get_children() if a.get_animated() and a not in self.artists]) "," Convenience method to get all animated artists of a figure, except those already present in self.artists. 'z_order' is ignored. ",19,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_animated_artists(self): return tuple([a for ax_ in self.ax.get_figure().get_axes() for a in ax_.get_children() if a.get_animated() and a not in self.artists]) ``` ###Assistant : Convenience method to get all animated artists of a figure, except those already present in self.artists. 'z_order' is ignored. " 2511,"def new_gridlines(self, ax): gridlines = GridlinesCollection( None, transform=ax.transData, colors=mpl.rcParams['grid.color'], linestyles=mpl.rcParams['grid.linestyle'], linewidths=mpl.rcParams['grid.linewidth']) ax._set_artist_props(gridlines) gridlines.set_grid_helper(self) ax.axes._set_artist_props(gridlines) # gridlines.set_clip_path(self.axes.patch) # set_clip_path need to be deferred after Axes.cla is completed. # It is done inside the cla. return gridlines "," Create and return a new GridlineCollection instance. *which* : ""major"" or ""minor"" *axis* : ""both"", ""x"" or ""y"" ",18,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def new_gridlines(self, ax): gridlines = GridlinesCollection( None, transform=ax.transData, colors=mpl.rcParams['grid.color'], linestyles=mpl.rcParams['grid.linestyle'], linewidths=mpl.rcParams['grid.linewidth']) ax._set_artist_props(gridlines) gridlines.set_grid_helper(self) ax.axes._set_artist_props(gridlines) # gridlines.set_clip_path(self.axes.patch) # set_clip_path need to be deferred after Axes.cla is completed. # It is done inside the cla. return gridlines ``` ###Assistant : Create and return a new GridlineCollection instance. *which* : ""major"" or ""minor"" *axis* : ""both"", ""x"" or ""y"" " 2512,"def duplicates_removed(it, already_seen=()): lst = [] seen = set() for i in it: if i in seen or i in already_seen: continue lst.append(i) seen.add(i) return lst "," Returns a list with duplicates removed from the iterable `it`. Order is preserved. ",13,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def duplicates_removed(it, already_seen=()): lst = [] seen = set() for i in it: if i in seen or i in already_seen: continue lst.append(i) seen.add(i) return lst ``` ###Assistant : Returns a list with duplicates removed from the iterable `it`. Order is preserved. " 2513,"def masked_all(shape, dtype=float): a = masked_array(np.empty(shape, dtype), mask=np.ones(shape, make_mask_descr(dtype))) return a "," Empty masked array with all elements masked. Return an empty masked array of the given shape and dtype, where all the data are masked. 
Parameters ---------- shape : int or tuple of ints Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``. dtype : dtype, optional Data type of the output. Returns ------- a : MaskedArray A masked array with all data masked. See Also -------- masked_all_like : Empty masked array modelled on an existing array. Examples -------- >>> import numpy.ma as ma >>> ma.masked_all((3, 3)) masked_array( data=[[--, --, --], [--, --, --], [--, --, --]], mask=[[ True, True, True], [ True, True, True], [ True, True, True]], fill_value=1e+20, dtype=float64) The `dtype` parameter defines the underlying data type. >>> a = ma.masked_all((3, 3)) >>> a.dtype dtype('float64') >>> a = ma.masked_all((3, 3), dtype=np.int32) >>> a.dtype dtype('int32') ",136,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def masked_all(shape, dtype=float): a = masked_array(np.empty(shape, dtype), mask=np.ones(shape, make_mask_descr(dtype))) return a ``` ###Assistant : Empty masked array with all elements masked. Return an empty masked array of the given shape and dtype, where all the data are masked. Parameters ---------- shape : int or tuple of ints Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``. dtype : dtype, optional Data type of the output. Returns ------- a : MaskedArray A masked array with all data masked. See Also -------- masked_all_like : Empty masked array modelled on an existing array. Examples -------- >>> import numpy.ma as ma >>> ma.masked_all((3, 3)) masked_array( data=[[--, --, --], [--, --, --], [--, --, --]], mask=[[ True, True, True], [ True, True, True], [ True, True, True]], fill_value=1e+20, dtype=float64) The `dtype` parameter defines the underlying data type. >>> a = ma.masked_all((3, 3)) >>> a.dtype dtype('float64') >>> a = ma.masked_all((3, 3), dtype=np.int32) >>> a.dtype dtype('int32') " 2514,"def closeness_centrality(G, u=None, distance=None, wf_improved=True): r if G.is_directed(): G = G.reverse() # create a reversed graph view if distance is not None: # use Dijkstra's algorithm with specified attribute as edge weight path_length = functools.partial( nx.single_source_dijkstra_path_length, weight=distance ) else: path_length = nx.single_source_shortest_path_length if u is None: nodes = G.nodes else: nodes = [u] closeness_centrality = {} for n in nodes: sp = path_length(G, n) totsp = sum(sp.values()) len_G = len(G) _closeness_centrality = 0.0 if totsp > 0.0 and len_G > 1: _closeness_centrality = (len(sp) - 1.0) / totsp # normalize to number of nodes-1 in connected part if wf_improved: s = (len(sp) - 1.0) / (len_G - 1) _closeness_centrality *= s closeness_centrality[n] = _closeness_centrality if u is not None: return closeness_centrality[u] else: return closeness_centrality @not_implemented_for(""directed"")","Compute closeness centrality for nodes. Closeness centrality [1]_ of a node `u` is the reciprocal of the average shortest path distance to `u` over all `n-1` reachable nodes. .. math:: C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, where `d(v, u)` is the shortest-path distance between `v` and `u`, and `n-1` is the number of nodes reachable from `u`. Notice that the closeness distance function computes the incoming distance to `u` for directed graphs. To use outward distance, act on `G.reverse()`. Notice that higher values of closeness indicate higher centrality. Wasserman and Faust propose an improved formula for graphs with more than one connected component. 
The result is ""a ratio of the fraction of actors in the group who are reachable, to the average distance"" from the reachable actors [2]_. You might think this scale factor is inverted but it is not. As is, nodes from small components receive a smaller closeness value. Letting `N` denote the number of nodes in the graph, .. math:: C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, Parameters ---------- G : graph A NetworkX graph u : node, optional Return only the value for node u distance : edge attribute key, optional (default=None) Use the specified edge attribute as the edge distance in shortest path calculations wf_improved : bool, optional (default=True) If True, scale by the fraction of nodes reachable. This gives the Wasserman and Faust improved formula. For single component graphs it is the same as the original formula. Returns ------- nodes : dictionary Dictionary of nodes with closeness centrality as the value. Examples -------- >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) >>> nx.closeness_centrality(G) {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75} See Also -------- betweenness_centrality, load_centrality, eigenvector_centrality, degree_centrality, incremental_closeness_centrality Notes ----- The closeness centrality is normalized to `(n-1)/(|G|-1)` where `n` is the number of nodes in the connected part of graph containing the node. If the graph is not completely connected, this algorithm computes the closeness centrality for each connected part separately scaled by that parts size. If the 'distance' keyword is set to an edge attribute key then the shortest-path length will be computed using Dijkstra's algorithm with that edge attribute as the edge weight. The closeness centrality uses *inward* distance to a node, not outward. If you want to use outword distances apply the function to `G.reverse()` In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the outward distance rather than the inward distance. If you use a 'distance' keyword and a DiGraph, your results will change between v2.2 and v2.3. References ---------- .. [1] Linton C. Freeman: Centrality in networks: I. Conceptual clarification. Social Networks 1:215-239, 1979. https://doi.org/10.1016/0378-8733(78)90021-7 .. [2] pg. 201 of Wasserman, S. and Faust, K., Social Network Analysis: Methods and Applications, 1994, Cambridge University Press. ",467,125,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def closeness_centrality(G, u=None, distance=None, wf_improved=True): r if G.is_directed(): G = G.reverse() # create a reversed graph view if distance is not None: # use Dijkstra's algorithm with specified attribute as edge weight path_length = functools.partial( nx.single_source_dijkstra_path_length, weight=distance ) else: path_length = nx.single_source_shortest_path_length if u is None: nodes = G.nodes else: nodes = [u] closeness_centrality = {} for n in nodes: sp = path_length(G, n) totsp = sum(sp.values()) len_G = len(G) _closeness_centrality = 0.0 if totsp > 0.0 and len_G > 1: _closeness_centrality = (len(sp) - 1.0) / totsp # normalize to number of nodes-1 in connected part if wf_improved: s = (len(sp) - 1.0) / (len_G - 1) _closeness_centrality *= s closeness_centrality[n] = _closeness_centrality if u is not None: return closeness_centrality[u] else: return closeness_centrality @not_implemented_for(""directed"") ``` ###Assistant : Compute closeness centrality for nodes. 
Closeness centrality [1]_ of a node `u` is the reciprocal of the average shortest path distance to `u` over all `n-1` reachable nodes. .. math:: C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, where `d(v, u)` is the shortest-path distance between `v` and `u`, and `n-1` is the number of nodes reachable from `u`. Notice that the closeness distance function computes the incoming distance to `u` for directed graphs. To use outward distance, act on `G.reverse()`. Notice that higher values of closeness indicate higher centrality. Wasserman and Faust propose an improved formula for graphs with more than one connected component. The result is ""a ratio of the fraction of actors in the group who are reachable, to the average distance"" from the reachable actors [2]_. You might think this scale factor is inverted but it is not. As is, nodes from small components receive a smaller closeness value. Letting `N` denote the number of nodes in the graph, .. math:: C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, Parameters ---------- G : graph A NetworkX graph u : node, optional Return only the value for node u distance : edge attribute key, optional (default=None) Use the specified edge attribute as the edge distance in shortest path calculations wf_improved : bool, optional (default=True) If True, scale by the fraction of nodes reachable. This gives the Wasserman and Faust improved formula. For single component graphs it is the same as the original formula. Returns ------- nodes : dictionary Dictionary of nodes with closeness centrality as the value. Examples -------- >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) >>> nx.closeness_centrality(G) {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75} See Also -------- betweenness_centrality, load_centrality, eigenvector_centrality, degree_centrality, incremental_closeness_centrality Notes ----- The closeness centrality is normalized to `(n-1)/(|G|-1)` where `n` is the number of nodes in the connected part of graph containing the node. If the graph is not completely connected, this algorithm computes the closeness centrality for each connected part separately scaled by that parts size. If the 'distance' keyword is set to an edge attribute key then the shortest-path length will be computed using Dijkstra's algorithm with that edge attribute as the edge weight. The closeness centrality uses *inward* distance to a node, not outward. If you want to use outword distances apply the function to `G.reverse()` In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the outward distance rather than the inward distance. If you use a 'distance' keyword and a DiGraph, your results will change between v2.2 and v2.3. References ---------- .. [1] Linton C. Freeman: Centrality in networks: I. Conceptual clarification. Social Networks 1:215-239, 1979. https://doi.org/10.1016/0378-8733(78)90021-7 .. [2] pg. 201 of Wasserman, S. and Faust, K., Social Network Analysis: Methods and Applications, 1994, Cambridge University Press. " 2515,"async def _remove_old_push_actions_that_have_rotated(self) -> None: # We want to clear out anything that is older than a day that *has* already # been rotated. 
rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol( table=""event_push_summary_stream_ordering"", keyvalues={}, retcol=""stream_ordering"", ) max_stream_ordering_to_delete = min( rotated_upto_stream_ordering, self.stream_ordering_day_ago ) ",Clear out old push actions that have been summarised.,9,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def _remove_old_push_actions_that_have_rotated(self) -> None: # We want to clear out anything that is older than a day that *has* already # been rotated. rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol( table=""event_push_summary_stream_ordering"", keyvalues={}, retcol=""stream_ordering"", ) max_stream_ordering_to_delete = min( rotated_upto_stream_ordering, self.stream_ordering_day_ago ) ``` ###Assistant : Clear out old push actions that have been summarised. " 2516,"def test_indent(): multiline_string = indented_multiline_string = assert indented_multiline_string == _indent(multiline_string, 4) ","Assert that indenting a multiline string by 4 spaces prepends 4 spaces before each new line.test test1 test2 test3 test test1 test2 test3",23,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_indent(): multiline_string = indented_multiline_string = assert indented_multiline_string == _indent(multiline_string, 4) ``` ###Assistant : Assert that indenting a multiline string by 4 spaces prepends 4 spaces before each new line.test test1 test2 test3 test test1 test2 test3 " 2517,"def update_inputs_outputs_dims(model, input_dims, output_dims): # type: (ModelProto, Dict[Text, List[Any]], Dict[Text, List[Any]]) -> ModelProto dim_param_set = set() # type: Set[Text] "," This function updates the dimension sizes of the model's inputs and outputs to the values provided in input_dims and output_dims. if the dim value provided is negative, a unique dim_param will be set for that dimension. Example. if we have the following shape for inputs and outputs: shape(input_1) = ('b', 3, 'w', 'h') shape(input_2) = ('b', 4) and shape(output) = ('b', 'd', 5) The parameters can be provided as: input_dims = { ""input_1"": ['b', 3, 'w', 'h'], ""input_2"": ['b', 4], } output_dims = { ""output"": ['b', -1, 5] } Putting it together: model = onnx.load('model.onnx') updated_model = update_inputs_outputs_dims(model, input_dims, output_dims) onnx.save(updated_model, 'model.onnx') ",102,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_inputs_outputs_dims(model, input_dims, output_dims): # type: (ModelProto, Dict[Text, List[Any]], Dict[Text, List[Any]]) -> ModelProto dim_param_set = set() # type: Set[Text] ``` ###Assistant : This function updates the dimension sizes of the model's inputs and outputs to the values provided in input_dims and output_dims. if the dim value provided is negative, a unique dim_param will be set for that dimension. Example. 
if we have the following shape for inputs and outputs: shape(input_1) = ('b', 3, 'w', 'h') shape(input_2) = ('b', 4) and shape(output) = ('b', 'd', 5) The parameters can be provided as: input_dims = { ""input_1"": ['b', 3, 'w', 'h'], ""input_2"": ['b', 4], } output_dims = { ""output"": ['b', -1, 5] } Putting it together: model = onnx.load('model.onnx') updated_model = update_inputs_outputs_dims(model, input_dims, output_dims) onnx.save(updated_model, 'model.onnx') " 2518,"def get_aliased_columns(aliased_columns, model_alias, targets, mode=None): for col in targets: if mode == 'input': if str(col.parts[0]) != model_alias and col.alias is not None: aliased_columns[aliased_columns.index(col.parts[-1])] = str(col.alias) if mode == 'output': if str(col.parts[0]) == model_alias and col.alias is not None: aliased_columns[aliased_columns.index('prediction')] = str(col.alias) return aliased_columns", This method assumes mdb_sql will alert if there are two columns with the same alias ,15,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_aliased_columns(aliased_columns, model_alias, targets, mode=None): for col in targets: if mode == 'input': if str(col.parts[0]) != model_alias and col.alias is not None: aliased_columns[aliased_columns.index(col.parts[-1])] = str(col.alias) if mode == 'output': if str(col.parts[0]) == model_alias and col.alias is not None: aliased_columns[aliased_columns.index('prediction')] = str(col.alias) return aliased_columns ``` ###Assistant : This method assumes mdb_sql will alert if there are two columns with the same alias " 2519,"def get_model_urls(app_label, model_name): paths = [] # Retrieve registered views for this model try: views = registry['views'][app_label][model_name] except KeyError: # No views have been registered for this model views = [] for view in views: # Import the view class or function callable = import_string(view['path']) if issubclass(callable, View): callable = callable.as_view() # Create a path to the view paths.append( path(f""{view['name']}/"", callable, name=f""{model_name}_{view['name']}"", kwargs=view['kwargs']) ) return paths "," Return a list of URL paths for detail views registered to the given model. Args: app_label: App/plugin name model_name: Model name ",21,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_model_urls(app_label, model_name): paths = [] # Retrieve registered views for this model try: views = registry['views'][app_label][model_name] except KeyError: # No views have been registered for this model views = [] for view in views: # Import the view class or function callable = import_string(view['path']) if issubclass(callable, View): callable = callable.as_view() # Create a path to the view paths.append( path(f""{view['name']}/"", callable, name=f""{model_name}_{view['name']}"", kwargs=view['kwargs']) ) return paths ``` ###Assistant : Return a list of URL paths for detail views registered to the given model. Args: app_label: App/plugin name model_name: Model name " 2520,"def head(self, url, **kwargs): r kwargs.setdefault(""allow_redirects"", False) return self.request(""HEAD"", url, **kwargs) ","Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response ",24,11,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def head(self, url, **kwargs): r kwargs.setdefault(""allow_redirects"", False) return self.request(""HEAD"", url, **kwargs) ``` ###Assistant : Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response " 2521,"async def get_and_submit_flow_runs(self) -> List[FlowRun]: if not self.started: raise RuntimeError(""Agent is not started. Use `async with OrionAgent()...`"") self.logger.debug(""Checking for flow runs..."") submittable_runs = await self.client.read_flow_runs( sort=FlowRunSort.NEXT_SCHEDULED_START_TIME_ASC, flow_run_filter=self.flow_run_query_filter(), ) for flow_run in submittable_runs: self.logger.info(f""Submitting flow run '{flow_run.id}'"") self.submitting_flow_run_ids.add(flow_run.id) self.task_group.start_soon( self.submit_run, flow_run, ) return submittable_runs "," Queries for scheduled flow runs and submits them for execution in parallel ",12,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def get_and_submit_flow_runs(self) -> List[FlowRun]: if not self.started: raise RuntimeError(""Agent is not started. Use `async with OrionAgent()...`"") self.logger.debug(""Checking for flow runs..."") submittable_runs = await self.client.read_flow_runs( sort=FlowRunSort.NEXT_SCHEDULED_START_TIME_ASC, flow_run_filter=self.flow_run_query_filter(), ) for flow_run in submittable_runs: self.logger.info(f""Submitting flow run '{flow_run.id}'"") self.submitting_flow_run_ids.add(flow_run.id) self.task_group.start_soon( self.submit_run, flow_run, ) return submittable_runs ``` ###Assistant : Queries for scheduled flow runs and submits them for execution in parallel " 2522,"def get_quoted_string(value): quoted_string = QuotedString() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) quoted_string.append(token) token, value = get_bare_quoted_string(value) quoted_string.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) quoted_string.append(token) return quoted_string, value ","quoted-string = [CFWS] [CFWS] 'bare-quoted-string' is an intermediate class defined by this parser and not by the RFC grammar. It is the quoted string without any attached CFWS. ",29,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_quoted_string(value): quoted_string = QuotedString() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) quoted_string.append(token) token, value = get_bare_quoted_string(value) quoted_string.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) quoted_string.append(token) return quoted_string, value ``` ###Assistant : quoted-string = [CFWS] [CFWS] 'bare-quoted-string' is an intermediate class defined by this parser and not by the RFC grammar. It is the quoted string without any attached CFWS. 
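A minimal usage sketch for the quoted-string parser documented above, assuming `get_quoted_string` is the function shipped in CPython's `email._header_value_parser` module; it only illustrates the (token, remainder) return pattern visible in the code.

```Python
# Minimal sketch: assumes CPython's email._header_value_parser module.
# The parser returns the QuotedString token (with any surrounding CFWS
# attached) plus whatever part of the value it did not consume.
from email._header_value_parser import get_quoted_string

token, rest = get_quoted_string(' "hello world" ; charset=utf-8')
print(str(token))   # the quoted string, with any leading/trailing CFWS attached
print(repr(rest))   # the unparsed remainder of the header value
```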
" 2523,"def test_defined_keyword_exist_in_schema(self, keyword, discovered_catalog): schemas_errors = [] for stream_name, stream in discovered_catalog.items(): check_result = find_keyword_schema(stream.json_schema, key=keyword) if check_result: schemas_errors.append(stream_name) assert not schemas_errors, f""Found not allowed `{keyword}` keyword for selected streams: {schemas_errors}."" ",Checking for the presence of not allowed keywords within each json schema,12,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_defined_keyword_exist_in_schema(self, keyword, discovered_catalog): schemas_errors = [] for stream_name, stream in discovered_catalog.items(): check_result = find_keyword_schema(stream.json_schema, key=keyword) if check_result: schemas_errors.append(stream_name) assert not schemas_errors, f""Found not allowed `{keyword}` keyword for selected streams: {schemas_errors}."" ``` ###Assistant : Checking for the presence of not allowed keywords within each json schema " 2524,"def normalize_path_patterns(patterns): patterns = [os.path.normcase(p) for p in patterns] dir_suffixes = {""%s*"" % path_sep for path_sep in {""/"", os.sep}} norm_patterns = [] for pattern in patterns: for dir_suffix in dir_suffixes: if pattern.endswith(dir_suffix): norm_patterns.append(pattern[: -len(dir_suffix)]) break else: norm_patterns.append(pattern) return norm_patterns ",Normalize an iterable of glob style patterns based on OS.,10,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def normalize_path_patterns(patterns): patterns = [os.path.normcase(p) for p in patterns] dir_suffixes = {""%s*"" % path_sep for path_sep in {""/"", os.sep}} norm_patterns = [] for pattern in patterns: for dir_suffix in dir_suffixes: if pattern.endswith(dir_suffix): norm_patterns.append(pattern[: -len(dir_suffix)]) break else: norm_patterns.append(pattern) return norm_patterns ``` ###Assistant : Normalize an iterable of glob style patterns based on OS. " 2525,"def get_protobuf_schema() -> GeneratedProtocolMessageType: return ErrorResponseMessage_PB ","Return the type of protobuf object which stores a class of this type As a part of serialization and deserialization, we need the ability to lookup the protobuf object type directly from the object type. This static method allows us to do this. Importantly, this method is also used to create the reverse lookup ability within the metaclass of Serializable. In the metaclass, it calls this method and then it takes whatever type is returned from this method and adds an attribute to it with the type of this class attached to it. See the MetaSerializable class for details. :return: the type of protobuf object which corresponds to this class. :rtype: GeneratedProtocolMessageType ",112,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_protobuf_schema() -> GeneratedProtocolMessageType: return ErrorResponseMessage_PB ``` ###Assistant : Return the type of protobuf object which stores a class of this type As a part of serialization and deserialization, we need the ability to lookup the protobuf object type directly from the object type. This static method allows us to do this. Importantly, this method is also used to create the reverse lookup ability within the metaclass of Serializable. 
In the metaclass, it calls this method and then it takes whatever type is returned from this method and adds an attribute to it with the type of this class attached to it. See the MetaSerializable class for details. :return: the type of protobuf object which corresponds to this class. :rtype: GeneratedProtocolMessageType " 2526,"def test_remote_media_cache_retention(self) -> None: # Advance 31 days (in seconds) self.reactor.advance(31 * 24 * 60 * 60) # Check that media has been correctly purged. # Local media should be unaffected. # Remote media accessed <30 days ago should still exist. self._assert_if_mxc_uris_purged( purged=[ (self.remote_server_name, self.remote_not_recently_accessed_media), ], not_purged=[ (self.remote_server_name, self.remote_recently_accessed_media), (self.hs.config.server.server_name, self.local_recently_accessed_media), ( self.hs.config.server.server_name, self.local_not_recently_accessed_media, ), (self.hs.config.server.server_name, self.local_never_accessed_media), ], ) "," Tests that entries from the remote media cache that have not been accessed recently is purged, while local media is unaffected. ",21,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_remote_media_cache_retention(self) -> None: # Advance 31 days (in seconds) self.reactor.advance(31 * 24 * 60 * 60) # Check that media has been correctly purged. # Local media should be unaffected. # Remote media accessed <30 days ago should still exist. self._assert_if_mxc_uris_purged( purged=[ (self.remote_server_name, self.remote_not_recently_accessed_media), ], not_purged=[ (self.remote_server_name, self.remote_recently_accessed_media), (self.hs.config.server.server_name, self.local_recently_accessed_media), ( self.hs.config.server.server_name, self.local_not_recently_accessed_media, ), (self.hs.config.server.server_name, self.local_never_accessed_media), ], ) ``` ###Assistant : Tests that entries from the remote media cache that have not been accessed recently is purged, while local media is unaffected. " 2527,"def setmodulation(self, modu): # type: (int) -> bool # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11 # noqa: E501 self._check_npcap_requirement() _modus = { 0: ""dsss"", 1: ""fhss"", 2: ""irbaseband"", 3: ""ofdm"", 4: ""hrdss"", 5: ""erp"", 6: ""ht"", 7: ""vht"", 8: ""ihv"", 9: ""mimo-ofdm"", 10: ""mimo-ofdm"", } m = _modus.get(modu, ""unknown"") if isinstance(modu, int) else modu return self._npcap_set(""modu"", str(m)) ","Set the interface modulation. It can be: - 0: dsss - 1: fhss - 2: irbaseband - 3: ofdm - 4: hrdss - 5: erp - 6: ht - 7: vht - 8: ihv - 9: mimo-ofdm - 10: mimo-ofdm - the value directly Only available with Npcap.",48,54,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def setmodulation(self, modu): # type: (int) -> bool # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11 # noqa: E501 self._check_npcap_requirement() _modus = { 0: ""dsss"", 1: ""fhss"", 2: ""irbaseband"", 3: ""ofdm"", 4: ""hrdss"", 5: ""erp"", 6: ""ht"", 7: ""vht"", 8: ""ihv"", 9: ""mimo-ofdm"", 10: ""mimo-ofdm"", } m = _modus.get(modu, ""unknown"") if isinstance(modu, int) else modu return self._npcap_set(""modu"", str(m)) ``` ###Assistant : Set the interface modulation. 
It can be: - 0: dsss - 1: fhss - 2: irbaseband - 3: ofdm - 4: hrdss - 5: erp - 6: ht - 7: vht - 8: ihv - 9: mimo-ofdm - 10: mimo-ofdm - the value directly Only available with Npcap. " 2528,"def _on_connection_error(self, connection, exception): log.error(""Failed to connect"", exc_info=True) "," Invoked by pika when connection on connection error :param connection: :param exception: :return: ",13,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _on_connection_error(self, connection, exception): log.error(""Failed to connect"", exc_info=True) ``` ###Assistant : Invoked by pika when connection on connection error :param connection: :param exception: :return: " 2529,"def is_monotonic_decreasing(self) -> bool: # monotonic decreasing if and only if reverse is monotonic increasing return self[::-1].is_monotonic_increasing "," Return a boolean if the values are equal or decreasing. ",10,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_monotonic_decreasing(self) -> bool: # monotonic decreasing if and only if reverse is monotonic increasing return self[::-1].is_monotonic_increasing ``` ###Assistant : Return a boolean if the values are equal or decreasing. " 2530,"def test_sends_assignment_notification(self): url = f""/api/0/issues/{self.group.id}/"" with self.tasks(): response = self.client.put(url, format=""json"", data={""assignedTo"": self.user.username}) assert response.status_code == 200, response.content msg = mail.outbox[0] # check the txt version assert f""assigned {self.short_id} to themselves"" in msg.body # check the html version assert f""{self.short_id} to themselves
    "" in msg.alternatives[0][0] attachment, text = get_attachment() assert text == f""Issue assigned to {self.name} by themselves"" assert attachment[""title""] == self.group.title assert ( attachment[""footer""] == f""{self.project.slug} | "" ) "," Test that an email AND Slack notification are sent with the expected values when an issue is assigned. ",18,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_sends_assignment_notification(self): url = f""/api/0/issues/{self.group.id}/"" with self.tasks(): response = self.client.put(url, format=""json"", data={""assignedTo"": self.user.username}) assert response.status_code == 200, response.content msg = mail.outbox[0] # check the txt version assert f""assigned {self.short_id} to themselves"" in msg.body # check the html version assert f""{self.short_id} to themselves
    "" in msg.alternatives[0][0] attachment, text = get_attachment() assert text == f""Issue assigned to {self.name} by themselves"" assert attachment[""title""] == self.group.title assert ( attachment[""footer""] == f""{self.project.slug} | "" ) ``` ###Assistant : Test that an email AND Slack notification are sent with the expected values when an issue is assigned. " 2531,"def _ask_default(self, default=''): self.prompt_output.write('Please enter the default value as valid Python.') if default: self.prompt_output.write( f""Accept the default '{default}' by pressing 'Enter' or "" f""provide another value."" ) self.prompt_output.write( 'The datetime and django.utils.timezone modules are available, so ' 'it is possible to provide e.g. timezone.now as a value.' ) self.prompt_output.write(""Type 'exit' to exit this prompt"") while True: if default: prompt = ""[default: {}] >>> "".format(default) else: prompt = "">>> "" self.prompt_output.write(prompt, ending='') code = input() if not code and default: code = default if not code: self.prompt_output.write(""Please enter some code, or 'exit' (without quotes) to exit."") elif code == ""exit"": sys.exit(1) else: try: return eval(code, {}, {'datetime': datetime, 'timezone': timezone}) except (SyntaxError, NameError) as e: self.prompt_output.write('Invalid input: %s' % e) "," Prompt for a default value. The ``default`` argument allows providing a custom default value (as a string) which will be shown to the user and used as the return value if the user doesn't provide any other input. ",38,119,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _ask_default(self, default=''): self.prompt_output.write('Please enter the default value as valid Python.') if default: self.prompt_output.write( f""Accept the default '{default}' by pressing 'Enter' or "" f""provide another value."" ) self.prompt_output.write( 'The datetime and django.utils.timezone modules are available, so ' 'it is possible to provide e.g. timezone.now as a value.' ) self.prompt_output.write(""Type 'exit' to exit this prompt"") while True: if default: prompt = ""[default: {}] >>> "".format(default) else: prompt = "">>> "" self.prompt_output.write(prompt, ending='') code = input() if not code and default: code = default if not code: self.prompt_output.write(""Please enter some code, or 'exit' (without quotes) to exit."") elif code == ""exit"": sys.exit(1) else: try: return eval(code, {}, {'datetime': datetime, 'timezone': timezone}) except (SyntaxError, NameError) as e: self.prompt_output.write('Invalid input: %s' % e) ``` ###Assistant : Prompt for a default value. The ``default`` argument allows providing a custom default value (as a string) which will be shown to the user and used as the return value if the user doesn't provide any other input. " 2532,"def reset_modules(self) -> None: self.modules = {} self.update_modules() self.parse_modules() ","Reset the loaded modules list. This is called from cleanup to clear temporarily loaded modules.",15,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reset_modules(self) -> None: self.modules = {} self.update_modules() self.parse_modules() ``` ###Assistant : Reset the loaded modules list. This is called from cleanup to clear temporarily loaded modules. 
" 2533,"def test_ddppo_compilation(self): config = ppo.ddppo.DEFAULT_CONFIG.copy() config[""num_gpus_per_worker""] = 0 num_iterations = 2 for _ in framework_iterator(config, frameworks=""torch""): trainer = ppo.ddppo.DDPPOTrainer(config=config, env=""CartPole-v0"") for i in range(num_iterations): results = trainer.train() check_train_results(results) print(results) # Make sure, weights on all workers are the same (including # local one). weights = trainer.workers.foreach_worker(lambda w: w.get_weights()) for w in weights[1:]: check(w, weights[0]) check_compute_single_action(trainer) trainer.stop() ",Test whether a DDPPOTrainer can be built with both frameworks.,10,56,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_ddppo_compilation(self): config = ppo.ddppo.DEFAULT_CONFIG.copy() config[""num_gpus_per_worker""] = 0 num_iterations = 2 for _ in framework_iterator(config, frameworks=""torch""): trainer = ppo.ddppo.DDPPOTrainer(config=config, env=""CartPole-v0"") for i in range(num_iterations): results = trainer.train() check_train_results(results) print(results) # Make sure, weights on all workers are the same (including # local one). weights = trainer.workers.foreach_worker(lambda w: w.get_weights()) for w in weights[1:]: check(w, weights[0]) check_compute_single_action(trainer) trainer.stop() ``` ###Assistant : Test whether a DDPPOTrainer can be built with both frameworks. " 2534,"def log_cosh(y_true, y_pred): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) def _logcosh(x): return x + tf.math.softplus(-2. * x) - tf.cast( tf.math.log(2.), x.dtype) return backend.mean(_logcosh(y_pred - y_true), axis=-1) @keras_export('keras.metrics.categorical_crossentropy', 'keras.losses.categorical_crossentropy') @tf.__internal__.dispatch.add_dispatch_support","Logarithm of the hyperbolic cosine of the prediction error. `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly like the mean squared error, but will not be so strongly affected by the occasional wildly incorrect prediction. Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.logcosh(y_true, y_pred) >>> assert loss.shape == (2,) >>> x = y_pred - y_true >>> assert np.allclose( ... loss.numpy(), ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - tf.math.log(2.), axis=-1), ... atol=1e-5) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Logcosh error values. shape = `[batch_size, d0, .. dN-1]`. ",131,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def log_cosh(y_true, y_pred): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) def _logcosh(x): return x + tf.math.softplus(-2. * x) - tf.cast( tf.math.log(2.), x.dtype) return backend.mean(_logcosh(y_pred - y_true), axis=-1) @keras_export('keras.metrics.categorical_crossentropy', 'keras.losses.categorical_crossentropy') @tf.__internal__.dispatch.add_dispatch_support ``` ###Assistant : Logarithm of the hyperbolic cosine of the prediction error. `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and to `abs(x) - log(2)` for large `x`. 
This means that 'logcosh' works mostly like the mean squared error, but will not be so strongly affected by the occasional wildly incorrect prediction. Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.logcosh(y_true, y_pred) >>> assert loss.shape == (2,) >>> x = y_pred - y_true >>> assert np.allclose( ... loss.numpy(), ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - tf.math.log(2.), axis=-1), ... atol=1e-5) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Logcosh error values. shape = `[batch_size, d0, .. dN-1]`. " 2535,"def make_predict_function(self, force=False): if self.predict_function is not None and not force: return self.predict_function ","Creates a function that executes one step of inference. This method can be overridden to support custom inference logic. This method is called by `Model.predict` and `Model.predict_on_batch`. Typically, this method directly controls `tf.function` and `tf.distribute.Strategy` settings, and delegates the actual evaluation logic to `Model.predict_step`. This function is cached the first time `Model.predict` or `Model.predict_on_batch` is called. The cache is cleared whenever `Model.compile` is called. You can skip the cache and generate again the function with `force=True`. Args: force: Whether to regenerate the predict function and skip the cached function if available. Returns: Function. The function created by this method should accept a `tf.data.Iterator`, and return the outputs of the `Model`. ",110,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_predict_function(self, force=False): if self.predict_function is not None and not force: return self.predict_function ``` ###Assistant : Creates a function that executes one step of inference. This method can be overridden to support custom inference logic. This method is called by `Model.predict` and `Model.predict_on_batch`. Typically, this method directly controls `tf.function` and `tf.distribute.Strategy` settings, and delegates the actual evaluation logic to `Model.predict_step`. This function is cached the first time `Model.predict` or `Model.predict_on_batch` is called. The cache is cleared whenever `Model.compile` is called. You can skip the cache and generate again the function with `force=True`. Args: force: Whether to regenerate the predict function and skip the cached function if available. Returns: Function. The function created by this method should accept a `tf.data.Iterator`, and return the outputs of the `Model`. " 2536,"def fit(self, X, y, **fit_params): self._validate_params() return self._fit(X, y, **fit_params) ","Fit the RFE model and then the underlying estimator on the selected features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values. **fit_params : dict Additional parameters passed to the `fit` method of the underlying estimator. Returns ------- self : object Fitted estimator. ",58,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit(self, X, y, **fit_params): self._validate_params() return self._fit(X, y, **fit_params) ``` ###Assistant : Fit the RFE model and then the underlying estimator on the selected features. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values. **fit_params : dict Additional parameters passed to the `fit` method of the underlying estimator. Returns ------- self : object Fitted estimator. " 2537,"def test_invalid_number_selection_fails(): number_string = ""99999999"" result = get_first_menu_and_fail(number_string) lines = result.stdout.splitlines() # Strange string addition are due to coloring, I believe assert lines[-1] == f""\x1b[31mInvalid selection {number_string}\x1b[0m"" assert result.exit_code == 1 "," We need to make sure that if we give an invalid number that the CLI will exit. ",17,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_invalid_number_selection_fails(): number_string = ""99999999"" result = get_first_menu_and_fail(number_string) lines = result.stdout.splitlines() # Strange string addition are due to coloring, I believe assert lines[-1] == f""\x1b[31mInvalid selection {number_string}\x1b[0m"" assert result.exit_code == 1 ``` ###Assistant : We need to make sure that if we give an invalid number that the CLI will exit. " 2538,"def filldedent(s, w=70, **kwargs): return '\n' + fill(dedent(str(s)).strip('\n'), width=w, **kwargs) "," Strips leading and trailing empty lines from a copy of ``s``, then dedents, fills and returns it. Empty line stripping serves to deal with docstrings like this one that start with a newline after the initial triple quote, inserting an empty line at the beginning of the string. Additional keyword arguments will be passed to ``textwrap.fill()``. See Also ======== strlines, rawlines ",61,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def filldedent(s, w=70, **kwargs): return '\n' + fill(dedent(str(s)).strip('\n'), width=w, **kwargs) ``` ###Assistant : Strips leading and trailing empty lines from a copy of ``s``, then dedents, fills and returns it. Empty line stripping serves to deal with docstrings like this one that start with a newline after the initial triple quote, inserting an empty line at the beginning of the string. Additional keyword arguments will be passed to ``textwrap.fill()``. See Also ======== strlines, rawlines " 2539,"def in1d(ar1, ar2, assume_unique=False, invert=False, kind=None): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Ensure that iteration through object arrays yields size-1 arrays if ar2.dtype == object: ar2 = ar2.reshape(-1, 1) # Convert booleans to uint8 so we can use the fast integer algorithm if ar1.dtype == bool: ar1 = ar1.view(np.uint8) if ar2.dtype == bool: ar2 = ar2.view(np.uint8) # Check if we can use a fast integer algorithm: integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and np.issubdtype(ar2.dtype, np.integer)) if kind not in {None, 'sort', 'table'}: raise ValueError( ""Invalid kind: {0}. 
"".format(kind) + ""Please use None, 'sort' or 'table'."") if integer_arrays and kind in {None, 'table'}: ar2_min = np.min(ar2) ar2_max = np.max(ar2) ar2_range = int(ar2_max) - int(ar2_min) # Constraints on whether we can actually use the table method: range_safe_from_overflow = ar2_range < np.iinfo(ar2.dtype).max below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927. # However, here we set the requirement that by default # the intermediate array can only be 6x # the combined memory allocation of the original # arrays. See discussion on # https://github.com/numpy/numpy/pull/12065. if ( range_safe_from_overflow and (below_memory_constraint or kind == 'table') ): if invert: outgoing_array = np.ones_like(ar1, dtype=bool) else: outgoing_array = np.zeros_like(ar1, dtype=bool) # Make elements 1 where the integer exists in ar2 if invert: isin_helper_ar = np.ones(ar2_range + 1, dtype=bool) isin_helper_ar[ar2 - ar2_min] = 0 else: isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool) isin_helper_ar[ar2 - ar2_min] = 1 # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - ar2_min] return outgoing_array elif kind == 'table': # not range_safe_from_overflow raise RuntimeError( ""You have specified kind='table', "" ""but the range of values in `ar2` exceeds the "" ""maximum integer of the datatype. "" ""Please set `kind` to None or 'sort'."" ) elif kind == 'table': raise ValueError( ""The 'table' method is only "" ""supported for boolean or integer arrays. "" ""Please select 'sort' or None for kind."" ) # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject # This code is run when # a) the first condition is true, making the code significantly faster # b) the second condition is true (i.e. `ar1` or `ar2` may contain # arbitrary objects), since then sorting is not guaranteed to work if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) ret = np.empty(ar.shape, dtype=bool) ret[order] = flag if assume_unique: return ret[:len(ar1)] else: return ret[rev_idx] "," Test whether each element of a 1-D array is also present in a second array. Returns a boolean array the same length as `ar1` that is True where an element of `ar1` is in `ar2` and False otherwise. We recommend using :func:`isin` instead of `in1d` for new code. Parameters ---------- ar1 : (M,) array_like Input array. ar2 : array_like The values against which to test each value of `ar1`. assume_unique : bool, optional If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. 
invert : bool, optional If True, the values in the returned array are inverted (that is, False where an element of `ar1` is in `ar2` and True otherwise). Default is False. ``np.in1d(a, b, invert=True)`` is equivalent to (but is faster than) ``np.invert(in1d(a, b))``. kind : {None, 'sort', 'table'}, optional The algorithm to use. This will not affect the final result, but will affect the speed. Default will select automatically based on memory considerations. * If 'sort', will use a mergesort-based approach. This will have a memory usage of roughly 6 times the sum of the sizes of `ar1` and `ar2`, not accounting for size of dtypes. * If 'table', will use a key-dictionary approach similar to a counting sort. This is only available for boolean and integer arrays. This will have a memory usage of the size of `ar1` plus the max-min value of `ar2`. This tends to be the faster method if the following formula is true: ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, but may use greater memory. * If `None`, will automatically choose 'table' if the required memory allocation is less than or equal to 6 times the sum of the sizes of `ar1` and `ar2`, otherwise will use 'sort'. This is done to not use a large amount of memory by default, even though 'table' may be faster in most cases. .. versionadded:: 1.8.0 Returns ------- in1d : (M,) ndarray, bool The values `ar1[in1d]` are in `ar2`. See Also -------- isin : Version of this function that preserves the shape of ar1. numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Notes ----- `in1d` can be considered as an element-wise function version of the python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly equivalent to ``np.array([item in b for item in a])``. However, this idea fails if `ar2` is a set, or similar (non-sequence) container: As ``ar2`` is converted to an array, in those cases ``asarray(ar2)`` is an object array rather than the expected array of contained values. .. versionadded:: 1.4.0 Examples -------- >>> test = np.array([0, 1, 2, 5, 0]) >>> states = [0, 2] >>> mask = np.in1d(test, states) >>> mask array([ True, False, True, False, True]) >>> test[mask] array([0, 2, 0]) >>> mask = np.in1d(test, states, invert=True) >>> mask array([False, True, False, True, False]) >>> test[mask] array([1, 5]) ",485,528,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def in1d(ar1, ar2, assume_unique=False, invert=False, kind=None): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Ensure that iteration through object arrays yields size-1 arrays if ar2.dtype == object: ar2 = ar2.reshape(-1, 1) # Convert booleans to uint8 so we can use the fast integer algorithm if ar1.dtype == bool: ar1 = ar1.view(np.uint8) if ar2.dtype == bool: ar2 = ar2.view(np.uint8) # Check if we can use a fast integer algorithm: integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and np.issubdtype(ar2.dtype, np.integer)) if kind not in {None, 'sort', 'table'}: raise ValueError( ""Invalid kind: {0}. 
"".format(kind) + ""Please use None, 'sort' or 'table'."") if integer_arrays and kind in {None, 'table'}: ar2_min = np.min(ar2) ar2_max = np.max(ar2) ar2_range = int(ar2_max) - int(ar2_min) # Constraints on whether we can actually use the table method: range_safe_from_overflow = ar2_range < np.iinfo(ar2.dtype).max below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927. # However, here we set the requirement that by default # the intermediate array can only be 6x # the combined memory allocation of the original # arrays. See discussion on # https://github.com/numpy/numpy/pull/12065. if ( range_safe_from_overflow and (below_memory_constraint or kind == 'table') ): if invert: outgoing_array = np.ones_like(ar1, dtype=bool) else: outgoing_array = np.zeros_like(ar1, dtype=bool) # Make elements 1 where the integer exists in ar2 if invert: isin_helper_ar = np.ones(ar2_range + 1, dtype=bool) isin_helper_ar[ar2 - ar2_min] = 0 else: isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool) isin_helper_ar[ar2 - ar2_min] = 1 # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - ar2_min] return outgoing_array elif kind == 'table': # not range_safe_from_overflow raise RuntimeError( ""You have specified kind='table', "" ""but the range of values in `ar2` exceeds the "" ""maximum integer of the datatype. "" ""Please set `kind` to None or 'sort'."" ) elif kind == 'table': raise ValueError( ""The 'table' method is only "" ""supported for boolean or integer arrays. "" ""Please select 'sort' or None for kind."" ) # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject # This code is run when # a) the first condition is true, making the code significantly faster # b) the second condition is true (i.e. `ar1` or `ar2` may contain # arbitrary objects), since then sorting is not guaranteed to work if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) ret = np.empty(ar.shape, dtype=bool) ret[order] = flag if assume_unique: return ret[:len(ar1)] else: return ret[rev_idx] ``` ###Assistant : Test whether each element of a 1-D array is also present in a second array. Returns a boolean array the same length as `ar1` that is True where an element of `ar1` is in `ar2` and False otherwise. We recommend using :func:`isin` instead of `in1d` for new code. Parameters ---------- ar1 : (M,) array_like Input array. ar2 : array_like The values against which to test each value of `ar1`. assume_unique : bool, optional If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. 
invert : bool, optional If True, the values in the returned array are inverted (that is, False where an element of `ar1` is in `ar2` and True otherwise). Default is False. ``np.in1d(a, b, invert=True)`` is equivalent to (but is faster than) ``np.invert(in1d(a, b))``. kind : {None, 'sort', 'table'}, optional The algorithm to use. This will not affect the final result, but will affect the speed. Default will select automatically based on memory considerations. * If 'sort', will use a mergesort-based approach. This will have a memory usage of roughly 6 times the sum of the sizes of `ar1` and `ar2`, not accounting for size of dtypes. * If 'table', will use a key-dictionary approach similar to a counting sort. This is only available for boolean and integer arrays. This will have a memory usage of the size of `ar1` plus the max-min value of `ar2`. This tends to be the faster method if the following formula is true: ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, but may use greater memory. * If `None`, will automatically choose 'table' if the required memory allocation is less than or equal to 6 times the sum of the sizes of `ar1` and `ar2`, otherwise will use 'sort'. This is done to not use a large amount of memory by default, even though 'table' may be faster in most cases. .. versionadded:: 1.8.0 Returns ------- in1d : (M,) ndarray, bool The values `ar1[in1d]` are in `ar2`. See Also -------- isin : Version of this function that preserves the shape of ar1. numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Notes ----- `in1d` can be considered as an element-wise function version of the python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly equivalent to ``np.array([item in b for item in a])``. However, this idea fails if `ar2` is a set, or similar (non-sequence) container: As ``ar2`` is converted to an array, in those cases ``asarray(ar2)`` is an object array rather than the expected array of contained values. .. versionadded:: 1.4.0 Examples -------- >>> test = np.array([0, 1, 2, 5, 0]) >>> states = [0, 2] >>> mask = np.in1d(test, states) >>> mask array([ True, False, True, False, True]) >>> test[mask] array([0, 2, 0]) >>> mask = np.in1d(test, states, invert=True) >>> mask array([False, True, False, True, False]) >>> test[mask] array([1, 5]) " 2540,"def test_devices(self) -> None: # Login in as the user self._get_token() # Check that we don't see a new device in our devices list channel = self.make_request( ""GET"", ""devices"", b""{}"", access_token=self.other_user_tok ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) # We should only see the one device (from the login in `prepare`) self.assertEqual(len(channel.json_body[""devices""]), 1) ",Tests that logging in as a user doesn't create a new device for them.,14,50,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_devices(self) -> None: # Login in as the user self._get_token() # Check that we don't see a new device in our devices list channel = self.make_request( ""GET"", ""devices"", b""{}"", access_token=self.other_user_tok ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) # We should only see the one device (from the login in `prepare`) self.assertEqual(len(channel.json_body[""devices""]), 1) ``` ###Assistant : Tests that logging in as a user doesn't create a new device for them. 
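To make the `kind` discussion for `in1d` above concrete, here is a small sketch with arbitrary integer inputs, assuming a NumPy build that already accepts the `kind` keyword shown in the implementation above; both strategies return the same boolean mask, 'table' simply trades memory proportional to `max(ar2) - min(ar2)` for lookup speed.

```Python
# Illustrative only: the input arrays are arbitrary small integers, and both
# kinds produce the same membership mask for ar1 against ar2.
import numpy as np

ar1 = np.array([0, 3, 7, 9, 3])
ar2 = np.array([3, 9])

print(np.in1d(ar1, ar2, kind='sort'))   # [False  True False  True  True]
print(np.in1d(ar1, ar2, kind='table'))  # same mask, via the counting-sort table
```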
" 2541,"def in_ipython() -> bool: try: eval('__IPYTHON__') except NameError: return False else: # pragma: no cover return True "," Check whether we're in an ipython environment, including jupyter notebooks. ",10,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def in_ipython() -> bool: try: eval('__IPYTHON__') except NameError: return False else: # pragma: no cover return True ``` ###Assistant : Check whether we're in an ipython environment, including jupyter notebooks. " 2542,"def test_stroptions_deprecated_internal_subset(): with pytest.raises(ValueError, match=""deprecated options must be a subset""): StrOptions({""a"", ""b"", ""c""}, deprecated={""a"", ""d""}) with pytest.raises(ValueError, match=""internal options must be a subset""): StrOptions({""a"", ""b"", ""c""}, internal={""a"", ""d""}) ",Check that the deprecated and internal parameters must be subsets of options.,12,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stroptions_deprecated_internal_subset(): with pytest.raises(ValueError, match=""deprecated options must be a subset""): StrOptions({""a"", ""b"", ""c""}, deprecated={""a"", ""d""}) with pytest.raises(ValueError, match=""internal options must be a subset""): StrOptions({""a"", ""b"", ""c""}, internal={""a"", ""d""}) ``` ###Assistant : Check that the deprecated and internal parameters must be subsets of options. " 2543,"def test_change_view_without_object_change_permission(self): change_url = reverse(""admin9:admin_views_article_change"", args=(self.a1.pk,)) self.client.force_login(self.viewuser) response = self.client.get(change_url) self.assertEqual(response.context[""title""], ""View article"") self.assertContains(response, ""View article | Django site admin"") self.assertContains(response, ""
    View article
    "") self.assertContains( response, 'Close', ) "," The object should be read-only if the user has permission to view it and change objects of that type but not to change the current object. ",26,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_change_view_without_object_change_permission(self): change_url = reverse(""admin9:admin_views_article_change"", args=(self.a1.pk,)) self.client.force_login(self.viewuser) response = self.client.get(change_url) self.assertEqual(response.context[""title""], ""View article"") self.assertContains(response, ""View article | Django site admin"") self.assertContains(response, ""
    View article
    "") self.assertContains( response, 'Close', ) ``` ###Assistant : The object should be read-only if the user has permission to view it and change objects of that type but not to change the current object. " 2544,"def __getattr__(name): import warnings if name in __deprecated_num_index_names: warnings.warn( f""pandas.{name} is deprecated "" ""and will be removed from pandas in a future version. "" ""Use pandas.Index with the appropriate dtype instead."", FutureWarning, stacklevel=2, ) from pandas.core.api import Float64Index, Int64Index, UInt64Index return { ""Float64Index"": Float64Index, ""Int64Index"": Int64Index, ""UInt64Index"": UInt64Index, }[name] elif name == ""datetime"": warnings.warn( ""The pandas.datetime class is deprecated "" ""and will be removed from pandas in a future version. "" ""Import from datetime module instead."", FutureWarning, stacklevel=2, ) from datetime import datetime as dt return dt elif name == ""np"": warnings.warn( ""The pandas.np module is deprecated "" ""and will be removed from pandas in a future version. "" ""Import numpy directly instead."", FutureWarning, stacklevel=2, ) import numpy as np return np elif name in {""SparseSeries"", ""SparseDataFrame""}: warnings.warn( f""The {name} class is removed from pandas. Accessing it from "" ""the top-level namespace will also be removed in the next version."", FutureWarning, stacklevel=2, ) return type(name, (), {}) elif name == ""SparseArray"": warnings.warn( ""The pandas.SparseArray class is deprecated "" ""and will be removed from pandas in a future version. "" ""Use pandas.arrays.SparseArray instead."", FutureWarning, stacklevel=2, ) from pandas.core.arrays.sparse import SparseArray as _SparseArray return _SparseArray raise AttributeError(f""module 'pandas' has no attribute '{name}'"") # module level doc-string __doc__ = # Use __all__ to let type checkers know what is part of the public API. # Pandas is not (yet) a py.typed library: the public API is determined # based on the documentation. 
__all__ = [ ""BooleanDtype"", ""Categorical"", ""CategoricalDtype"", ""CategoricalIndex"", ""DataFrame"", ""DateOffset"", ""DatetimeIndex"", ""DatetimeTZDtype"", ""ExcelFile"", ""ExcelWriter"", ""Flags"", ""Float32Dtype"", ""Float64Dtype"", ""Grouper"", ""HDFStore"", ""Index"", ""IndexSlice"", ""Int16Dtype"", ""Int32Dtype"", ""Int64Dtype"", ""Int8Dtype"", ""Interval"", ""IntervalDtype"", ""IntervalIndex"", ""MultiIndex"", ""NA"", ""NaT"", ""NamedAgg"", ""Period"", ""PeriodDtype"", ""PeriodIndex"", ""RangeIndex"", ""Series"", ""SparseDtype"", ""StringDtype"", ""Timedelta"", ""TimedeltaIndex"", ""Timestamp"", ""UInt16Dtype"", ""UInt32Dtype"", ""UInt64Dtype"", ""UInt8Dtype"", ""api"", ""array"", ""arrays"", ""bdate_range"", ""concat"", ""crosstab"", ""cut"", ""date_range"", ""describe_option"", ""errors"", ""eval"", ""factorize"", ""get_dummies"", ""get_option"", ""infer_freq"", ""interval_range"", ""io"", ""isna"", ""isnull"", ""json_normalize"", ""lreshape"", ""melt"", ""merge"", ""merge_asof"", ""merge_ordered"", ""notna"", ""notnull"", ""offsets"", ""option_context"", ""options"", ""period_range"", ""pivot"", ""pivot_table"", ""plotting"", ""qcut"", ""read_clipboard"", ""read_csv"", ""read_excel"", ""read_feather"", ""read_fwf"", ""read_gbq"", ""read_hdf"", ""read_html"", ""read_json"", ""read_orc"", ""read_parquet"", ""read_pickle"", ""read_sas"", ""read_spss"", ""read_sql"", ""read_sql_query"", ""read_sql_table"", ""read_stata"", ""read_table"", ""read_xml"", ""reset_option"", ""set_eng_float_format"", ""set_option"", ""show_versions"", ""test"", ""testing"", ""timedelta_range"", ""to_datetime"", ""to_numeric"", ""to_pickle"", ""to_timedelta"", ""tseries"", ""unique"", ""value_counts"", ""wide_to_long"", ] "," pandas - a powerful data analysis and manipulation library for Python ===================================================================== **pandas** is a Python package providing fast, flexible, and expressive data structures designed to make working with ""relational"" or ""labeled"" data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has the broader goal of becoming **the most powerful and flexible open source data analysis / manipulation tool available in any language**. It is already well on its way toward this goal. Main Features ------------- Here are just a few of the things that pandas does well: - Easy handling of missing data in floating point as well as non-floating point data. - Size mutability: columns can be inserted and deleted from DataFrame and higher dimensional objects - Automatic and explicit data alignment: objects can be explicitly aligned to a set of labels, or the user can simply ignore the labels and let `Series`, `DataFrame`, etc. automatically align the data for you in computations. - Powerful, flexible group by functionality to perform split-apply-combine operations on data sets, for both aggregating and transforming data. - Make it easy to convert ragged, differently-indexed data in other Python and NumPy data structures into DataFrame objects. - Intelligent label-based slicing, fancy indexing, and subsetting of large data sets. - Intuitive merging and joining data sets. - Flexible reshaping and pivoting of data sets. - Hierarchical labeling of axes (possible to have multiple labels per tick). 
- Robust IO tools for loading data from flat files (CSV and delimited), Excel files, databases, and saving/loading data from the ultrafast HDF5 format. - Time series-specific functionality: date range generation and frequency conversion, moving window statistics, date shifting and lagging. ",289,355,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __getattr__(name): import warnings if name in __deprecated_num_index_names: warnings.warn( f""pandas.{name} is deprecated "" ""and will be removed from pandas in a future version. "" ""Use pandas.Index with the appropriate dtype instead."", FutureWarning, stacklevel=2, ) from pandas.core.api import Float64Index, Int64Index, UInt64Index return { ""Float64Index"": Float64Index, ""Int64Index"": Int64Index, ""UInt64Index"": UInt64Index, }[name] elif name == ""datetime"": warnings.warn( ""The pandas.datetime class is deprecated "" ""and will be removed from pandas in a future version. "" ""Import from datetime module instead."", FutureWarning, stacklevel=2, ) from datetime import datetime as dt return dt elif name == ""np"": warnings.warn( ""The pandas.np module is deprecated "" ""and will be removed from pandas in a future version. "" ""Import numpy directly instead."", FutureWarning, stacklevel=2, ) import numpy as np return np elif name in {""SparseSeries"", ""SparseDataFrame""}: warnings.warn( f""The {name} class is removed from pandas. Accessing it from "" ""the top-level namespace will also be removed in the next version."", FutureWarning, stacklevel=2, ) return type(name, (), {}) elif name == ""SparseArray"": warnings.warn( ""The pandas.SparseArray class is deprecated "" ""and will be removed from pandas in a future version. "" ""Use pandas.arrays.SparseArray instead."", FutureWarning, stacklevel=2, ) from pandas.core.arrays.sparse import SparseArray as _SparseArray return _SparseArray raise AttributeError(f""module 'pandas' has no attribute '{name}'"") # module level doc-string __doc__ = # Use __all__ to let type checkers know what is part of the public API. # Pandas is not (yet) a py.typed library: the public API is determined # based on the documentation. 
__all__ = [ ""BooleanDtype"", ""Categorical"", ""CategoricalDtype"", ""CategoricalIndex"", ""DataFrame"", ""DateOffset"", ""DatetimeIndex"", ""DatetimeTZDtype"", ""ExcelFile"", ""ExcelWriter"", ""Flags"", ""Float32Dtype"", ""Float64Dtype"", ""Grouper"", ""HDFStore"", ""Index"", ""IndexSlice"", ""Int16Dtype"", ""Int32Dtype"", ""Int64Dtype"", ""Int8Dtype"", ""Interval"", ""IntervalDtype"", ""IntervalIndex"", ""MultiIndex"", ""NA"", ""NaT"", ""NamedAgg"", ""Period"", ""PeriodDtype"", ""PeriodIndex"", ""RangeIndex"", ""Series"", ""SparseDtype"", ""StringDtype"", ""Timedelta"", ""TimedeltaIndex"", ""Timestamp"", ""UInt16Dtype"", ""UInt32Dtype"", ""UInt64Dtype"", ""UInt8Dtype"", ""api"", ""array"", ""arrays"", ""bdate_range"", ""concat"", ""crosstab"", ""cut"", ""date_range"", ""describe_option"", ""errors"", ""eval"", ""factorize"", ""get_dummies"", ""get_option"", ""infer_freq"", ""interval_range"", ""io"", ""isna"", ""isnull"", ""json_normalize"", ""lreshape"", ""melt"", ""merge"", ""merge_asof"", ""merge_ordered"", ""notna"", ""notnull"", ""offsets"", ""option_context"", ""options"", ""period_range"", ""pivot"", ""pivot_table"", ""plotting"", ""qcut"", ""read_clipboard"", ""read_csv"", ""read_excel"", ""read_feather"", ""read_fwf"", ""read_gbq"", ""read_hdf"", ""read_html"", ""read_json"", ""read_orc"", ""read_parquet"", ""read_pickle"", ""read_sas"", ""read_spss"", ""read_sql"", ""read_sql_query"", ""read_sql_table"", ""read_stata"", ""read_table"", ""read_xml"", ""reset_option"", ""set_eng_float_format"", ""set_option"", ""show_versions"", ""test"", ""testing"", ""timedelta_range"", ""to_datetime"", ""to_numeric"", ""to_pickle"", ""to_timedelta"", ""tseries"", ""unique"", ""value_counts"", ""wide_to_long"", ] ``` ###Assistant : pandas - a powerful data analysis and manipulation library for Python ===================================================================== **pandas** is a Python package providing fast, flexible, and expressive data structures designed to make working with ""relational"" or ""labeled"" data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has the broader goal of becoming **the most powerful and flexible open source data analysis / manipulation tool available in any language**. It is already well on its way toward this goal. Main Features ------------- Here are just a few of the things that pandas does well: - Easy handling of missing data in floating point as well as non-floating point data. - Size mutability: columns can be inserted and deleted from DataFrame and higher dimensional objects - Automatic and explicit data alignment: objects can be explicitly aligned to a set of labels, or the user can simply ignore the labels and let `Series`, `DataFrame`, etc. automatically align the data for you in computations. - Powerful, flexible group by functionality to perform split-apply-combine operations on data sets, for both aggregating and transforming data. - Make it easy to convert ragged, differently-indexed data in other Python and NumPy data structures into DataFrame objects. - Intelligent label-based slicing, fancy indexing, and subsetting of large data sets. - Intuitive merging and joining data sets. - Flexible reshaping and pivoting of data sets. - Hierarchical labeling of axes (possible to have multiple labels per tick). 
- Robust IO tools for loading data from flat files (CSV and delimited), Excel files, databases, and saving/loading data from the ultrafast HDF5 format. - Time series-specific functionality: date range generation and frequency conversion, moving window statistics, date shifting and lagging. " 2545,"def readlink(path, canonicalize=False): path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError(""Path to link must be absolute."") if not os.path.islink(path): raise SaltInvocationError(""A valid link was not specified."") if canonicalize: return os.path.realpath(path) else: return salt.utils.path.readlink(path) "," .. versionadded:: 2014.1.0 Return the path that a symlink points to If canonicalize is set to True, then it return the final target CLI Example: .. code-block:: bash salt '*' file.readlink /path/to/link ",32,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def readlink(path, canonicalize=False): path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError(""Path to link must be absolute."") if not os.path.islink(path): raise SaltInvocationError(""A valid link was not specified."") if canonicalize: return os.path.realpath(path) else: return salt.utils.path.readlink(path) ``` ###Assistant : .. versionadded:: 2014.1.0 Return the path that a symlink points to If canonicalize is set to True, then it return the final target CLI Example: .. code-block:: bash salt '*' file.readlink /path/to/link " 2546,"def warns_deprecated_sympy(): with warns(SymPyDeprecationWarning): yield @contextlib.contextmanager"," Shorthand for ``warns(SymPyDeprecationWarning)`` This is the recommended way to test that ``SymPyDeprecationWarning`` is emitted for deprecated features in SymPy. To test for other warnings use ``warns``. To suppress warnings without asserting that they are emitted use ``ignore_warnings``. .. note:: ``warns_deprecated_sympy()`` is only intended for internal use in the SymPy test suite to test that a deprecation warning triggers properly. All other code in the SymPy codebase, including documentation examples, should not use deprecated behavior. If you are a user of SymPy and you want to disable SymPyDeprecationWarnings, use ``warnings`` filters (see :ref:`silencing-sympy-deprecation-warnings`). >>> from sympy.testing.pytest import warns_deprecated_sympy >>> from sympy.utilities.exceptions import SymPyDeprecationWarning >>> with warns_deprecated_sympy(): ... SymPyDeprecationWarning(""Don't use"", feature=""old thing"", ... deprecated_since_version=""1.0"", issue=123).warn() >>> with warns_deprecated_sympy(): ... pass Traceback (most recent call last): ... Failed: DID NOT WARN. No warnings of type \ SymPyDeprecationWarning was emitted. The list of emitted warnings is: []. ",143,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def warns_deprecated_sympy(): with warns(SymPyDeprecationWarning): yield @contextlib.contextmanager ``` ###Assistant : Shorthand for ``warns(SymPyDeprecationWarning)`` This is the recommended way to test that ``SymPyDeprecationWarning`` is emitted for deprecated features in SymPy. To test for other warnings use ``warns``. To suppress warnings without asserting that they are emitted use ``ignore_warnings``. .. note:: ``warns_deprecated_sympy()`` is only intended for internal use in the SymPy test suite to test that a deprecation warning triggers properly. 
All other code in the SymPy codebase, including documentation examples, should not use deprecated behavior. If you are a user of SymPy and you want to disable SymPyDeprecationWarnings, use ``warnings`` filters (see :ref:`silencing-sympy-deprecation-warnings`). >>> from sympy.testing.pytest import warns_deprecated_sympy >>> from sympy.utilities.exceptions import SymPyDeprecationWarning >>> with warns_deprecated_sympy(): ... SymPyDeprecationWarning(""Don't use"", feature=""old thing"", ... deprecated_since_version=""1.0"", issue=123).warn() >>> with warns_deprecated_sympy(): ... pass Traceback (most recent call last): ... Failed: DID NOT WARN. No warnings of type \ SymPyDeprecationWarning was emitted. The list of emitted warnings is: []. " 2547,"def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False): __tracebackhide__ = True # Hide traceback for py.test assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not equal', strict=strict) "," Raises an AssertionError if two array_like objects are not equal. Given two array_like objects, check that the shape is equal and all elements of these objects are equal (but see the Notes for the special handling of a scalar). An exception is raised at shape mismatch or conflicting values. In contrast to the standard usage in numpy, NaNs are compared like numbers, no assertion is raised if both objects have NaNs in the same positions. The usual caution for verifying equality with floating point numbers is advised. Parameters ---------- x : array_like The actual object to check. y : array_like The desired, expected object. err_msg : str, optional The error message to be printed in case of failure. verbose : bool, optional If True, the conflicting values are appended to the error message. strict : bool, optional If True, raise an AssertionError when either the shape or the data type of the array_like objects does not match. The special handling for scalars mentioned in the Notes section is disabled. Raises ------ AssertionError If actual and desired objects are not equal. See Also -------- assert_allclose: Compare two array_like objects for equality with desired relative and/or absolute precision. assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal Notes ----- When one of `x` and `y` is a scalar and the other is array_like, the function checks that each element of the array_like object is equal to the scalar. This behaviour can be disabled with the `strict` parameter. Examples -------- The first assert does not raise an exception: >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], ... [np.exp(0),2.33333, np.nan]) Assert fails with numerical imprecision with floats: >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], ... [1, np.sqrt(np.pi)**2, np.nan]) Traceback (most recent call last): ... AssertionError: Arrays are not equal Mismatched elements: 1 / 3 (33.3%) Max absolute difference: 4.4408921e-16 Max relative difference: 1.41357986e-16 x: array([1. , 3.141593, nan]) y: array([1. , 3.141593, nan]) Use `assert_allclose` or one of the nulp (number of floating point values) functions for these cases instead: >>> np.testing.assert_allclose([1.0,np.pi,np.nan], ... [1, np.sqrt(np.pi)**2, np.nan], ... rtol=1e-10, atol=0) As mentioned in the Notes section, `assert_array_equal` has special handling for scalars. 
Here the test checks that each value in `x` is 3: >>> x = np.full((2, 5), fill_value=3) >>> np.testing.assert_array_equal(x, 3) Use `strict` to raise an AssertionError when comparing a scalar with an array: >>> np.testing.assert_array_equal(x, 3, strict=True) Traceback (most recent call last): ... AssertionError: Arrays are not equal (shapes (2, 5), () mismatch) x: array([[3, 3, 3, 3, 3], [3, 3, 3, 3, 3]]) y: array(3) The `strict` parameter also ensures that the array data types match: >>> x = np.array([2, 2, 2]) >>> y = np.array([2., 2., 2.], dtype=np.float32) >>> np.testing.assert_array_equal(x, y, strict=True) Traceback (most recent call last): ... AssertionError: Arrays are not equal (dtypes int64, float32 mismatch) x: array([2, 2, 2]) y: array([2., 2., 2.], dtype=float32) ",461,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False): __tracebackhide__ = True # Hide traceback for py.test assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not equal', strict=strict) ``` ###Assistant : Raises an AssertionError if two array_like objects are not equal. Given two array_like objects, check that the shape is equal and all elements of these objects are equal (but see the Notes for the special handling of a scalar). An exception is raised at shape mismatch or conflicting values. In contrast to the standard usage in numpy, NaNs are compared like numbers, no assertion is raised if both objects have NaNs in the same positions. The usual caution for verifying equality with floating point numbers is advised. Parameters ---------- x : array_like The actual object to check. y : array_like The desired, expected object. err_msg : str, optional The error message to be printed in case of failure. verbose : bool, optional If True, the conflicting values are appended to the error message. strict : bool, optional If True, raise an AssertionError when either the shape or the data type of the array_like objects does not match. The special handling for scalars mentioned in the Notes section is disabled. Raises ------ AssertionError If actual and desired objects are not equal. See Also -------- assert_allclose: Compare two array_like objects for equality with desired relative and/or absolute precision. assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal Notes ----- When one of `x` and `y` is a scalar and the other is array_like, the function checks that each element of the array_like object is equal to the scalar. This behaviour can be disabled with the `strict` parameter. Examples -------- The first assert does not raise an exception: >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], ... [np.exp(0),2.33333, np.nan]) Assert fails with numerical imprecision with floats: >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], ... [1, np.sqrt(np.pi)**2, np.nan]) Traceback (most recent call last): ... AssertionError: Arrays are not equal Mismatched elements: 1 / 3 (33.3%) Max absolute difference: 4.4408921e-16 Max relative difference: 1.41357986e-16 x: array([1. , 3.141593, nan]) y: array([1. , 3.141593, nan]) Use `assert_allclose` or one of the nulp (number of floating point values) functions for these cases instead: >>> np.testing.assert_allclose([1.0,np.pi,np.nan], ... [1, np.sqrt(np.pi)**2, np.nan], ... rtol=1e-10, atol=0) As mentioned in the Notes section, `assert_array_equal` has special handling for scalars. 
Here the test checks that each value in `x` is 3: >>> x = np.full((2, 5), fill_value=3) >>> np.testing.assert_array_equal(x, 3) Use `strict` to raise an AssertionError when comparing a scalar with an array: >>> np.testing.assert_array_equal(x, 3, strict=True) Traceback (most recent call last): ... AssertionError: Arrays are not equal (shapes (2, 5), () mismatch) x: array([[3, 3, 3, 3, 3], [3, 3, 3, 3, 3]]) y: array(3) The `strict` parameter also ensures that the array data types match: >>> x = np.array([2, 2, 2]) >>> y = np.array([2., 2., 2.], dtype=np.float32) >>> np.testing.assert_array_equal(x, y, strict=True) Traceback (most recent call last): ... AssertionError: Arrays are not equal (dtypes int64, float32 mismatch) x: array([2, 2, 2]) y: array([2., 2., 2.], dtype=float32) " 2548,"def update_from_data_x(self, x, ignore=None): x = np.ravel(x) self.update_from_data_xy(np.column_stack([x, np.ones(x.size)]), ignore=ignore, updatey=False) "," Update the x-bounds of the `Bbox` based on the passed in data. After updating, the bounds will have positive *width*, and *x0* will be the minimal value. Parameters ---------- x : `~numpy.ndarray` Array of x-values. ignore : bool, optional - When ``True``, ignore the existing bounds of the `Bbox`. - When ``False``, include the existing bounds of the `Bbox`. - When ``None``, use the last value passed to :meth:`ignore`. ",69,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_from_data_x(self, x, ignore=None): x = np.ravel(x) self.update_from_data_xy(np.column_stack([x, np.ones(x.size)]), ignore=ignore, updatey=False) ``` ###Assistant : Update the x-bounds of the `Bbox` based on the passed in data. After updating, the bounds will have positive *width*, and *x0* will be the minimal value. Parameters ---------- x : `~numpy.ndarray` Array of x-values. ignore : bool, optional - When ``True``, ignore the existing bounds of the `Bbox`. - When ``False``, include the existing bounds of the `Bbox`. - When ``None``, use the last value passed to :meth:`ignore`. 
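A minimal usage sketch of the `Bbox.update_from_data_x` behaviour documented in the row that ends here; it assumes matplotlib and NumPy are installed, and starting from `Bbox.null()` is an assumption chosen so that the sample data alone determines the x-bounds.

```Python
import numpy as np
from matplotlib.transforms import Bbox

bb = Bbox.null()  # empty box; with the default ignore setting, the first update defines the bounds
bb.update_from_data_x(np.array([1.0, 3.0, 2.0]))
print(bb.x0, bb.x1)  # x-bounds now span the data: 1.0 3.0
```

Because the method forwards `updatey=False` internally, only the x-bounds change; the y-bounds of the box are left untouched.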
" 2549,"def test_https_malformed_referer(self): malformed_referer_msg = ""Referer checking failed - Referer is malformed."" req = self._get_POST_request_with_token() req._is_secure_override = True req.META[""HTTP_REFERER""] = ""http://http://www.example.com/"" mw = CsrfViewMiddleware(post_form_view) self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains( response, ""Referer checking failed - Referer is insecure while host is secure."", status_code=403, ) # Empty req.META[""HTTP_REFERER""] = """" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # Non-ASCII req.META[""HTTP_REFERER""] = ""ØBöIß"" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing scheme # >>> urlparse('//example.com/') # ParseResult(scheme='', netloc='example.com', path='/', params='', query='', fragment='') req.META[""HTTP_REFERER""] = ""//example.com/"" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing netloc # >>> urlparse('https://') # ParseResult(scheme='https', netloc='', path='', params='', query='', fragment='') req.META[""HTTP_REFERER""] = ""https://"" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # Invalid URL # >>> urlparse('https://[') # ValueError: Invalid IPv6 URL req.META[""HTTP_REFERER""] = ""https://["" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) "," A POST HTTPS request with a bad referer is rejected. ",10,157,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_https_malformed_referer(self): malformed_referer_msg = ""Referer checking failed - Referer is malformed."" req = self._get_POST_request_with_token() req._is_secure_override = True req.META[""HTTP_REFERER""] = ""http://http://www.example.com/"" mw = CsrfViewMiddleware(post_form_view) self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains( response, ""Referer checking failed - Referer is insecure while host is secure."", status_code=403, ) # Empty req.META[""HTTP_REFERER""] = """" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # Non-ASCII req.META[""HTTP_REFERER""] = ""ØBöIß"" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing scheme # >>> urlparse('//example.com/') # ParseResult(scheme='', netloc='example.com', path='/', params='', query='', fragment='') req.META[""HTTP_REFERER""] = ""//example.com/"" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing netloc # >>> urlparse('https://') # ParseResult(scheme='https', netloc='', path='', params='', query='', fragment='') req.META[""HTTP_REFERER""] = ""https://"" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # Invalid URL # >>> urlparse('https://[') # ValueError: Invalid IPv6 URL req.META[""HTTP_REFERER""] = ""https://["" self._check_referer_rejects(mw, req) response = mw.process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) ``` ###Assistant : A POST HTTPS request with a bad referer is rejected. " 2550,"def test_missing_cpp_namespace(self) -> None: yaml_str = output_error = self.get_errors_from_gen_backend_stubs(yaml_str) self.assertExpectedInline(output_error, ) ","\ backend: XLA supported: - absYou must provide a value for ""cpp_namespace""",12,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_missing_cpp_namespace(self) -> None: yaml_str = output_error = self.get_errors_from_gen_backend_stubs(yaml_str) self.assertExpectedInline(output_error, ) ``` ###Assistant : \ backend: XLA supported: - absYou must provide a value for ""cpp_namespace"" " 2551,"def single_source_dijkstra_path_length(G, source, cutoff=None, weight=""weight""): return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight) ","Find shortest weighted path lengths in G from a source node. Compute the shortest path length between source and all other reachable nodes for a weighted graph. Parameters ---------- G : NetworkX graph source : node label Starting node for path cutoff : integer or float, optional Length (sum of edge weights) at which the search is stopped. If cutoff is provided, only return paths with summed weight <= cutoff. weight : string or function If this is a string, then edge weights will be accessed via the edge attribute with this key (that is, the weight of the edge joining `u` to `v` will be ``G.edges[u, v][weight]``). If no such edge attribute exists, the weight of the edge is assumed to be one. 
If this is a function, the weight of an edge is the value returned by the function. The function must accept exactly three positional arguments: the two endpoints of an edge and the dictionary of edge attributes for that edge. The function must return a number or None to indicate a hidden edge. Returns ------- length : dict Dict keyed by node to shortest path length from source. Raises ------ NodeNotFound If `source` is not in `G`. Examples -------- >>> G = nx.path_graph(5) >>> length = nx.single_source_dijkstra_path_length(G, 0) >>> length[4] 4 >>> for node in [0, 1, 2, 3, 4]: ... print(f""{node}: {length[node]}"") 0: 0 1: 1 2: 2 3: 3 4: 4 Notes ----- Edge weight attributes must be numerical. Distances are calculated as sums of weighted edges traversed. The weight function can be used to hide edges by returning None. So ``weight = lambda u, v, d: 1 if d['color']==""red"" else None`` will find the shortest red path. See Also -------- single_source_dijkstra, single_source_bellman_ford_path_length ",289,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def single_source_dijkstra_path_length(G, source, cutoff=None, weight=""weight""): return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight) ``` ###Assistant : Find shortest weighted path lengths in G from a source node. Compute the shortest path length between source and all other reachable nodes for a weighted graph. Parameters ---------- G : NetworkX graph source : node label Starting node for path cutoff : integer or float, optional Length (sum of edge weights) at which the search is stopped. If cutoff is provided, only return paths with summed weight <= cutoff. weight : string or function If this is a string, then edge weights will be accessed via the edge attribute with this key (that is, the weight of the edge joining `u` to `v` will be ``G.edges[u, v][weight]``). If no such edge attribute exists, the weight of the edge is assumed to be one. If this is a function, the weight of an edge is the value returned by the function. The function must accept exactly three positional arguments: the two endpoints of an edge and the dictionary of edge attributes for that edge. The function must return a number or None to indicate a hidden edge. Returns ------- length : dict Dict keyed by node to shortest path length from source. Raises ------ NodeNotFound If `source` is not in `G`. Examples -------- >>> G = nx.path_graph(5) >>> length = nx.single_source_dijkstra_path_length(G, 0) >>> length[4] 4 >>> for node in [0, 1, 2, 3, 4]: ... print(f""{node}: {length[node]}"") 0: 0 1: 1 2: 2 3: 3 4: 4 Notes ----- Edge weight attributes must be numerical. Distances are calculated as sums of weighted edges traversed. The weight function can be used to hide edges by returning None. So ``weight = lambda u, v, d: 1 if d['color']==""red"" else None`` will find the shortest red path. 
See Also -------- single_source_dijkstra, single_source_bellman_ford_path_length " 2552,"def print_help(self): has_ticker_start = """" if self.ticker else ""[unvl]"" has_ticker_end = """" if self.ticker else ""[/unvl]"" help_text = f console.print(text=help_text, menu=""Stocks - Behavioural Analysis"") ","[cmds] load load a specific stock ticker for analysis [param]Ticker: [/param]{self.ticker.upper() or None} {has_ticker_start} [src][Finbrain][/src] headlines sentiment from 15+ major news headlines [src][Finnhub][/src] stats sentiment stats including comparison with sector{has_ticker_end} [src][Reddit][/src] wsb show what WSB gang is up to in subreddit wallstreetbets watchlist show other users watchlist popular show popular tickers spac_c show other users spacs announcements from subreddit SPACs community spac show other users spacs announcements from other subs{has_ticker_start} getdd gets due diligence from another user's post{has_ticker_end} [src][Stocktwits][/src] trending trending stocks stalker stalk stocktwits user's last messages{has_ticker_start} bullbear estimate quick sentiment from last 30 messages on board messages output up to the 30 last messages on the board [src][Twitter][/src] infer infer about stock's sentiment from latest tweets sentiment in-depth sentiment prediction from tweets over time [src][Google][/src] mentions interest over time based on stock's mentions regions regions that show highest interest in stock queries top related queries with this stock rise top rising related queries with stock{has_ticker_end} [src][SentimentInvestor][/src] popularsi show most popular stocks on social media right now emerging show stocks that are being talked about more than usual{has_ticker_start} metrics core social sentiment metrics for this stock social social media figures for stock popularity historical plot the past week of data for a selected metric{has_ticker_end}[/cmds] ",205,24,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def print_help(self): has_ticker_start = """" if self.ticker else ""[unvl]"" has_ticker_end = """" if self.ticker else ""[/unvl]"" help_text = f console.print(text=help_text, menu=""Stocks - Behavioural Analysis"") ``` ###Assistant : [cmds] load load a specific stock ticker for analysis [param]Ticker: [/param]{self.ticker.upper() or None} {has_ticker_start} [src][Finbrain][/src] headlines sentiment from 15+ major news headlines [src][Finnhub][/src] stats sentiment stats including comparison with sector{has_ticker_end} [src][Reddit][/src] wsb show what WSB gang is up to in subreddit wallstreetbets watchlist show other users watchlist popular show popular tickers spac_c show other users spacs announcements from subreddit SPACs community spac show other users spacs announcements from other subs{has_ticker_start} getdd gets due diligence from another user's post{has_ticker_end} [src][Stocktwits][/src] trending trending stocks stalker stalk stocktwits user's last messages{has_ticker_start} bullbear estimate quick sentiment from last 30 messages on board messages output up to the 30 last messages on the board [src][Twitter][/src] infer infer about stock's sentiment from latest tweets sentiment in-depth sentiment prediction from tweets over time [src][Google][/src] mentions interest over time based on stock's mentions regions regions that show highest interest in stock queries top related queries with this stock rise top rising related queries with stock{has_ticker_end} [src][SentimentInvestor][/src] popularsi show most popular stocks on social media right now emerging show stocks that are being talked about more than usual{has_ticker_start} metrics core social sentiment metrics for this stock social social media figures for stock popularity historical plot the past week of data for a selected metric{has_ticker_end}[/cmds] " 2553,"def iterate_instructions(code_object): # The arg extension the EXTENDED_ARG opcode represents is automatically handled by get_instructions() but the # instruction is left in. Get rid of it to make subsequent parsing easier/safer. yield from (i for i in get_instructions(code_object) if i.opname != ""EXTENDED_ARG"") yield None # For each constant in this code object that is itself a code object, # parse this constant in the same manner. for constant in code_object.co_consts: if inspect.iscode(constant): yield from iterate_instructions(constant) ","Delivers the byte-code instructions as a continuous stream. Yields `dis.Instruction`. After each code-block (`co_code`), `None` is yielded to mark the end of the block and to interrupt the steam. ",29,75,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def iterate_instructions(code_object): # The arg extension the EXTENDED_ARG opcode represents is automatically handled by get_instructions() but the # instruction is left in. Get rid of it to make subsequent parsing easier/safer. yield from (i for i in get_instructions(code_object) if i.opname != ""EXTENDED_ARG"") yield None # For each constant in this code object that is itself a code object, # parse this constant in the same manner. for constant in code_object.co_consts: if inspect.iscode(constant): yield from iterate_instructions(constant) ``` ###Assistant : Delivers the byte-code instructions as a continuous stream. Yields `dis.Instruction`. After each code-block (`co_code`), `None` is yielded to mark the end of the block and to interrupt the steam. 
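A hedged usage sketch for the `iterate_instructions` helper in the row that ends here; the helper is not importable from a library, so the sketch assumes it (together with `from dis import get_instructions` and `import inspect`, which it relies on) has been copied into the current module, and `sample` is a hypothetical function used only to produce bytecode.

```Python
# Assumes iterate_instructions and its imports are already defined in this module (see above).

def sample():
    return [i * i for i in range(3)]  # a comprehension may compile to a nested code object

for ins in iterate_instructions(sample.__code__):
    if ins is None:
        print("-- end of code block --")  # None separates the instructions of each code object
    else:
        print(ins.opname, ins.argval)
```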
" 2554,"def fit(self) -> ResultGrid: if not self._is_ray_client: try: return self._local_tuner.fit() except Exception as e: raise TuneError( f""Tune run failed. "" f'Please use tuner = Tuner.restore(""' f'{self._local_tuner.get_experiment_checkpoint_dir()}"") to resume.' ) from e else: experiment_checkpoint_dir = ray.get( self._remote_tuner.get_experiment_checkpoint_dir.remote() ) try: return ray.get(self._remote_tuner.fit.remote()) except Exception as e: raise TuneError( f""Tune run failed. "" f'Please use tuner = Tuner.restore(""' f'{experiment_checkpoint_dir}"") to resume.' ) from e ","Executes hyperparameter tuning job as configured and returns result. Failure handling: For the kind of exception that happens during the execution of a trial, one may inspect it together with stacktrace through the returned result grid. See ``ResultGrid`` for reference. Each trial may fail up to a certain number. This is configured by ``RunConfig.FailureConfig.max_failures``. Exception that happens beyond trials will be thrown by this method as well. In such cases, there will be instruction like the following printed out at the end of console output to inform users on how to resume. Please use tuner = Tuner.restore(""~/ray_results/tuner_resume"") to resume. Raises: RayTaskError: If user-provided trainable raises an exception TuneError: General Ray Tune error. ",112,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit(self) -> ResultGrid: if not self._is_ray_client: try: return self._local_tuner.fit() except Exception as e: raise TuneError( f""Tune run failed. "" f'Please use tuner = Tuner.restore(""' f'{self._local_tuner.get_experiment_checkpoint_dir()}"") to resume.' ) from e else: experiment_checkpoint_dir = ray.get( self._remote_tuner.get_experiment_checkpoint_dir.remote() ) try: return ray.get(self._remote_tuner.fit.remote()) except Exception as e: raise TuneError( f""Tune run failed. "" f'Please use tuner = Tuner.restore(""' f'{experiment_checkpoint_dir}"") to resume.' ) from e ``` ###Assistant : Executes hyperparameter tuning job as configured and returns result. Failure handling: For the kind of exception that happens during the execution of a trial, one may inspect it together with stacktrace through the returned result grid. See ``ResultGrid`` for reference. Each trial may fail up to a certain number. This is configured by ``RunConfig.FailureConfig.max_failures``. Exception that happens beyond trials will be thrown by this method as well. In such cases, there will be instruction like the following printed out at the end of console output to inform users on how to resume. Please use tuner = Tuner.restore(""~/ray_results/tuner_resume"") to resume. Raises: RayTaskError: If user-provided trainable raises an exception TuneError: General Ray Tune error. " 2555,"def _handle_coordinator_update(self) -> None: self._refresh() super()._handle_coordinator_update() "," Handle updated data from the coordinator. Tests fails without this method. ",11,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _handle_coordinator_update(self) -> None: self._refresh() super()._handle_coordinator_update() ``` ###Assistant : Handle updated data from the coordinator. Tests fails without this method. 
" 2556,"def _skew_1d(self, column, bias=True, nan_policy=""propagate""): # import depends on scipy, not installed by default from dask.array import stats as da_stats if pd.Int64Dtype.is_dtype(column._meta_nonempty): column = column.astype(""f8"") if not np.issubdtype(column.dtype, np.number): column = column.astype(""f8"") name = self._token_prefix + ""skew-1d-"" + tokenize(column) array_skew = da_stats.skew( column.values, axis=0, bias=bias, nan_policy=nan_policy ) layer = {(name, 0): (methods.wrap_skew_reduction, (array_skew._name,), None)} graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_skew]) return new_dd_object( graph, name, column._meta_nonempty.skew(), divisions=[None, None] ) ","1D version of the skew calculation. Uses the array version from da.stats in case we are passing in a single series ",21,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _skew_1d(self, column, bias=True, nan_policy=""propagate""): # import depends on scipy, not installed by default from dask.array import stats as da_stats if pd.Int64Dtype.is_dtype(column._meta_nonempty): column = column.astype(""f8"") if not np.issubdtype(column.dtype, np.number): column = column.astype(""f8"") name = self._token_prefix + ""skew-1d-"" + tokenize(column) array_skew = da_stats.skew( column.values, axis=0, bias=bias, nan_policy=nan_policy ) layer = {(name, 0): (methods.wrap_skew_reduction, (array_skew._name,), None)} graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_skew]) return new_dd_object( graph, name, column._meta_nonempty.skew(), divisions=[None, None] ) ``` ###Assistant : 1D version of the skew calculation. Uses the array version from da.stats in case we are passing in a single series " 2557,"def test_device_classes_aligned(): for device_class in NumberDeviceClass: assert hasattr(SensorDeviceClass, device_class.name) assert getattr(SensorDeviceClass, device_class.name).value == device_class.value ",Make sure all number device classes are also available in SensorDeviceClass.,11,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_device_classes_aligned(): for device_class in NumberDeviceClass: assert hasattr(SensorDeviceClass, device_class.name) assert getattr(SensorDeviceClass, device_class.name).value == device_class.value ``` ###Assistant : Make sure all number device classes are also available in SensorDeviceClass. " 2558,"def intersection_all(graphs): R = None for i, G in enumerate(graphs): G_nodes_set = set(G.nodes) G_edges_set = set(G.edges(keys=True) if G.is_multigraph() else G.edges()) if i == 0: # create new graph R = G.__class__() node_intersection = G_nodes_set edge_intersection = G_edges_set elif G.is_multigraph() != R.is_multigraph(): raise nx.NetworkXError(""All graphs must be graphs or multigraphs."") else: node_intersection &= G_nodes_set edge_intersection &= G_edges_set R.graph.update(G.graph) if R is None: raise ValueError(""cannot apply intersection_all to an empty list"") R.add_nodes_from(node_intersection) R.add_edges_from(edge_intersection) return R ","Returns a new graph that contains only the nodes and the edges that exist in all graphs. Parameters ---------- graphs : iterable Iterable of NetworkX graphs Returns ------- R : A new graph with the same type as the first graph in list Raises ------ ValueError If `graphs` is an empty list. 
Notes ----- Attributes from the graph, nodes, and edges are not copied to the new graph. ",68,73,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def intersection_all(graphs): R = None for i, G in enumerate(graphs): G_nodes_set = set(G.nodes) G_edges_set = set(G.edges(keys=True) if G.is_multigraph() else G.edges()) if i == 0: # create new graph R = G.__class__() node_intersection = G_nodes_set edge_intersection = G_edges_set elif G.is_multigraph() != R.is_multigraph(): raise nx.NetworkXError(""All graphs must be graphs or multigraphs."") else: node_intersection &= G_nodes_set edge_intersection &= G_edges_set R.graph.update(G.graph) if R is None: raise ValueError(""cannot apply intersection_all to an empty list"") R.add_nodes_from(node_intersection) R.add_edges_from(edge_intersection) return R ``` ###Assistant : Returns a new graph that contains only the nodes and the edges that exist in all graphs. Parameters ---------- graphs : iterable Iterable of NetworkX graphs Returns ------- R : A new graph with the same type as the first graph in list Raises ------ ValueError If `graphs` is an empty list. Notes ----- Attributes from the graph, nodes, and edges are not copied to the new graph. " 2559,"def _looks_like_red_hat_scheme() -> bool: from distutils.command.install import install from distutils.dist import Distribution cmd: Any = install(Distribution()) cmd.finalize_options() return ( cmd.exec_prefix == f""{os.path.normpath(sys.exec_prefix)}/local"" and cmd.prefix == f""{os.path.normpath(sys.prefix)}/local"" ) @functools.lru_cache(maxsize=None)","Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``. Red Hat's ``00251-change-user-install-location.patch`` changes the install command's ``prefix`` and ``exec_prefix`` to append ``""/local""``. This is (fortunately?) done quite unconditionally, so we create a default command object without any configuration to detect this. ",38,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _looks_like_red_hat_scheme() -> bool: from distutils.command.install import install from distutils.dist import Distribution cmd: Any = install(Distribution()) cmd.finalize_options() return ( cmd.exec_prefix == f""{os.path.normpath(sys.exec_prefix)}/local"" and cmd.prefix == f""{os.path.normpath(sys.prefix)}/local"" ) @functools.lru_cache(maxsize=None) ``` ###Assistant : Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``. Red Hat's ``00251-change-user-install-location.patch`` changes the install command's ``prefix`` and ``exec_prefix`` to append ``""/local""``. This is (fortunately?) done quite unconditionally, so we create a default command object without any configuration to detect this. 
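The `_looks_like_red_hat_scheme` row that ends here is a private pip helper and cannot be imported directly, so the sketch below reproduces the same check inline. Note that `distutils` is deprecated and was removed in Python 3.12, so this only runs on older interpreters.

```Python
import os
import sys
from distutils.command.install import install  # deprecated; removed in Python 3.12
from distutils.dist import Distribution

# Build a default install command and compare its prefixes against sys.prefix / sys.exec_prefix,
# exactly as the helper does; a trailing "/local" suggests the Red Hat patch is in effect.
cmd = install(Distribution())
cmd.finalize_options()
is_red_hat_like = (
    cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local"
    and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local"
)
print(is_red_hat_like)
```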
" 2560,"def _create_sql_query(self) -> str: escaper = ParamEscaper() maybe_with = """" if self._encryption is not None or self._credential is not None: maybe_encryption = """" if self._encryption is not None: maybe_encryption = self._generate_options(""ENCRYPTION"", escaper, self._encryption, False) maybe_credential = """" if self._credential is not None: maybe_credential = self._generate_options(""CREDENTIAL"", escaper, self._credential, False) maybe_with = f"" WITH ({maybe_credential} {maybe_encryption})"" location = escaper.escape_item(self._file_location) + maybe_with if self._expression_list is not None: location = f""(SELECT {self._expression_list} FROM {location})"" files_or_pattern = """" if self._pattern is not None: files_or_pattern = f""PATTERN = {escaper.escape_item(self._pattern)}\n"" elif self._files is not None: files_or_pattern = f""FILES = {escaper.escape_item(self._files)}\n"" format_options = self._generate_options(""FORMAT_OPTIONS"", escaper, self._format_options) + ""\n"" copy_options = self._generate_options(""COPY_OPTIONS"", escaper, self._copy_options) + ""\n"" validation = """" if self._validate is not None: if isinstance(self._validate, bool): if self._validate: validation = ""VALIDATE ALL\n"" elif isinstance(self._validate, int): if self._validate < 0: raise AirflowException( ""Number of rows for validation should be positive, got: "" + str(self._validate) ) validation = f""VALIDATE {self._validate} ROWS\n"" else: raise AirflowException(""Incorrect data type for validate parameter: "" + type(self._validate)) # TODO: think on how to make sure that table_name and expression_list aren't used for SQL injection sql = f return sql.strip() ","COPY INTO {self._table_name} FROM {location} FILEFORMAT = {self._file_format} {validation}{files_or_pattern}{format_options}{copy_options} ",9,184,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _create_sql_query(self) -> str: escaper = ParamEscaper() maybe_with = """" if self._encryption is not None or self._credential is not None: maybe_encryption = """" if self._encryption is not None: maybe_encryption = self._generate_options(""ENCRYPTION"", escaper, self._encryption, False) maybe_credential = """" if self._credential is not None: maybe_credential = self._generate_options(""CREDENTIAL"", escaper, self._credential, False) maybe_with = f"" WITH ({maybe_credential} {maybe_encryption})"" location = escaper.escape_item(self._file_location) + maybe_with if self._expression_list is not None: location = f""(SELECT {self._expression_list} FROM {location})"" files_or_pattern = """" if self._pattern is not None: files_or_pattern = f""PATTERN = {escaper.escape_item(self._pattern)}\n"" elif self._files is not None: files_or_pattern = f""FILES = {escaper.escape_item(self._files)}\n"" format_options = self._generate_options(""FORMAT_OPTIONS"", escaper, self._format_options) + ""\n"" copy_options = self._generate_options(""COPY_OPTIONS"", escaper, self._copy_options) + ""\n"" validation = """" if self._validate is not None: if isinstance(self._validate, bool): if self._validate: validation = ""VALIDATE ALL\n"" elif isinstance(self._validate, int): if self._validate < 0: raise AirflowException( ""Number of rows for validation should be positive, got: "" + str(self._validate) ) validation = f""VALIDATE {self._validate} ROWS\n"" else: raise AirflowException(""Incorrect data type for validate parameter: "" + type(self._validate)) # TODO: think on how to make sure that table_name and expression_list aren't used for SQL injection sql = f return sql.strip() ``` ###Assistant : COPY INTO {self._table_name} FROM {location} FILEFORMAT = {self._file_format} {validation}{files_or_pattern}{format_options}{copy_options} " 2561,"def predict(self, x, **kwargs): proba = self.model.predict(x, **kwargs) if proba.shape[-1] > 1: classes = proba.argmax(axis=-1) else: classes = (proba > 0.5).astype(""int32"") return self.classes_[classes] ","Returns the class predictions for the given test data. Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.predict`. Returns: preds: array-like, shape `(n_samples,)` Class predictions. ",48,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def predict(self, x, **kwargs): proba = self.model.predict(x, **kwargs) if proba.shape[-1] > 1: classes = proba.argmax(axis=-1) else: classes = (proba > 0.5).astype(""int32"") return self.classes_[classes] ``` ###Assistant : Returns the class predictions for the given test data. Args: x: array-like, shape `(n_samples, n_features)` Test samples where `n_samples` is the number of samples and `n_features` is the number of features. **kwargs: dictionary arguments Legal arguments are the arguments of `Sequential.predict`. Returns: preds: array-like, shape `(n_samples,)` Class predictions. " 2562,"def resize_image_type0(self, img): limit_side_len = self.max_side_len h, w, _ = img.shape # limit the max side if max(h, w) > limit_side_len: if h > w: ratio = float(limit_side_len) / h else: ratio = float(limit_side_len) / w else: ratio = 1. 
resize_h = int(h * ratio) resize_w = int(w * ratio) resize_h = int(round(resize_h / 32) * 32) resize_w = int(round(resize_w / 32) * 32) try: if int(resize_w) <= 0 or int(resize_h) <= 0: return None, (None, None) img = cv2.resize(img, (int(resize_w), int(resize_h))) except: print(img.shape, resize_w, resize_h) sys.exit(0) ratio_h = resize_h / float(h) ratio_w = resize_w / float(w) # return img, np.array([h, w]) return img, [ratio_h, ratio_w] "," resize image to a size multiple of 32 which is required by the network args: img(array): array with shape [h, w, c] return(tuple): img, (ratio_h, ratio_w) ",26,106,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resize_image_type0(self, img): limit_side_len = self.max_side_len h, w, _ = img.shape # limit the max side if max(h, w) > limit_side_len: if h > w: ratio = float(limit_side_len) / h else: ratio = float(limit_side_len) / w else: ratio = 1. resize_h = int(h * ratio) resize_w = int(w * ratio) resize_h = int(round(resize_h / 32) * 32) resize_w = int(round(resize_w / 32) * 32) try: if int(resize_w) <= 0 or int(resize_h) <= 0: return None, (None, None) img = cv2.resize(img, (int(resize_w), int(resize_h))) except: print(img.shape, resize_w, resize_h) sys.exit(0) ratio_h = resize_h / float(h) ratio_w = resize_w / float(w) # return img, np.array([h, w]) return img, [ratio_h, ratio_w] ``` ###Assistant : resize image to a size multiple of 32 which is required by the network args: img(array): array with shape [h, w, c] return(tuple): img, (ratio_h, ratio_w) " 2563,"def metrics(self): metrics = [] if self._is_compiled: # TODO(omalleyt): Track `LossesContainer` and `MetricsContainer` objects # so that attr names are not load-bearing. if self.compiled_loss is not None: metrics += self.compiled_loss.metrics if self.compiled_metrics is not None: metrics += self.compiled_metrics.metrics for l in self._flatten_layers(): metrics.extend(l._metrics) # pylint: disable=protected-access return metrics ","Returns the model's metrics added using `compile()`, `add_metric()` APIs. Note: Metrics passed to `compile()` are available only after a `keras.Model` has been trained/evaluated on actual data. Examples: >>> inputs = tf.keras.layers.Input(shape=(3,)) >>> outputs = tf.keras.layers.Dense(2)(inputs) >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs) >>> model.compile(optimizer=""Adam"", loss=""mse"", metrics=[""mae""]) >>> [m.name for m in model.metrics] [] >>> x = np.random.random((2, 3)) >>> y = np.random.randint(0, 2, (2, 2)) >>> model.fit(x, y) >>> [m.name for m in model.metrics] ['loss', 'mae'] >>> inputs = tf.keras.layers.Input(shape=(3,)) >>> d = tf.keras.layers.Dense(2, name='out') >>> output_1 = d(inputs) >>> output_2 = d(inputs) >>> model = tf.keras.models.Model( ... inputs=inputs, outputs=[output_1, output_2]) >>> model.add_metric( ... tf.reduce_sum(output_2), name='mean', aggregation='mean') >>> model.compile(optimizer=""Adam"", loss=""mse"", metrics=[""mae"", ""acc""]) >>> model.fit(x, (y, y)) >>> [m.name for m in model.metrics] ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae', 'out_1_acc', 'mean'] ",128,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def metrics(self): metrics = [] if self._is_compiled: # TODO(omalleyt): Track `LossesContainer` and `MetricsContainer` objects # so that attr names are not load-bearing. 
if self.compiled_loss is not None: metrics += self.compiled_loss.metrics if self.compiled_metrics is not None: metrics += self.compiled_metrics.metrics for l in self._flatten_layers(): metrics.extend(l._metrics) # pylint: disable=protected-access return metrics ``` ###Assistant : Returns the model's metrics added using `compile()`, `add_metric()` APIs. Note: Metrics passed to `compile()` are available only after a `keras.Model` has been trained/evaluated on actual data. Examples: >>> inputs = tf.keras.layers.Input(shape=(3,)) >>> outputs = tf.keras.layers.Dense(2)(inputs) >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs) >>> model.compile(optimizer=""Adam"", loss=""mse"", metrics=[""mae""]) >>> [m.name for m in model.metrics] [] >>> x = np.random.random((2, 3)) >>> y = np.random.randint(0, 2, (2, 2)) >>> model.fit(x, y) >>> [m.name for m in model.metrics] ['loss', 'mae'] >>> inputs = tf.keras.layers.Input(shape=(3,)) >>> d = tf.keras.layers.Dense(2, name='out') >>> output_1 = d(inputs) >>> output_2 = d(inputs) >>> model = tf.keras.models.Model( ... inputs=inputs, outputs=[output_1, output_2]) >>> model.add_metric( ... tf.reduce_sum(output_2), name='mean', aggregation='mean') >>> model.compile(optimizer=""Adam"", loss=""mse"", metrics=[""mae"", ""acc""]) >>> model.fit(x, (y, y)) >>> [m.name for m in model.metrics] ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae', 'out_1_acc', 'mean'] " 2564,"def set_level(request, level): if not hasattr(request, ""_messages""): return False request._messages.level = level return True "," Set the minimum level of messages to be recorded, and return ``True`` if the level was recorded successfully. If set to ``None``, use the default level (see the get_level() function). ",30,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_level(request, level): if not hasattr(request, ""_messages""): return False request._messages.level = level return True ``` ###Assistant : Set the minimum level of messages to be recorded, and return ``True`` if the level was recorded successfully. If set to ``None``, use the default level (see the get_level() function). " 2565,"def all_pairs_lowest_common_ancestor(G, pairs=None): if not nx.is_directed_acyclic_graph(G): raise nx.NetworkXError(""LCA only defined on directed acyclic graphs."") if len(G) == 0: raise nx.NetworkXPointlessConcept(""LCA meaningless on null graphs."") if pairs is None: pairs = combinations_with_replacement(G, 2) else: # Convert iterator to iterable, if necessary. Trim duplicates. pairs = dict.fromkeys(pairs) # Verify that each of the nodes in the provided pairs is in G nodeset = set(G) for pair in pairs: if set(pair) - nodeset: raise nx.NodeNotFound( f""Node(s) {set(pair) - nodeset} from pair {pair} not in G."" ) # Once input validation is done, construct the generator","Return the lowest common ancestor of all pairs or the provided pairs Parameters ---------- G : NetworkX directed graph pairs : iterable of pairs of nodes, optional (default: all pairs) The pairs of nodes of interest. If None, will find the LCA of all pairs of nodes. Yields ------ ((node1, node2), lca) : 2-tuple Where lca is least common ancestor of node1 and node2. Note that for the default case, the order of the node pair is not considered, e.g. you will not get both ``(a, b)`` and ``(b, a)`` Raises ------ NetworkXPointlessConcept If `G` is null. NetworkXError If `G` is not a DAG. 
Examples -------- The default behavior is to yield the lowest common ancestor for all possible combinations of nodes in `G`, including self-pairings: >>> G = nx.DiGraph([(0, 1), (0, 3), (1, 2)]) >>> dict(nx.all_pairs_lowest_common_ancestor(G)) {(0, 0): 0, (0, 1): 0, (0, 3): 0, (0, 2): 0, (1, 1): 1, (1, 3): 0, (1, 2): 1, (3, 3): 3, (3, 2): 0, (2, 2): 2} The pairs argument can be used to limit the output to only the specified node pairings: >>> dict(nx.all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (2, 3)])) {(1, 2): 1, (2, 3): 0} Notes ----- Only defined on non-null directed acyclic graphs. See Also -------- lowest_common_ancestor ",208,92,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def all_pairs_lowest_common_ancestor(G, pairs=None): if not nx.is_directed_acyclic_graph(G): raise nx.NetworkXError(""LCA only defined on directed acyclic graphs."") if len(G) == 0: raise nx.NetworkXPointlessConcept(""LCA meaningless on null graphs."") if pairs is None: pairs = combinations_with_replacement(G, 2) else: # Convert iterator to iterable, if necessary. Trim duplicates. pairs = dict.fromkeys(pairs) # Verify that each of the nodes in the provided pairs is in G nodeset = set(G) for pair in pairs: if set(pair) - nodeset: raise nx.NodeNotFound( f""Node(s) {set(pair) - nodeset} from pair {pair} not in G."" ) # Once input validation is done, construct the generator ``` ###Assistant : Return the lowest common ancestor of all pairs or the provided pairs Parameters ---------- G : NetworkX directed graph pairs : iterable of pairs of nodes, optional (default: all pairs) The pairs of nodes of interest. If None, will find the LCA of all pairs of nodes. Yields ------ ((node1, node2), lca) : 2-tuple Where lca is least common ancestor of node1 and node2. Note that for the default case, the order of the node pair is not considered, e.g. you will not get both ``(a, b)`` and ``(b, a)`` Raises ------ NetworkXPointlessConcept If `G` is null. NetworkXError If `G` is not a DAG. Examples -------- The default behavior is to yield the lowest common ancestor for all possible combinations of nodes in `G`, including self-pairings: >>> G = nx.DiGraph([(0, 1), (0, 3), (1, 2)]) >>> dict(nx.all_pairs_lowest_common_ancestor(G)) {(0, 0): 0, (0, 1): 0, (0, 3): 0, (0, 2): 0, (1, 1): 1, (1, 3): 0, (1, 2): 1, (3, 3): 3, (3, 2): 0, (2, 2): 2} The pairs argument can be used to limit the output to only the specified node pairings: >>> dict(nx.all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (2, 3)])) {(1, 2): 1, (2, 3): 0} Notes ----- Only defined on non-null directed acyclic graphs. See Also -------- lowest_common_ancestor " 2566,"async def test_is_pickleable_after_start(self, task_runner): task_runner.client_kwargs[""set_as_default""] = True"," The task_runner must be picklable as it is attached to `PrefectFuture` objects Reimplemented to set Dask client as default to allow unpickling ",22,7,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_is_pickleable_after_start(self, task_runner): task_runner.client_kwargs[""set_as_default""] = True ``` ###Assistant : The task_runner must be picklable as it is attached to `PrefectFuture` objects Reimplemented to set Dask client as default to allow unpickling " 2567,"def _add_callback_signalsafe(self, handle): self._add_callback(handle) self._write_to_self() ",Like _add_callback() but called from a signal handler.,8,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _add_callback_signalsafe(self, handle): self._add_callback(handle) self._write_to_self() ``` ###Assistant : Like _add_callback() but called from a signal handler. " 2568,"def get_local_ffmpeg() -> Optional[Path]: ffmpeg_path = Path( get_spotdl_path(), ""ffmpeg"" + "".exe"" if platform.system() == ""Windows"" else """" ) if ffmpeg_path.is_file(): return ffmpeg_path return None "," Get local ffmpeg binary path or None if not found. ",10,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_local_ffmpeg() -> Optional[Path]: ffmpeg_path = Path( get_spotdl_path(), ""ffmpeg"" + "".exe"" if platform.system() == ""Windows"" else """" ) if ffmpeg_path.is_file(): return ffmpeg_path return None ``` ###Assistant : Get local ffmpeg binary path or None if not found. " 2569,"def _render_cmd(cmd, cwd, template, saltenv=None, pillarenv=None, pillar_override=None): if saltenv is None: saltenv = __opts__.get(""saltenv"", ""base"") if not template: return (cmd, cwd) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( ""Attempted to render file paths with unavailable engine {}"".format(template) ) kwargs = {} kwargs[""salt""] = __salt__ if pillarenv is not None or pillar_override is not None: pillarenv = pillarenv or __opts__[""pillarenv""] kwargs[""pillar""] = _gather_pillar(pillarenv, pillar_override) else: kwargs[""pillar""] = __pillar__ kwargs[""grains""] = __grains__ kwargs[""opts""] = __opts__ kwargs[""saltenv""] = saltenv "," If template is a valid template engine, process the cmd and cwd through that engine. ",15,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _render_cmd(cmd, cwd, template, saltenv=None, pillarenv=None, pillar_override=None): if saltenv is None: saltenv = __opts__.get(""saltenv"", ""base"") if not template: return (cmd, cwd) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( ""Attempted to render file paths with unavailable engine {}"".format(template) ) kwargs = {} kwargs[""salt""] = __salt__ if pillarenv is not None or pillar_override is not None: pillarenv = pillarenv or __opts__[""pillarenv""] kwargs[""pillar""] = _gather_pillar(pillarenv, pillar_override) else: kwargs[""pillar""] = __pillar__ kwargs[""grains""] = __grains__ kwargs[""opts""] = __opts__ kwargs[""saltenv""] = saltenv ``` ###Assistant : If template is a valid template engine, process the cmd and cwd through that engine. 
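Relatedly, the `get_local_ffmpeg` row a little earlier looks up a bundled ffmpeg binary; the sketch below shows the same lookup with the conditional parenthesised so the intended precedence is explicit. `base_dir` is a hypothetical stand-in for spotdl's `get_spotdl_path()`, which is not assumed to be importable here.

```Python
import platform
from pathlib import Path
from typing import Optional

def find_local_ffmpeg(base_dir: Path) -> Optional[Path]:
    # "ffmpeg.exe" on Windows, plain "ffmpeg" elsewhere; parentheses make the intent unambiguous
    name = "ffmpeg" + (".exe" if platform.system() == "Windows" else "")
    candidate = base_dir / name
    return candidate if candidate.is_file() else None

print(find_local_ffmpeg(Path.home() / ".spotdl"))  # None when no local binary is present
```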
" 2570,"def test_windows_1252(self) -> None: html = b tree = decode_body(html, ""http://example.com/test.html"") og = parse_html_to_open_graph(tree, ""http://example.com/test.html"") self.assertEqual(og, {""og:title"": ""ó"", ""og:description"": ""Some text.""}) ","A body which uses cp1252, but doesn't declare that. \xf3 Some text. ",16,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_windows_1252(self) -> None: html = b tree = decode_body(html, ""http://example.com/test.html"") og = parse_html_to_open_graph(tree, ""http://example.com/test.html"") self.assertEqual(og, {""og:title"": ""ó"", ""og:description"": ""Some text.""}) ``` ###Assistant : A body which uses cp1252, but doesn't declare that. \xf3 Some text. " 2571,"def square_root(value, default=_SENTINEL): try: return math.sqrt(float(value)) except (ValueError, TypeError): if default is _SENTINEL: raise_no_default(""sqrt"", value) return default ",Filter and function to get square root of the value.,10,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def square_root(value, default=_SENTINEL): try: return math.sqrt(float(value)) except (ValueError, TypeError): if default is _SENTINEL: raise_no_default(""sqrt"", value) return default ``` ###Assistant : Filter and function to get square root of the value. " 2572,"async def test_track_task_functions(event_loop): hass = ha.HomeAssistant() try: assert hass._track_task hass.async_stop_track_tasks() assert not hass._track_task hass.async_track_tasks() assert hass._track_task finally: await hass.async_stop() ",Test function to start/stop track task and initial state.,9,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_track_task_functions(event_loop): hass = ha.HomeAssistant() try: assert hass._track_task hass.async_stop_track_tasks() assert not hass._track_task hass.async_track_tasks() assert hass._track_task finally: await hass.async_stop() ``` ###Assistant : Test function to start/stop track task and initial state. " 2573,"def media_position_updated_at(self) -> datetime | None: if self._device.movie.play_status in KALEIDESCAPE_PLAYING_STATES: return utcnow() return None ",When was the position of the current playing media valid.,10,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def media_position_updated_at(self) -> datetime | None: if self._device.movie.play_status in KALEIDESCAPE_PLAYING_STATES: return utcnow() return None ``` ###Assistant : When was the position of the current playing media valid. " 2574,"def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): raise TypeError(f""data: expecting a bytes-like instance, "" f""got {type(data).__name__}"") if not data: return self._ssl_protocol._write_appdata(data) ","Write some data bytes to the transport. This does not block; it buffers the data and arranges for it to be sent out asynchronously. ",24,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): raise TypeError(f""data: expecting a bytes-like instance, "" f""got {type(data).__name__}"") if not data: return self._ssl_protocol._write_appdata(data) ``` ###Assistant : Write some data bytes to the transport. 
This does not block; it buffers the data and arranges for it to be sent out asynchronously. " 2575,"def _extract_color_tags(self): tags = re.finditer( r'(.+?)', self.original_text, re.S, ) colormap = [] for tag in tags: start = self._count_real_chars(self.original_text[: tag.start(0)]) end = start + self._count_real_chars(tag.group(4)) offsets = tag.group(3).split("","") if tag.group(3) else [0] start_offset = int(offsets[0]) if offsets[0] else 0 end_offset = int(offsets[1]) if len(offsets) == 2 and offsets[1] else 0 colormap.append( { ""start"": start, ""end"": end, ""color"": tag.group(1), ""start_offset"": start_offset, ""end_offset"": end_offset, }, ) self.text = re.sub(""]+>(.+?)"", r""\1"", self.text, 0, re.S) return colormap ","Used to determine which parts (if any) of the string should be formatted with a custom color. Removes the ```` tag, as it is not part of Pango's markup and would cause an error. Note: Using the ```` tags is deprecated. As soon as the legacy syntax is gone, this function will be removed. ",54,73,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _extract_color_tags(self): tags = re.finditer( r'(.+?)', self.original_text, re.S, ) colormap = [] for tag in tags: start = self._count_real_chars(self.original_text[: tag.start(0)]) end = start + self._count_real_chars(tag.group(4)) offsets = tag.group(3).split("","") if tag.group(3) else [0] start_offset = int(offsets[0]) if offsets[0] else 0 end_offset = int(offsets[1]) if len(offsets) == 2 and offsets[1] else 0 colormap.append( { ""start"": start, ""end"": end, ""color"": tag.group(1), ""start_offset"": start_offset, ""end_offset"": end_offset, }, ) self.text = re.sub(""]+>(.+?)"", r""\1"", self.text, 0, re.S) return colormap ``` ###Assistant : Used to determine which parts (if any) of the string should be formatted with a custom color. Removes the ```` tag, as it is not part of Pango's markup and would cause an error. Note: Using the ```` tags is deprecated. As soon as the legacy syntax is gone, this function will be removed. " 2576,"def write_readme(self, file_path, parametric_eq_peqs=None, fixed_band_eq_peq=None): file_path = os.path.abspath(file_path) dir_path = os.path.dirname(file_path) model = self.name # Write model s = '# {}\n'.format(model) s += 'See [usage instructions](https://github.com/jaakkopasanen/AutoEq#usage) for more options and info.\n\n' # Add parametric EQ settings if parametric_eq_peqs is not None: s += '### Parametric EQs\n' if len(parametric_eq_peqs) > 1: compound = PEQ(self.frequency.copy(), parametric_eq_peqs[0].fs, []) n = 0 filter_ranges = '' preamps = '' for i, peq in enumerate(parametric_eq_peqs): for filt in peq.filters: compound.add_filter(filt) filter_ranges += f'1-{len(peq.filters) + n}' preamps += f'{-compound.max_gain - 0.1:.1f} dB' if i < len(parametric_eq_peqs) - 2: filter_ranges += ', ' preamps += ', ' elif i == len(parametric_eq_peqs) - 2: filter_ranges += ' or ' preamps += ' or ' n += len(peq.filters) s += f'You can use filters {filter_ranges}. 
Apply preamp of {preamps}, respectively.\n\n' else: compound = PEQ(self.frequency.copy(), parametric_eq_peqs[0].fs, []) for peq in parametric_eq_peqs: for filt in peq.filters: compound.add_filter(filt) s += f'Apply preamp of -{compound.max_gain + 0.1:.1f} dB when using parametric equalizer.\n\n' s += compound.markdown_table() + '\n\n' # Add fixed band eq if fixed_band_eq_peq is not None: s += f'### Fixed Band EQs\nWhen using fixed band (also called graphic) equalizer, apply preamp of ' \ f'**-{fixed_band_eq_peq.max_gain + 0.1:.1f} dB** (if available) and set gains manually with these ' \ f'parameters.\n\n{fixed_band_eq_peq.markdown_table()}\n\n' # Write image link img_path = os.path.join(dir_path, model + '.png') if os.path.isfile(img_path): img_url = f'./{os.path.split(img_path)[1]}' img_url = urllib.parse.quote(img_url, safe=""%/:=&?~#+!$,;'@()*[]"") s += f'### Graphs\n![]({img_url})\n' # Write file with open(file_path, 'w', encoding='utf-8') as f: f.write(s) ",Writes README.md with picture and Equalizer APO settings.,8,239,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def write_readme(self, file_path, parametric_eq_peqs=None, fixed_band_eq_peq=None): file_path = os.path.abspath(file_path) dir_path = os.path.dirname(file_path) model = self.name # Write model s = '# {}\n'.format(model) s += 'See [usage instructions](https://github.com/jaakkopasanen/AutoEq#usage) for more options and info.\n\n' # Add parametric EQ settings if parametric_eq_peqs is not None: s += '### Parametric EQs\n' if len(parametric_eq_peqs) > 1: compound = PEQ(self.frequency.copy(), parametric_eq_peqs[0].fs, []) n = 0 filter_ranges = '' preamps = '' for i, peq in enumerate(parametric_eq_peqs): for filt in peq.filters: compound.add_filter(filt) filter_ranges += f'1-{len(peq.filters) + n}' preamps += f'{-compound.max_gain - 0.1:.1f} dB' if i < len(parametric_eq_peqs) - 2: filter_ranges += ', ' preamps += ', ' elif i == len(parametric_eq_peqs) - 2: filter_ranges += ' or ' preamps += ' or ' n += len(peq.filters) s += f'You can use filters {filter_ranges}. Apply preamp of {preamps}, respectively.\n\n' else: compound = PEQ(self.frequency.copy(), parametric_eq_peqs[0].fs, []) for peq in parametric_eq_peqs: for filt in peq.filters: compound.add_filter(filt) s += f'Apply preamp of -{compound.max_gain + 0.1:.1f} dB when using parametric equalizer.\n\n' s += compound.markdown_table() + '\n\n' # Add fixed band eq if fixed_band_eq_peq is not None: s += f'### Fixed Band EQs\nWhen using fixed band (also called graphic) equalizer, apply preamp of ' \ f'**-{fixed_band_eq_peq.max_gain + 0.1:.1f} dB** (if available) and set gains manually with these ' \ f'parameters.\n\n{fixed_band_eq_peq.markdown_table()}\n\n' # Write image link img_path = os.path.join(dir_path, model + '.png') if os.path.isfile(img_path): img_url = f'./{os.path.split(img_path)[1]}' img_url = urllib.parse.quote(img_url, safe=""%/:=&?~#+!$,;'@()*[]"") s += f'### Graphs\n![]({img_url})\n' # Write file with open(file_path, 'w', encoding='utf-8') as f: f.write(s) ``` ###Assistant : Writes README.md with picture and Equalizer APO settings. 
" 2577,"def _get_execution_environment(): if os.environ.get(""CI"", ""False"").lower() == ""true"": execution_env = ""ci"" elif ""google.colab"" in sys.modules: execution_env = ""colab"" elif ""KUBERNETES_SERVICE_HOST"" in os.environ: execution_env = ""kubernetes"" elif HAYSTACK_DOCKER_CONTAINER in os.environ: execution_env = os.environ.get(HAYSTACK_DOCKER_CONTAINER) # check if pytest is imported elif ""pytest"" in sys.modules: execution_env = ""test"" else: try: execution_env = get_ipython().__class__.__name__ # pylint: disable=undefined-variable except NameError: execution_env = ""script"" return execution_env "," Identifies the execution environment that Haystack is running in. Options are: colab notebook, kubernetes, CPU/GPU docker container, test environment, jupyter notebook, python script ",23,59,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_execution_environment(): if os.environ.get(""CI"", ""False"").lower() == ""true"": execution_env = ""ci"" elif ""google.colab"" in sys.modules: execution_env = ""colab"" elif ""KUBERNETES_SERVICE_HOST"" in os.environ: execution_env = ""kubernetes"" elif HAYSTACK_DOCKER_CONTAINER in os.environ: execution_env = os.environ.get(HAYSTACK_DOCKER_CONTAINER) # check if pytest is imported elif ""pytest"" in sys.modules: execution_env = ""test"" else: try: execution_env = get_ipython().__class__.__name__ # pylint: disable=undefined-variable except NameError: execution_env = ""script"" return execution_env ``` ###Assistant : Identifies the execution environment that Haystack is running in. Options are: colab notebook, kubernetes, CPU/GPU docker container, test environment, jupyter notebook, python script " 2578,"def _ensure_html_response(url, session): # type: (str, PipSession) -> None scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url) if scheme not in {'http', 'https'}: raise _NotHTTP() resp = session.head(url, allow_redirects=True) raise_for_status(resp) _ensure_html_header(resp) ","Send a HEAD request to the URL, and ensure the response contains HTML. Raises `_NotHTTP` if the URL is not available for a HEAD request, or `_NotHTML` if the content type is not text/html. ",34,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _ensure_html_response(url, session): # type: (str, PipSession) -> None scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url) if scheme not in {'http', 'https'}: raise _NotHTTP() resp = session.head(url, allow_redirects=True) raise_for_status(resp) _ensure_html_header(resp) ``` ###Assistant : Send a HEAD request to the URL, and ensure the response contains HTML. Raises `_NotHTTP` if the URL is not available for a HEAD request, or `_NotHTML` if the content type is not text/html. " 2579,"def _parse_item(self) -> Optional[Tuple[Optional[Key], Item]]: self.mark() with self._state as state: while True: c = self._current if c == ""\n"": # Found a newline; Return all whitespace found up to this point. self.inc() return None, Whitespace(self.extract()) elif c in "" \t\r"": # Skip whitespace. if not self.inc(): return None, Whitespace(self.extract()) elif c == ""#"": # Found a comment, parse it indent = self.extract() cws, comment, trail = self._parse_comment_trail() return None, Comment(Trivia(indent, cws, comment, trail)) elif c == ""["": # Found a table, delegate to the calling function. return else: # Begining of a KV pair. 
# Return to beginning of whitespace so it gets included # as indentation for the KV about to be parsed. state.restore = True break return self._parse_key_value(True) "," Attempts to parse the next item and returns it, along with its key if the item is value-like. ",18,120,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _parse_item(self) -> Optional[Tuple[Optional[Key], Item]]: self.mark() with self._state as state: while True: c = self._current if c == ""\n"": # Found a newline; Return all whitespace found up to this point. self.inc() return None, Whitespace(self.extract()) elif c in "" \t\r"": # Skip whitespace. if not self.inc(): return None, Whitespace(self.extract()) elif c == ""#"": # Found a comment, parse it indent = self.extract() cws, comment, trail = self._parse_comment_trail() return None, Comment(Trivia(indent, cws, comment, trail)) elif c == ""["": # Found a table, delegate to the calling function. return else: # Begining of a KV pair. # Return to beginning of whitespace so it gets included # as indentation for the KV about to be parsed. state.restore = True break return self._parse_key_value(True) ``` ###Assistant : Attempts to parse the next item and returns it, along with its key if the item is value-like. " 2580,"def _xreplace(self, rule): if self in rule: return rule[self], True elif rule: rule = self._dedupe_indices_in_rule(rule) args = [] changed = False for a in self.args: _xreplace = getattr(a, '_xreplace', None) if _xreplace is not None: a_xr = _xreplace(rule) args.append(a_xr[0]) changed |= a_xr[1] else: args.append(a) args = tuple(args) if changed: return self.func(*args), True return self, False "," Helper for xreplace. Tracks whether a replacement actually occurred. Given that the rule has entries {old:new, ...}, this handles the fact that if a dummy index in new is the same as an index in self, the dummy index in new must be renamed. ",44,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _xreplace(self, rule): if self in rule: return rule[self], True elif rule: rule = self._dedupe_indices_in_rule(rule) args = [] changed = False for a in self.args: _xreplace = getattr(a, '_xreplace', None) if _xreplace is not None: a_xr = _xreplace(rule) args.append(a_xr[0]) changed |= a_xr[1] else: args.append(a) args = tuple(args) if changed: return self.func(*args), True return self, False ``` ###Assistant : Helper for xreplace. Tracks whether a replacement actually occurred. Given that the rule has entries {old:new, ...}, this handles the fact that if a dummy index in new is the same as an index in self, the dummy index in new must be renamed. " 2581,"async def async_turn_on(self) -> None: await self._client.play() await self._update_playlists(no_throttle=True) ",Service to send the MPD the command to start playing.,10,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_turn_on(self) -> None: await self._client.play() await self._update_playlists(no_throttle=True) ``` ###Assistant : Service to send the MPD the command to start playing. " 2582,"def _build_network_on_replica(model, mode, inputs=None, targets=None): # Need to do imports here since we run into a circular dependency error. 
from keras import models # pylint: disable=g-import-not-at-top from keras.engine import sequential # pylint: disable=g-import-not-at-top # We rely on the internal methods to avoid having share_weights weights in the # public API. if isinstance(model, sequential.Sequential): updated_model = models._clone_sequential_model( model, input_tensors=inputs, layer_fn=models.share_weights ) else: updated_model = models._clone_functional_model( model, input_tensors=inputs, layer_fn=models.share_weights ) # Callable losses added directly to a functional Model need to be added # here. updated_model._callable_losses = model._callable_losses # Recast all low precision outputs back to float32 since we only casted # the inputs to bfloat16 and not targets. This is done so that we can preserve # precision when calculating the loss value.","Build an updated model on replicas. We create a new Keras model while sharing the variables from the old graph. Building a new sub-graph is required since the original keras model creates placeholders for the input and the output that are not accessible till we call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`. The sharing of weights and layers between the old and the new model guarantee that we're using Strategy variables and any updates on either model are reflected correctly in callbacks and loop iterations. We need to make sure we share the optimizers between the old and the new model as well so that optimizer state is not lost if the user is running fit multiple times. Args: model: Model to be replicated across Replicas mode: Which of fit/eval/predict is building the distributed network inputs: Input variables to be passed to the model targets: Target tensor to be passed to model.compile Returns: A new model with shared layers with the old model. ",163,122,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _build_network_on_replica(model, mode, inputs=None, targets=None): # Need to do imports here since we run into a circular dependency error. from keras import models # pylint: disable=g-import-not-at-top from keras.engine import sequential # pylint: disable=g-import-not-at-top # We rely on the internal methods to avoid having share_weights weights in the # public API. if isinstance(model, sequential.Sequential): updated_model = models._clone_sequential_model( model, input_tensors=inputs, layer_fn=models.share_weights ) else: updated_model = models._clone_functional_model( model, input_tensors=inputs, layer_fn=models.share_weights ) # Callable losses added directly to a functional Model need to be added # here. updated_model._callable_losses = model._callable_losses # Recast all low precision outputs back to float32 since we only casted # the inputs to bfloat16 and not targets. This is done so that we can preserve # precision when calculating the loss value. ``` ###Assistant : Build an updated model on replicas. We create a new Keras model while sharing the variables from the old graph. Building a new sub-graph is required since the original keras model creates placeholders for the input and the output that are not accessible till we call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`. The sharing of weights and layers between the old and the new model guarantee that we're using Strategy variables and any updates on either model are reflected correctly in callbacks and loop iterations. 
We need to make sure we share the optimizers between the old and the new model as well so that optimizer state is not lost if the user is running fit multiple times. Args: model: Model to be replicated across Replicas mode: Which of fit/eval/predict is building the distributed network inputs: Input variables to be passed to the model targets: Target tensor to be passed to model.compile Returns: A new model with shared layers with the old model. " 2583,"def can_jit_compile(warn=False): if platform.system() == ""Darwin"" and ""arm"" in platform.processor().lower(): if warn: logging.warning( ""Tensorflow is not compiled with XLA on Mac M1 Arm processors, "" ""so cannot set `jit_compile` to True."" ) return False return True ",Returns True if TensorFlow XLA is available for the platform.,10,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def can_jit_compile(warn=False): if platform.system() == ""Darwin"" and ""arm"" in platform.processor().lower(): if warn: logging.warning( ""Tensorflow is not compiled with XLA on Mac M1 Arm processors, "" ""so cannot set `jit_compile` to True."" ) return False return True ``` ###Assistant : Returns True if TensorFlow XLA is available for the platform. " 2584,"def __setstate__(self, state) -> None: # TODO (sven): Validate that our config and the config in state are compatible. # For example, the model architectures may differ. # Also, what should the behavior be if e.g. some training parameter # (e.g. lr) changed? if hasattr(self, ""workers"") and ""worker"" in state: self.workers.local_worker().set_state(state[""worker""]) remote_state = ray.put(state[""worker""]) for r in self.workers.remote_workers(): r.set_state.remote(remote_state) if self.evaluation_workers: # If evaluation workers are used, also restore the policies # there in case they are used for evaluation purpose. for r in self.evaluation_workers.remote_workers(): r.set_state.remote(remote_state) # If necessary, restore replay data as well. if self.local_replay_buffer is not None: # TODO: Experimental functionality: Restore contents of replay # buffer from checkpoint, only if user has configured this. if self.config.get(""store_buffer_in_checkpoints""): if ""local_replay_buffer"" in state: self.local_replay_buffer.set_state(state[""local_replay_buffer""]) else: logger.warning( ""`store_buffer_in_checkpoints` is True, but no replay "" ""data found in state!"" ) elif ""local_replay_buffer"" in state and log_once( ""no_store_buffer_in_checkpoints_but_data_found"" ): logger.warning( ""`store_buffer_in_checkpoints` is False, but some replay "" ""data found in state!"" ) if self.train_exec_impl is not None: self.train_exec_impl.shared_metrics.get().restore(state[""train_exec_impl""]) ","Sets the algorithm to the provided state. Args: state: The state dict to restore this Algorithm instance to. `state` may have been returned by a call to an Algorithm's `__getstate__()` method. ",31,165,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __setstate__(self, state) -> None: # TODO (sven): Validate that our config and the config in state are compatible. # For example, the model architectures may differ. # Also, what should the behavior be if e.g. some training parameter # (e.g. lr) changed? 
if hasattr(self, ""workers"") and ""worker"" in state: self.workers.local_worker().set_state(state[""worker""]) remote_state = ray.put(state[""worker""]) for r in self.workers.remote_workers(): r.set_state.remote(remote_state) if self.evaluation_workers: # If evaluation workers are used, also restore the policies # there in case they are used for evaluation purpose. for r in self.evaluation_workers.remote_workers(): r.set_state.remote(remote_state) # If necessary, restore replay data as well. if self.local_replay_buffer is not None: # TODO: Experimental functionality: Restore contents of replay # buffer from checkpoint, only if user has configured this. if self.config.get(""store_buffer_in_checkpoints""): if ""local_replay_buffer"" in state: self.local_replay_buffer.set_state(state[""local_replay_buffer""]) else: logger.warning( ""`store_buffer_in_checkpoints` is True, but no replay "" ""data found in state!"" ) elif ""local_replay_buffer"" in state and log_once( ""no_store_buffer_in_checkpoints_but_data_found"" ): logger.warning( ""`store_buffer_in_checkpoints` is False, but some replay "" ""data found in state!"" ) if self.train_exec_impl is not None: self.train_exec_impl.shared_metrics.get().restore(state[""train_exec_impl""]) ``` ###Assistant : Sets the algorithm to the provided state. Args: state: The state dict to restore this Algorithm instance to. `state` may have been returned by a call to an Algorithm's `__getstate__()` method. " 2585,"def test_http2_client_aborts(tctx, stream, when, how): server = Placeholder(Server) flow = Placeholder(HTTPFlow) playbook, cff = start_h2_client(tctx) resp = Placeholder(bytes) "," Test handling of the case where a client aborts during request or response transmission. If the client aborts the request transmission, we must trigger an error hook, if the client disconnects during response transmission, no error hook is triggered. ",39,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_http2_client_aborts(tctx, stream, when, how): server = Placeholder(Server) flow = Placeholder(HTTPFlow) playbook, cff = start_h2_client(tctx) resp = Placeholder(bytes) ``` ###Assistant : Test handling of the case where a client aborts during request or response transmission. If the client aborts the request transmission, we must trigger an error hook, if the client disconnects during response transmission, no error hook is triggered. " 2586,"def get_changelist_instance(self, request): list_display = self.get_list_display(request) list_display_links = self.get_list_display_links(request, list_display) # Add the action checkboxes if any actions are available. if self.get_actions(request): list_display = [""action_checkbox"", *list_display] sortable_by = self.get_sortable_by(request) ChangeList = self.get_changelist(request) return ChangeList( request, self.model, list_display, list_display_links, self.get_list_filter(request), self.date_hierarchy, self.get_search_fields(request), self.get_list_select_related(request), self.list_per_page, self.list_max_show_all, self.list_editable, self, sortable_by, self.search_help_text, ) "," Return a `ChangeList` instance based on `request`. May raise `IncorrectLookupParameters`. ",10,49,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_changelist_instance(self, request): list_display = self.get_list_display(request) list_display_links = self.get_list_display_links(request, list_display) # Add the action checkboxes if any actions are available. if self.get_actions(request): list_display = [""action_checkbox"", *list_display] sortable_by = self.get_sortable_by(request) ChangeList = self.get_changelist(request) return ChangeList( request, self.model, list_display, list_display_links, self.get_list_filter(request), self.date_hierarchy, self.get_search_fields(request), self.get_list_select_related(request), self.list_per_page, self.list_max_show_all, self.list_editable, self, sortable_by, self.search_help_text, ) ``` ###Assistant : Return a `ChangeList` instance based on `request`. May raise `IncorrectLookupParameters`. " 2587,"def _use_cholesky(u, m, n, params): a, b, c = params _, N = u.shape x = c * (u.T.conj() @ u) + jnp.eye(N, dtype=jnp.dtype(u)) # Pads the lower-right corner with the identity matrix to prevent the Cholesky # decomposition from failing due to the matrix not being PSD if padded with # zeros. x = _mask(x, (n, n), jnp.eye(N, dtype=x.dtype)) # `y` is lower triangular. y = lax_linalg.cholesky(x, symmetrize_input=False) z = lax_linalg.triangular_solve( y, u.T, left_side=True, lower=True, conjugate_a=True).conj() z = lax_linalg.triangular_solve(y, z, left_side=True, lower=True, transpose_a=True, conjugate_a=True).T.conj() e = b / c u = e * u + (a - e) * z return u ","QDWH iteration using Cholesky decomposition. Args: u: a matrix, with static (padded) shape M x N m, n: the dynamic shape of the matrix, where m <= M and n <= N. params: the QDWH parameters. ",36,103,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _use_cholesky(u, m, n, params): a, b, c = params _, N = u.shape x = c * (u.T.conj() @ u) + jnp.eye(N, dtype=jnp.dtype(u)) # Pads the lower-right corner with the identity matrix to prevent the Cholesky # decomposition from failing due to the matrix not being PSD if padded with # zeros. x = _mask(x, (n, n), jnp.eye(N, dtype=x.dtype)) # `y` is lower triangular. y = lax_linalg.cholesky(x, symmetrize_input=False) z = lax_linalg.triangular_solve( y, u.T, left_side=True, lower=True, conjugate_a=True).conj() z = lax_linalg.triangular_solve(y, z, left_side=True, lower=True, transpose_a=True, conjugate_a=True).T.conj() e = b / c u = e * u + (a - e) * z return u ``` ###Assistant : QDWH iteration using Cholesky decomposition. Args: u: a matrix, with static (padded) shape M x N m, n: the dynamic shape of the matrix, where m <= M and n <= N. params: the QDWH parameters. " 2588,"def piecewise_integrate(self, x, **kwargs): from sympy.integrals import integrate return self.func(*[(integrate(e, x, **kwargs), c) for e, c in self.args]) ","Return the Piecewise with each expression being replaced with its antiderivative. To obtain a continuous antiderivative, use the :func:`~.integrate` function or method. Examples ======== >>> from sympy import Piecewise >>> from sympy.abc import x >>> p = Piecewise((0, x < 0), (1, x < 1), (2, True)) >>> p.piecewise_integrate(x) Piecewise((0, x < 0), (x, x < 1), (2*x, True)) Note that this does not give a continuous function, e.g. 
at x = 1 the 3rd condition applies and the antiderivative there is 2*x so the value of the antiderivative is 2: >>> anti = _ >>> anti.subs(x, 1) 2 The continuous derivative accounts for the integral *up to* the point of interest, however: >>> p.integrate(x) Piecewise((0, x < 0), (x, x < 1), (2*x - 1, True)) >>> _.subs(x, 1) 1 See Also ======== Piecewise._eval_integral ",135,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def piecewise_integrate(self, x, **kwargs): from sympy.integrals import integrate return self.func(*[(integrate(e, x, **kwargs), c) for e, c in self.args]) ``` ###Assistant : Return the Piecewise with each expression being replaced with its antiderivative. To obtain a continuous antiderivative, use the :func:`~.integrate` function or method. Examples ======== >>> from sympy import Piecewise >>> from sympy.abc import x >>> p = Piecewise((0, x < 0), (1, x < 1), (2, True)) >>> p.piecewise_integrate(x) Piecewise((0, x < 0), (x, x < 1), (2*x, True)) Note that this does not give a continuous function, e.g. at x = 1 the 3rd condition applies and the antiderivative there is 2*x so the value of the antiderivative is 2: >>> anti = _ >>> anti.subs(x, 1) 2 The continuous derivative accounts for the integral *up to* the point of interest, however: >>> p.integrate(x) Piecewise((0, x < 0), (x, x < 1), (2*x - 1, True)) >>> _.subs(x, 1) 1 See Also ======== Piecewise._eval_integral " 2589,"def check_send_to_ereader(entry): formats = list() book_formats = list() if len(entry.data): for ele in iter(entry.data): if ele.uncompressed_size < config.mail_size: formats.append(ele.format) if 'EPUB' in formats: book_formats.append({'format': 'Epub', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Epub')}) if 'MOBI' in formats: book_formats.append({'format': 'Mobi', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Mobi')}) if 'PDF' in formats: book_formats.append({'format': 'Pdf', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Pdf')}) if 'AZW' in formats: book_formats.append({'format': 'Azw', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Azw')}) if config.config_converterpath: book_formats.extend(check_send_to_ereader_with_converter(formats)) return book_formats else: log.error(u'Cannot find book entry %d', entry.id) return None # Check if a reader is existing for any of the book formats, if not, return empty list, otherwise return # list with supported formats"," returns all available book formats for sending to E-Reader ",9,114,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def check_send_to_ereader(entry): formats = list() book_formats = list() if len(entry.data): for ele in iter(entry.data): if ele.uncompressed_size < config.mail_size: formats.append(ele.format) if 'EPUB' in formats: book_formats.append({'format': 'Epub', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Epub')}) if 'MOBI' in formats: book_formats.append({'format': 'Mobi', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Mobi')}) if 'PDF' in formats: book_formats.append({'format': 'Pdf', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Pdf')}) if 'AZW' in formats: book_formats.append({'format': 'Azw', 'convert': 0, 'text': _('Send %(format)s to E-Reader', format='Azw')}) if config.config_converterpath: book_formats.extend(check_send_to_ereader_with_converter(formats)) return book_formats else: log.error(u'Cannot find book entry %d', entry.id) return None # Check if a reader is existing for any of the book formats, if not, return empty list, otherwise return # list with supported formats ``` ###Assistant : returns all available book formats for sending to E-Reader " 2590,"def Multinomial(syms, n, *p): if not isinstance(p[0], list): p = (list(p), ) return multivariate_rv(MultinomialDistribution, syms, n, p[0]) #------------------------------------------------------------------------------- # Negative Multinomial Distribution -------------------------------------------- "," Creates a discrete random variable with Multinomial Distribution. The density of the said distribution can be found at [1]. Parameters ========== n : Positive integer Represents number of trials p : List of event probabilites Must be in the range of [0, 1] Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import density, Multinomial, marginal_distribution >>> from sympy import symbols >>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True) >>> p1, p2, p3 = symbols('p1, p2, p3', positive=True) >>> M = Multinomial('M', 3, p1, p2, p3) >>> density(M)(x1, x2, x3) Piecewise((6*p1**x1*p2**x2*p3**x3/(factorial(x1)*factorial(x2)*factorial(x3)), Eq(x1 + x2 + x3, 3)), (0, True)) >>> marginal_distribution(M, M[0])(x1).subs(x1, 1) 3*p1*p2**2 + 6*p1*p2*p3 + 3*p1*p3**2 References ========== .. [1] https://en.wikipedia.org/wiki/Multinomial_distribution .. [2] http://mathworld.wolfram.com/MultinomialDistribution.html ",117,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def Multinomial(syms, n, *p): if not isinstance(p[0], list): p = (list(p), ) return multivariate_rv(MultinomialDistribution, syms, n, p[0]) #------------------------------------------------------------------------------- # Negative Multinomial Distribution -------------------------------------------- ``` ###Assistant : Creates a discrete random variable with Multinomial Distribution. The density of the said distribution can be found at [1]. 
Parameters ========== n : Positive integer Represents number of trials p : List of event probabilites Must be in the range of [0, 1] Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import density, Multinomial, marginal_distribution >>> from sympy import symbols >>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True) >>> p1, p2, p3 = symbols('p1, p2, p3', positive=True) >>> M = Multinomial('M', 3, p1, p2, p3) >>> density(M)(x1, x2, x3) Piecewise((6*p1**x1*p2**x2*p3**x3/(factorial(x1)*factorial(x2)*factorial(x3)), Eq(x1 + x2 + x3, 3)), (0, True)) >>> marginal_distribution(M, M[0])(x1).subs(x1, 1) 3*p1*p2**2 + 6*p1*p2*p3 + 3*p1*p3**2 References ========== .. [1] https://en.wikipedia.org/wiki/Multinomial_distribution .. [2] http://mathworld.wolfram.com/MultinomialDistribution.html " 2591,"def psi_n(n, x, m, omega): # sympify arguments n, x, m, omega = map(S, [n, x, m, omega]) nu = m * omega / hbar # normalization coefficient C = (nu/pi)**Rational(1, 4) * sqrt(1/(2**n*factorial(n))) return C * exp(-nu* x**2 /2) * hermite(n, sqrt(nu)*x) "," Returns the wavefunction psi_{n} for the One-dimensional harmonic oscillator. Parameters ========== n : the ""nodal"" quantum number. Corresponds to the number of nodes in the wavefunction. ``n >= 0`` x : x coordinate. m : Mass of the particle. omega : Angular frequency of the oscillator. Examples ======== >>> from sympy.physics.qho_1d import psi_n >>> from sympy.abc import m, x, omega >>> psi_n(0, x, m, omega) (m*omega)**(1/4)*exp(-m*omega*x**2/(2*hbar))/(hbar**(1/4)*pi**(1/4)) ",66,43,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def psi_n(n, x, m, omega): # sympify arguments n, x, m, omega = map(S, [n, x, m, omega]) nu = m * omega / hbar # normalization coefficient C = (nu/pi)**Rational(1, 4) * sqrt(1/(2**n*factorial(n))) return C * exp(-nu* x**2 /2) * hermite(n, sqrt(nu)*x) ``` ###Assistant : Returns the wavefunction psi_{n} for the One-dimensional harmonic oscillator. Parameters ========== n : the ""nodal"" quantum number. Corresponds to the number of nodes in the wavefunction. ``n >= 0`` x : x coordinate. m : Mass of the particle. omega : Angular frequency of the oscillator. Examples ======== >>> from sympy.physics.qho_1d import psi_n >>> from sympy.abc import m, x, omega >>> psi_n(0, x, m, omega) (m*omega)**(1/4)*exp(-m*omega*x**2/(2*hbar))/(hbar**(1/4)*pi**(1/4)) " 2592,"def getsourcefile(object): filename = getfile(object) all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:] all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:] if any(filename.endswith(s) for s in all_bytecode_suffixes): filename = (os.path.splitext(filename)[0] + importlib.machinery.SOURCE_SUFFIXES[0]) elif any(filename.endswith(s) for s in importlib.machinery.EXTENSION_SUFFIXES): return None if os.path.exists(filename): return filename # only return a non-existent filename if the module has a PEP 302 loader module = getmodule(object, filename) if getattr(module, '__loader__', None) is not None: return filename elif getattr(getattr(module, ""__spec__"", None), ""loader"", None) is not None: return filename # or it is in the linecache elif filename in linecache.cache: return filename ","Return the filename that can be used to locate an object's source. Return None if no way can be identified to get the source. ",24,85,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def getsourcefile(object): filename = getfile(object) all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:] all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:] if any(filename.endswith(s) for s in all_bytecode_suffixes): filename = (os.path.splitext(filename)[0] + importlib.machinery.SOURCE_SUFFIXES[0]) elif any(filename.endswith(s) for s in importlib.machinery.EXTENSION_SUFFIXES): return None if os.path.exists(filename): return filename # only return a non-existent filename if the module has a PEP 302 loader module = getmodule(object, filename) if getattr(module, '__loader__', None) is not None: return filename elif getattr(getattr(module, ""__spec__"", None), ""loader"", None) is not None: return filename # or it is in the linecache elif filename in linecache.cache: return filename ``` ###Assistant : Return the filename that can be used to locate an object's source. Return None if no way can be identified to get the source. " 2593,"def module_repr(self, module): warnings.warn(""importlib.abc.Loader.module_repr() is deprecated and "" ""slated for removal in Python 3.12"", DeprecationWarning) # The exception will cause ModuleType.__repr__ to ignore this method. raise NotImplementedError ","Return a module's repr. Used by the module type when the method does not raise NotImplementedError. This method is deprecated. ",20,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def module_repr(self, module): warnings.warn(""importlib.abc.Loader.module_repr() is deprecated and "" ""slated for removal in Python 3.12"", DeprecationWarning) # The exception will cause ModuleType.__repr__ to ignore this method. raise NotImplementedError ``` ###Assistant : Return a module's repr. Used by the module type when the method does not raise NotImplementedError. This method is deprecated. " 2594,"def test_need_validated_email(self): with self.assertRaises(SynapseError) as cm: self.get_success_or_raise( self.hs.get_pusherpool().add_or_update_pusher( user_id=self.user_id, access_token=self.token_id, kind=""email"", app_id=""m.email"", app_display_name=""Email Notifications"", device_display_name=""b@example.com"", pushkey=""b@example.com"", lang=None, data={}, ) ) self.assertEqual(400, cm.exception.code) self.assertEqual(Codes.THREEPID_NOT_FOUND, cm.exception.errcode) ","Test that we can only add an email pusher if the user has validated their email. ",16,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_need_validated_email(self): with self.assertRaises(SynapseError) as cm: self.get_success_or_raise( self.hs.get_pusherpool().add_or_update_pusher( user_id=self.user_id, access_token=self.token_id, kind=""email"", app_id=""m.email"", app_display_name=""Email Notifications"", device_display_name=""b@example.com"", pushkey=""b@example.com"", lang=None, data={}, ) ) self.assertEqual(400, cm.exception.code) self.assertEqual(Codes.THREEPID_NOT_FOUND, cm.exception.errcode) ``` ###Assistant : Test that we can only add an email pusher if the user has validated their email. 
" 2595,"def test_get_existing_comments(self): doc = Document.objects.create( title=""test"", mime_type=""application/pdf"", content=""this is a document which will have comments!"", ) comment = Comment.objects.create( comment=""This is a comment."", document=doc, user=self.user, ) response = self.client.get( f""/api/documents/{doc.pk}/comments/"", format=""json"", ) self.assertEqual(response.status_code, 200) resp_data = response.json() self.assertEqual(len(resp_data), 1) resp_data = resp_data[0] del resp_data[""created""] self.assertDictEqual( resp_data, { ""id"": comment.id, ""comment"": comment.comment, ""user"": { ""id"": comment.user.id, ""username"": comment.user.username, ""firstname"": comment.user.first_name, ""lastname"": comment.user.last_name, }, }, ) "," GIVEN: - A document with a single comment WHEN: - API reuqest for document comments is made THEN: - The associated comment is returned ",24,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_existing_comments(self): doc = Document.objects.create( title=""test"", mime_type=""application/pdf"", content=""this is a document which will have comments!"", ) comment = Comment.objects.create( comment=""This is a comment."", document=doc, user=self.user, ) response = self.client.get( f""/api/documents/{doc.pk}/comments/"", format=""json"", ) self.assertEqual(response.status_code, 200) resp_data = response.json() self.assertEqual(len(resp_data), 1) resp_data = resp_data[0] del resp_data[""created""] self.assertDictEqual( resp_data, { ""id"": comment.id, ""comment"": comment.comment, ""user"": { ""id"": comment.user.id, ""username"": comment.user.username, ""firstname"": comment.user.first_name, ""lastname"": comment.user.last_name, }, }, ) ``` ###Assistant : GIVEN: - A document with a single comment WHEN: - API reuqest for document comments is made THEN: - The associated comment is returned " 2596,"def collate_full_clips(batch): max_mel_length = max([b[0].shape[1] for b in batch]) if len(batch) > 1 else batch[0][0].shape[1] max_audio_length = max([b[1].shape[0] for b in batch]) if len(batch) > 1 else batch[0][1].shape[0] mels = torch.zeros([len(batch), batch[0][0].shape[0], max_mel_length]) audios = torch.zeros([len(batch), max_audio_length]) for idx, b in enumerate(batch): mel = b[0] audio = b[1] mels[idx, :, : mel.shape[1]] = mel audios[idx, : audio.shape[0]] = audio return mels, audios ","This is used in tune_wavegrad.py. It pads sequences to the max length.",12,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def collate_full_clips(batch): max_mel_length = max([b[0].shape[1] for b in batch]) if len(batch) > 1 else batch[0][0].shape[1] max_audio_length = max([b[1].shape[0] for b in batch]) if len(batch) > 1 else batch[0][1].shape[0] mels = torch.zeros([len(batch), batch[0][0].shape[0], max_mel_length]) audios = torch.zeros([len(batch), max_audio_length]) for idx, b in enumerate(batch): mel = b[0] audio = b[1] mels[idx, :, : mel.shape[1]] = mel audios[idx, : audio.shape[0]] = audio return mels, audios ``` ###Assistant : This is used in tune_wavegrad.py. It pads sequences to the max length. 
" 2597,"def feature_test(self, name, force_flags=None, macros=[]): if force_flags is None: force_flags = self.feature_flags(name) self.dist_log( ""testing feature '%s' with flags (%s)"" % ( name, ' '.join(force_flags) )) # Each CPU feature must have C source code contains at # least one intrinsic or instruction related to this feature. test_path = os.path.join( self.conf_check_path, ""cpu_%s.c"" % name.lower() ) if not os.path.exists(test_path): self.dist_fatal(""feature test file is not exist"", test_path) test = self.dist_test( test_path, force_flags + self.cc_flags[""werror""], macros=macros ) if not test: self.dist_log(""testing failed"", stderr=True) return test "," Test a certain CPU feature against the compiler through its own check file. Parameters ---------- name : str Supported CPU feature name. force_flags : list or None, optional If None(default), the returned flags from `feature_flags()` will be used. macros : list of tuples, optional A list of C macro definitions. ",50,81,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def feature_test(self, name, force_flags=None, macros=[]): if force_flags is None: force_flags = self.feature_flags(name) self.dist_log( ""testing feature '%s' with flags (%s)"" % ( name, ' '.join(force_flags) )) # Each CPU feature must have C source code contains at # least one intrinsic or instruction related to this feature. test_path = os.path.join( self.conf_check_path, ""cpu_%s.c"" % name.lower() ) if not os.path.exists(test_path): self.dist_fatal(""feature test file is not exist"", test_path) test = self.dist_test( test_path, force_flags + self.cc_flags[""werror""], macros=macros ) if not test: self.dist_log(""testing failed"", stderr=True) return test ``` ###Assistant : Test a certain CPU feature against the compiler through its own check file. Parameters ---------- name : str Supported CPU feature name. force_flags : list or None, optional If None(default), the returned flags from `feature_flags()` will be used. macros : list of tuples, optional A list of C macro definitions. " 2598,"def get_file_path(self) -> str: if self.file_name is None: raise ValueError(""Must specify file for SVGMobject"") return get_full_vector_image_path(self.file_name) ",Search for an existing file based on the specified file name.,11,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_file_path(self) -> str: if self.file_name is None: raise ValueError(""Must specify file for SVGMobject"") return get_full_vector_image_path(self.file_name) ``` ###Assistant : Search for an existing file based on the specified file name. " 2599,"def default_config(self) -> Dict[str, Any]: base = super().default_config() base[""redis""] = {""enabled"": True} return base "," Overrides the default config to enable Redis. Even if the test only uses make_worker_hs, the main process needs Redis enabled otherwise it won't create a Fake Redis server to listen on the Redis port and accept fake TCP connections. ",39,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def default_config(self) -> Dict[str, Any]: base = super().default_config() base[""redis""] = {""enabled"": True} return base ``` ###Assistant : Overrides the default config to enable Redis. 
Even if the test only uses make_worker_hs, the main process needs Redis enabled otherwise it won't create a Fake Redis server to listen on the Redis port and accept fake TCP connections. " 2600,"def delete_project_summary_annotations_before_updating_annotation(sender, instance, **kwargs): try: old_annotation = sender.objects.get(id=instance.id) except Annotation.DoesNotExist: # annotation just created - do nothing return old_annotation.decrease_project_summary_counters() # update task counters if annotation changes it's was_cancelled status task = instance.task if old_annotation.was_cancelled != instance.was_cancelled: if instance.was_cancelled: task.cancelled_annotations = task.cancelled_annotations + 1 task.total_annotations = task.total_annotations - 1 else: task.cancelled_annotations = task.cancelled_annotations - 1 task.total_annotations = task.total_annotations + 1 task.update_is_labeled() Task.objects.filter(id=instance.task.id).update( is_labeled=task.is_labeled, total_annotations=task.total_annotations, cancelled_annotations=task.cancelled_annotations ) @receiver(post_save, sender=Annotation)",Before updating annotation fields - ensure previous info removed from project.summary,11,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delete_project_summary_annotations_before_updating_annotation(sender, instance, **kwargs): try: old_annotation = sender.objects.get(id=instance.id) except Annotation.DoesNotExist: # annotation just created - do nothing return old_annotation.decrease_project_summary_counters() # update task counters if annotation changes it's was_cancelled status task = instance.task if old_annotation.was_cancelled != instance.was_cancelled: if instance.was_cancelled: task.cancelled_annotations = task.cancelled_annotations + 1 task.total_annotations = task.total_annotations - 1 else: task.cancelled_annotations = task.cancelled_annotations - 1 task.total_annotations = task.total_annotations + 1 task.update_is_labeled() Task.objects.filter(id=instance.task.id).update( is_labeled=task.is_labeled, total_annotations=task.total_annotations, cancelled_annotations=task.cancelled_annotations ) @receiver(post_save, sender=Annotation) ``` ###Assistant : Before updating annotation fields - ensure previous info removed from project.summary " 2601,"def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads): # 对运行位置进行配置 if use_gpu: try: int(os.environ.get('CUDA_VISIBLE_DEVICES')) except Exception: print( ) use_gpu = False if os.path.isdir(modelpath): if os.path.exists(os.path.join(modelpath, ""__params__"")): # __model__ + __params__ model = os.path.join(modelpath, ""__model__"") params = os.path.join(modelpath, ""__params__"") config = Config(model, params) elif os.path.exists(os.path.join(modelpath, ""params"")): # model + params model = os.path.join(modelpath, ""model"") params = os.path.join(modelpath, ""params"") config = Config(model, params) elif os.path.exists(os.path.join(modelpath, ""__model__"")): # __model__ + others config = Config(modelpath) else: raise Exception( ""Error! Can\'t find the model in: %s. Please check your model path."" % os.path.abspath(modelpath)) elif os.path.exists(modelpath + "".pdmodel""): # *.pdmodel + *.pdiparams model = modelpath + "".pdmodel"" params = modelpath + "".pdiparams"" config = Config(model, params) elif isinstance(modelpath, Config): config = modelpath else: raise Exception( ""Error! Can\'t find the model in: %s. 
Please check your model path."" % os.path.abspath(modelpath)) # 设置参数 if use_gpu: config.enable_use_gpu(100, gpu_id) else: config.disable_gpu() config.set_cpu_math_library_num_threads(cpu_threads) if use_mkldnn: config.enable_mkldnn() config.disable_glog_info() # 返回配置 return config # 预测器创建函数 "," load the model config modelpath: inference model path use_gpu: use gpu or not use_mkldnn: use mkldnn or not Error! Unable to use GPU. Please set the environment variables ""CUDA_VISIBLE_DEVICES=GPU_id"" to use GPU. Now switch to CPU to continue...",38,151,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads): # 对运行位置进行配置 if use_gpu: try: int(os.environ.get('CUDA_VISIBLE_DEVICES')) except Exception: print( ) use_gpu = False if os.path.isdir(modelpath): if os.path.exists(os.path.join(modelpath, ""__params__"")): # __model__ + __params__ model = os.path.join(modelpath, ""__model__"") params = os.path.join(modelpath, ""__params__"") config = Config(model, params) elif os.path.exists(os.path.join(modelpath, ""params"")): # model + params model = os.path.join(modelpath, ""model"") params = os.path.join(modelpath, ""params"") config = Config(model, params) elif os.path.exists(os.path.join(modelpath, ""__model__"")): # __model__ + others config = Config(modelpath) else: raise Exception( ""Error! Can\'t find the model in: %s. Please check your model path."" % os.path.abspath(modelpath)) elif os.path.exists(modelpath + "".pdmodel""): # *.pdmodel + *.pdiparams model = modelpath + "".pdmodel"" params = modelpath + "".pdiparams"" config = Config(model, params) elif isinstance(modelpath, Config): config = modelpath else: raise Exception( ""Error! Can\'t find the model in: %s. Please check your model path."" % os.path.abspath(modelpath)) # 设置参数 if use_gpu: config.enable_use_gpu(100, gpu_id) else: config.disable_gpu() config.set_cpu_math_library_num_threads(cpu_threads) if use_mkldnn: config.enable_mkldnn() config.disable_glog_info() # 返回配置 return config # 预测器创建函数 ``` ###Assistant : load the model config modelpath: inference model path use_gpu: use gpu or not use_mkldnn: use mkldnn or not Error! Unable to use GPU. Please set the environment variables ""CUDA_VISIBLE_DEVICES=GPU_id"" to use GPU. Now switch to CPU to continue... " 2602,"def trim_line(line, column=0): line = line.strip(""\n"") ll = len(line) if ll <= 150: return line if column > ll: column = ll start = max(column - 60, 0) # Round down if it brings us close to the edge if start < 5: start = 0 end = min(start + 140, ll) # Round up to the end if it's close if end > ll - 5: end = ll # If we are bumped all the way to the end, # make sure we still get a full 140 characters in the line if end == ll: start = max(end - 140, 0) line = line[start:end] if end < ll: # we've snipped from the end line += "" {snip}"" if start > 0: # we've snipped from the beginning line = ""{snip} "" + line return line "," Trims a line down to a goal of 140 characters, with a little wiggle room to be sensible and tries to trim around the given `column`. So it tries to extract 60 characters before and after the provided `column` and yield a better context. ",44,139,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def trim_line(line, column=0): line = line.strip(""\n"") ll = len(line) if ll <= 150: return line if column > ll: column = ll start = max(column - 60, 0) # Round down if it brings us close to the edge if start < 5: start = 0 end = min(start + 140, ll) # Round up to the end if it's close if end > ll - 5: end = ll # If we are bumped all the way to the end, # make sure we still get a full 140 characters in the line if end == ll: start = max(end - 140, 0) line = line[start:end] if end < ll: # we've snipped from the end line += "" {snip}"" if start > 0: # we've snipped from the beginning line = ""{snip} "" + line return line ``` ###Assistant : Trims a line down to a goal of 140 characters, with a little wiggle room to be sensible and tries to trim around the given `column`. So it tries to extract 60 characters before and after the provided `column` and yield a better context. " 2603,"def test_background_update_default_batch_set_by_config(self): self.get_success( self.store.db_pool.simple_insert( ""background_updates"", values={""update_name"": ""test_update"", ""progress_json"": '{""my_key"": 1}'}, ) ) self.update_handler.side_effect = self.update self.update_handler.reset_mock() res = self.get_success( self.updates.do_next_background_update(False), by=0.01, ) self.assertFalse(res) # on the first call, we should get run with the default background update size specified in the config self.update_handler.assert_called_once_with({""my_key"": 1}, 20) "," Test that the background update is run with the default_batch_size set by the config ",14,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_background_update_default_batch_set_by_config(self): self.get_success( self.store.db_pool.simple_insert( ""background_updates"", values={""update_name"": ""test_update"", ""progress_json"": '{""my_key"": 1}'}, ) ) self.update_handler.side_effect = self.update self.update_handler.reset_mock() res = self.get_success( self.updates.do_next_background_update(False), by=0.01, ) self.assertFalse(res) # on the first call, we should get run with the default background update size specified in the config self.update_handler.assert_called_once_with({""my_key"": 1}, 20) ``` ###Assistant : Test that the background update is run with the default_batch_size set by the config " 2604,"def _expiry_date(self, session_data): return session_data.get(""_session_expiry"") or ( self._last_modification() + datetime.timedelta(seconds=self.get_session_cookie_age()) ) "," Return the expiry time of the file storing the session's content. ",11,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _expiry_date(self, session_data): return session_data.get(""_session_expiry"") or ( self._last_modification() + datetime.timedelta(seconds=self.get_session_cookie_age()) ) ``` ###Assistant : Return the expiry time of the file storing the session's content. 
" 2605,"def __call__(self, *args, **kwargs): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) images = kwargs.pop(""images"", None) text = kwargs.pop(""text"", None) if len(args) > 0: images = args[0] args = args[1:] if images is None and text is None: raise ValueError(""You need to specify either an `images` or `text` input to process."") if images is not None: inputs = self.feature_extractor(images, *args, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif images is None: return encodings else: inputs[""labels""] = encodings[""input_ids""] return inputs "," When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor's [`~AutoFeatureExtractor.__call__`] and returns its output. If used in the context [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's [`~TrOCRTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information. ",46,89,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self, *args, **kwargs): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*args, **kwargs) images = kwargs.pop(""images"", None) text = kwargs.pop(""text"", None) if len(args) > 0: images = args[0] args = args[1:] if images is None and text is None: raise ValueError(""You need to specify either an `images` or `text` input to process."") if images is not None: inputs = self.feature_extractor(images, *args, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif images is None: return encodings else: inputs[""labels""] = encodings[""input_ids""] return inputs ``` ###Assistant : When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor's [`~AutoFeatureExtractor.__call__`] and returns its output. If used in the context [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's [`~TrOCRTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information. " 2606,"def get_report(module_name, report_name): reports = get_reports() module = reports.get(module_name) if module is None: return None report = module.get(report_name) if report is None: return None return report "," Return a specific report from within a module. ",8,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_report(module_name, report_name): reports = get_reports() module = reports.get(module_name) if module is None: return None report = module.get(report_name) if report is None: return None return report ``` ###Assistant : Return a specific report from within a module. 
" 2607,"def test_worker_duty_configs(self) -> None: worker1_config = self._make_worker_config( worker_app=""synapse.app.generic_worker"", worker_name=""worker1"", extras={ ""notify_appservices_from_worker"": ""worker2"", ""update_user_directory_from_worker"": ""worker1"", }, ) self.assertFalse(worker1_config.should_notify_appservices) self.assertTrue(worker1_config.should_update_user_directory) worker2_config = self._make_worker_config( worker_app=""synapse.app.generic_worker"", worker_name=""worker2"", extras={ ""notify_appservices_from_worker"": ""worker2"", ""update_user_directory_from_worker"": ""worker1"", }, ) self.assertTrue(worker2_config.should_notify_appservices) self.assertFalse(worker2_config.should_update_user_directory) "," Additional tests for the worker duties ",6,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_worker_duty_configs(self) -> None: worker1_config = self._make_worker_config( worker_app=""synapse.app.generic_worker"", worker_name=""worker1"", extras={ ""notify_appservices_from_worker"": ""worker2"", ""update_user_directory_from_worker"": ""worker1"", }, ) self.assertFalse(worker1_config.should_notify_appservices) self.assertTrue(worker1_config.should_update_user_directory) worker2_config = self._make_worker_config( worker_app=""synapse.app.generic_worker"", worker_name=""worker2"", extras={ ""notify_appservices_from_worker"": ""worker2"", ""update_user_directory_from_worker"": ""worker1"", }, ) self.assertTrue(worker2_config.should_notify_appservices) self.assertFalse(worker2_config.should_update_user_directory) ``` ###Assistant : Additional tests for the worker duties " 2608,"def actor_id(self): # only worker mode has actor_id assert ( self.worker.mode == ray.worker.WORKER_MODE ), f""This method is only available when the process is a\ worker. Current mode: {self.worker.mode}"" actor_id = self.worker.actor_id return actor_id if not actor_id.is_nil() else None ","Get the current actor ID in this worker. ID of the actor of the current process. This shouldn't be used in a driver process. Returns: The current actor id in this worker. None if there's no actor id. ",38,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def actor_id(self): # only worker mode has actor_id assert ( self.worker.mode == ray.worker.WORKER_MODE ), f""This method is only available when the process is a\ worker. Current mode: {self.worker.mode}"" actor_id = self.worker.actor_id return actor_id if not actor_id.is_nil() else None ``` ###Assistant : Get the current actor ID in this worker. ID of the actor of the current process. This shouldn't be used in a driver process. Returns: The current actor id in this worker. None if there's no actor id. " 2609,"def test_redirect_to_current(self): start_url = reverse(""wagtailsettings:edit"", args=[""tests"", ""testsetting""]) dest_url = reverse( ""wagtailsettings:edit"", args=[""tests"", ""testsetting"", self.other_site.pk] ) response = self.client.get( start_url, follow=True, HTTP_HOST=self.other_site.hostname ) self.assertRedirects( response, dest_url, status_code=302, fetch_redirect_response=False ) "," Should redirect to the setting for the current site taken from the URL, by default ",15,28,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_redirect_to_current(self): start_url = reverse(""wagtailsettings:edit"", args=[""tests"", ""testsetting""]) dest_url = reverse( ""wagtailsettings:edit"", args=[""tests"", ""testsetting"", self.other_site.pk] ) response = self.client.get( start_url, follow=True, HTTP_HOST=self.other_site.hostname ) self.assertRedirects( response, dest_url, status_code=302, fetch_redirect_response=False ) ``` ###Assistant : Should redirect to the setting for the current site taken from the URL, by default " 2610,"def fit(self, X, y, sample_weight=None, fit_params=None): if not hasattr(self.estimator, ""fit""): raise ValueError(""The base estimator should implement a fit method"") y = self._validate_data(X=""no_validation"", y=y, multi_output=True) if is_classifier(self): check_classification_targets(y) if y.ndim == 1: raise ValueError( ""y must have at least two dimensions for "" ""multi-output regression but has only one."" ) if sample_weight is not None and not has_fit_parameter( self.estimator, ""sample_weight"" ): raise ValueError(""Underlying estimator does not support sample weights."") if not fit_params: fit_params = [None] * y.shape[1] self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_fit_estimator)( self.estimator, X, y[:, i], sample_weight, **fit_params[i] ) for i in range(y.shape[1]) ) self.classes_ = [] for estimator in self.estimators_: self.classes_.extend(estimator.classes_) if hasattr(self.estimators_[0], ""n_features_in_""): self.n_features_in_ = self.estimators_[0].n_features_in_ if hasattr(self.estimators_[0], ""feature_names_in_""): self.feature_names_in_ = self.estimators_[0].feature_names_in_ return self ","Fit the model to data, separately for each output variable. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : {array-like, sparse matrix} of shape (n_samples, n_outputs) Multi-output targets. An indicator matrix turns on multilabel estimation. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If `None`, then samples are equally weighted. Only supported if the underlying classifier supports sample weights. fit_params : A list of dicts for the fit_params Parameters passed to the ``estimator.fit`` method of each step. Each dict may contain same or different values (e.g. different eval_sets or init_models) .. versionadded:: 0.23 Returns ------- self : object Returns a fitted instance. ",110,114,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def fit(self, X, y, sample_weight=None, fit_params=None): if not hasattr(self.estimator, ""fit""): raise ValueError(""The base estimator should implement a fit method"") y = self._validate_data(X=""no_validation"", y=y, multi_output=True) if is_classifier(self): check_classification_targets(y) if y.ndim == 1: raise ValueError( ""y must have at least two dimensions for "" ""multi-output regression but has only one."" ) if sample_weight is not None and not has_fit_parameter( self.estimator, ""sample_weight"" ): raise ValueError(""Underlying estimator does not support sample weights."") if not fit_params: fit_params = [None] * y.shape[1] self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_fit_estimator)( self.estimator, X, y[:, i], sample_weight, **fit_params[i] ) for i in range(y.shape[1]) ) self.classes_ = [] for estimator in self.estimators_: self.classes_.extend(estimator.classes_) if hasattr(self.estimators_[0], ""n_features_in_""): self.n_features_in_ = self.estimators_[0].n_features_in_ if hasattr(self.estimators_[0], ""feature_names_in_""): self.feature_names_in_ = self.estimators_[0].feature_names_in_ return self ``` ###Assistant : Fit the model to data, separately for each output variable. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : {array-like, sparse matrix} of shape (n_samples, n_outputs) Multi-output targets. An indicator matrix turns on multilabel estimation. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If `None`, then samples are equally weighted. Only supported if the underlying classifier supports sample weights. fit_params : A list of dicts for the fit_params Parameters passed to the ``estimator.fit`` method of each step. Each dict may contain same or different values (e.g. different eval_sets or init_models) .. versionadded:: 0.23 Returns ------- self : object Returns a fitted instance. " 2611,"def inner_choices(self) -> Iterable['ValueChoice']: for arg in self.arguments: if isinstance(arg, ValueChoiceX): yield from arg.inner_choices() "," Return an iterable of all leaf value choices. Useful for composition of value choices. No deduplication on labels. Mutators should take care. ",22,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inner_choices(self) -> Iterable['ValueChoice']: for arg in self.arguments: if isinstance(arg, ValueChoiceX): yield from arg.inner_choices() ``` ###Assistant : Return an iterable of all leaf value choices. Useful for composition of value choices. No deduplication on labels. Mutators should take care. " 2612,"def get_indices(expr): # We call ourself recursively to determine indices of sub expressions. # break recursion if isinstance(expr, Indexed): c = expr.indices inds, dummies = _remove_repeated(c) return inds, {} elif expr is None: return set(), {} elif isinstance(expr, Idx): return {expr}, {} elif expr.is_Atom: return set(), {} # recurse via specialized functions else: if expr.is_Mul: return _get_indices_Mul(expr) elif expr.is_Add: return _get_indices_Add(expr) elif expr.is_Pow or isinstance(expr, exp): return _get_indices_Pow(expr) elif isinstance(expr, Piecewise): # FIXME: No support for Piecewise yet return set(), {} elif isinstance(expr, Function): # Support ufunc like behaviour by returning indices from arguments. 
# Functions do not interpret repeated indices across arguments # as summation ind0 = set() for arg in expr.args: ind, sym = get_indices(arg) ind0 |= ind return ind0, sym # this test is expensive, so it should be at the end elif not expr.has(Indexed): return set(), {} raise NotImplementedError( ""FIXME: No specialized handling of type %s"" % type(expr)) ","Determine the outer indices of expression ``expr`` By *outer* we mean indices that are not summation indices. Returns a set and a dict. The set contains outer indices and the dict contains information about index symmetries. Examples ======== >>> from sympy.tensor.index_methods import get_indices >>> from sympy import symbols >>> from sympy.tensor import IndexedBase >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) >>> i, j, a, z = symbols('i j a z', integer=True) The indices of the total expression is determined, Repeated indices imply a summation, for instance the trace of a matrix A: >>> get_indices(A[i, i]) (set(), {}) In the case of many terms, the terms are required to have identical outer indices. Else an IndexConformanceException is raised. >>> get_indices(x[i] + A[i, j]*y[j]) ({i}, {}) :Exceptions: An IndexConformanceException means that the terms ar not compatible, e.g. >>> get_indices(x[i] + y[j]) #doctest: +SKIP (...) IndexConformanceException: Indices are not consistent: x(i) + y(j) .. warning:: The concept of *outer* indices applies recursively, starting on the deepest level. This implies that dummies inside parenthesis are assumed to be summed first, so that the following expression is handled gracefully: >>> get_indices((x[i] + A[i, j]*y[j])*x[j]) ({i, j}, {}) This is correct and may appear convenient, but you need to be careful with this as SymPy will happily .expand() the product, if requested. The resulting expression would mix the outer ``j`` with the dummies inside the parenthesis, which makes it a different expression. To be on the safe side, it is best to avoid such ambiguities by using unique indices for all contractions that should be held separate. ",263,152,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_indices(expr): # We call ourself recursively to determine indices of sub expressions. # break recursion if isinstance(expr, Indexed): c = expr.indices inds, dummies = _remove_repeated(c) return inds, {} elif expr is None: return set(), {} elif isinstance(expr, Idx): return {expr}, {} elif expr.is_Atom: return set(), {} # recurse via specialized functions else: if expr.is_Mul: return _get_indices_Mul(expr) elif expr.is_Add: return _get_indices_Add(expr) elif expr.is_Pow or isinstance(expr, exp): return _get_indices_Pow(expr) elif isinstance(expr, Piecewise): # FIXME: No support for Piecewise yet return set(), {} elif isinstance(expr, Function): # Support ufunc like behaviour by returning indices from arguments. # Functions do not interpret repeated indices across arguments # as summation ind0 = set() for arg in expr.args: ind, sym = get_indices(arg) ind0 |= ind return ind0, sym # this test is expensive, so it should be at the end elif not expr.has(Indexed): return set(), {} raise NotImplementedError( ""FIXME: No specialized handling of type %s"" % type(expr)) ``` ###Assistant : Determine the outer indices of expression ``expr`` By *outer* we mean indices that are not summation indices. Returns a set and a dict. The set contains outer indices and the dict contains information about index symmetries. 
Examples ======== >>> from sympy.tensor.index_methods import get_indices >>> from sympy import symbols >>> from sympy.tensor import IndexedBase >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) >>> i, j, a, z = symbols('i j a z', integer=True) The indices of the total expression is determined, Repeated indices imply a summation, for instance the trace of a matrix A: >>> get_indices(A[i, i]) (set(), {}) In the case of many terms, the terms are required to have identical outer indices. Else an IndexConformanceException is raised. >>> get_indices(x[i] + A[i, j]*y[j]) ({i}, {}) :Exceptions: An IndexConformanceException means that the terms ar not compatible, e.g. >>> get_indices(x[i] + y[j]) #doctest: +SKIP (...) IndexConformanceException: Indices are not consistent: x(i) + y(j) .. warning:: The concept of *outer* indices applies recursively, starting on the deepest level. This implies that dummies inside parenthesis are assumed to be summed first, so that the following expression is handled gracefully: >>> get_indices((x[i] + A[i, j]*y[j])*x[j]) ({i, j}, {}) This is correct and may appear convenient, but you need to be careful with this as SymPy will happily .expand() the product, if requested. The resulting expression would mix the outer ``j`` with the dummies inside the parenthesis, which makes it a different expression. To be on the safe side, it is best to avoid such ambiguities by using unique indices for all contractions that should be held separate. " 2613,"def dist_location(dist): # type: (Distribution) -> str egg_link = egg_link_path(dist) if egg_link: return normalize_path(egg_link) return normalize_path(dist.location) "," Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. The returned location is normalized (in particular, with symlinks removed). ",45,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dist_location(dist): # type: (Distribution) -> str egg_link = egg_link_path(dist) if egg_link: return normalize_path(egg_link) return normalize_path(dist.location) ``` ###Assistant : Get the site-packages location of this distribution. Generally this is dist.location, except in the case of develop-installed packages, where dist.location is the source code location, and we want to know where the egg-link file is. The returned location is normalized (in particular, with symlinks removed). " 2614,"def can_connect(url, error_classes=None): if error_classes is None: error_classes = _get_default_network_errors() try: with urlopen(url, timeout=20) as response: # Timeout just in case rate-limiting is applied if response.status != 200: return False except error_classes: return False else: return True # ------------------------------------------------------------------ # File-IO "," Try to connect to the given url. True if succeeds, False if OSError raised Parameters ---------- url : basestring The URL to try to connect to Returns ------- connectable : bool Return True if no OSError (unable to connect) or URLError (bad url) was raised ",45,41,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def can_connect(url, error_classes=None): if error_classes is None: error_classes = _get_default_network_errors() try: with urlopen(url, timeout=20) as response: # Timeout just in case rate-limiting is applied if response.status != 200: return False except error_classes: return False else: return True # ------------------------------------------------------------------ # File-IO ``` ###Assistant : Try to connect to the given url. True if succeeds, False if OSError raised Parameters ---------- url : basestring The URL to try to connect to Returns ------- connectable : bool Return True if no OSError (unable to connect) or URLError (bad url) was raised " 2615,"def test_valid_slack_channel_id(self): integration = Integration.objects.create( external_id=""1"", provider=""slack"", metadata={""access_token"": ""xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx""}, ) integration.add_organization(self.organization, self.user) base_params = self.valid_params.copy() base_params.update( { ""type"": AlertRuleTriggerAction.get_registered_type( AlertRuleTriggerAction.Type.SLACK ).slug, ""targetType"": ACTION_TARGET_TYPE_TO_STRING[ AlertRuleTriggerAction.TargetType.SPECIFIC ], ""targetIdentifier"": ""merp"", ""integration"": str(integration.id), } ) context = self.context.copy() context.update({""input_channel_id"": ""CSVK0921""}) responses.add( method=responses.GET, url=""https://slack.com/api/conversations.info"", status=200, content_type=""application/json"", body=json.dumps({""ok"": ""true"", ""channel"": {""name"": ""merp"", ""id"": ""CSVK0921""}}), ) serializer = AlertRuleTriggerActionSerializer(context=context, data=base_params) assert serializer.is_valid() serializer.save() # # Make sure the action was created. alert_rule_trigger_actions = list( AlertRuleTriggerAction.objects.filter(integration=integration) ) assert len(alert_rule_trigger_actions) == 1 "," Test that when a valid Slack channel ID is provided, we look up the channel name and validate it against the targetIdentifier. ",22,73,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_valid_slack_channel_id(self): integration = Integration.objects.create( external_id=""1"", provider=""slack"", metadata={""access_token"": ""xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx""}, ) integration.add_organization(self.organization, self.user) base_params = self.valid_params.copy() base_params.update( { ""type"": AlertRuleTriggerAction.get_registered_type( AlertRuleTriggerAction.Type.SLACK ).slug, ""targetType"": ACTION_TARGET_TYPE_TO_STRING[ AlertRuleTriggerAction.TargetType.SPECIFIC ], ""targetIdentifier"": ""merp"", ""integration"": str(integration.id), } ) context = self.context.copy() context.update({""input_channel_id"": ""CSVK0921""}) responses.add( method=responses.GET, url=""https://slack.com/api/conversations.info"", status=200, content_type=""application/json"", body=json.dumps({""ok"": ""true"", ""channel"": {""name"": ""merp"", ""id"": ""CSVK0921""}}), ) serializer = AlertRuleTriggerActionSerializer(context=context, data=base_params) assert serializer.is_valid() serializer.save() # # Make sure the action was created. alert_rule_trigger_actions = list( AlertRuleTriggerAction.objects.filter(integration=integration) ) assert len(alert_rule_trigger_actions) == 1 ``` ###Assistant : Test that when a valid Slack channel ID is provided, we look up the channel name and validate it against the targetIdentifier. 
" 2616,"def get_legacy(members): if AIX_ABI == 64: # AIX 64-bit member is one of shr64.o, shr_64.o, or shr4_64.o expr = r'shr4?_?64\.o' member = get_one_match(expr, members) if member: return member else: # 32-bit legacy names - both shr.o and shr4.o exist. # shr.o is the preferred name so we look for shr.o first # i.e., shr4.o is returned only when shr.o does not exist for name in ['shr.o', 'shr4.o']: member = get_one_match(re.escape(name), members) if member: return member return None "," This routine provides historical aka legacy naming schemes started in AIX4 shared library support for library members names. e.g., in /usr/lib/libc.a the member name shr.o for 32-bit binary and shr_64.o for 64-bit binary. ",33,77,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_legacy(members): if AIX_ABI == 64: # AIX 64-bit member is one of shr64.o, shr_64.o, or shr4_64.o expr = r'shr4?_?64\.o' member = get_one_match(expr, members) if member: return member else: # 32-bit legacy names - both shr.o and shr4.o exist. # shr.o is the preferred name so we look for shr.o first # i.e., shr4.o is returned only when shr.o does not exist for name in ['shr.o', 'shr4.o']: member = get_one_match(re.escape(name), members) if member: return member return None ``` ###Assistant : This routine provides historical aka legacy naming schemes started in AIX4 shared library support for library members names. e.g., in /usr/lib/libc.a the member name shr.o for 32-bit binary and shr_64.o for 64-bit binary. " 2617,"def set_policy(name, table=""filter"", family=""ipv4"", **kwargs): ret = {""name"": name, ""changes"": {}, ""result"": None, ""comment"": """"} for ignore in _STATE_INTERNAL_KEYWORDS: if ignore in kwargs: del kwargs[ignore] if ( __salt__[""iptables.get_policy""](table, kwargs[""chain""], family) == kwargs[""policy""] ): ret[""result""] = True ret[ ""comment"" ] = ""iptables default policy for chain {} on table {} for {} already set to {}"".format( kwargs[""chain""], table, family, kwargs[""policy""] ) return ret if __opts__[""test""]: ret[""comment""] = ( ""iptables default policy for chain {} on table {} for {} needs to be set"" "" to {}"".format(kwargs[""chain""], table, family, kwargs[""policy""]) ) return ret if not __salt__[""iptables.set_policy""]( table, kwargs[""chain""], kwargs[""policy""], family ): ret[""changes""] = {""locale"": name} ret[""result""] = True ret[""comment""] = ""Set default policy for {} to {} family {}"".format( kwargs[""chain""], kwargs[""policy""], family ) if ""save"" in kwargs and kwargs[""save""]: if kwargs[""save""] is not True: filename = kwargs[""save""] else: filename = None __salt__[""iptables.save""](filename=filename, family=family) ret[ ""comment"" ] = ""Set and saved default policy for {} to {} family {}"".format( kwargs[""chain""], kwargs[""policy""], family ) return ret else: ret[""result""] = False ret[""comment""] = ""Failed to set iptables default policy"" return ret "," .. versionadded:: 2014.1.0 Sets the default policy for iptables firewall tables table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6 policy The requested table policy save If set to a true value, the new iptables rules for the given family will be saved to a file. See the ``append`` state for more details. ",62,176,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def set_policy(name, table=""filter"", family=""ipv4"", **kwargs): ret = {""name"": name, ""changes"": {}, ""result"": None, ""comment"": """"} for ignore in _STATE_INTERNAL_KEYWORDS: if ignore in kwargs: del kwargs[ignore] if ( __salt__[""iptables.get_policy""](table, kwargs[""chain""], family) == kwargs[""policy""] ): ret[""result""] = True ret[ ""comment"" ] = ""iptables default policy for chain {} on table {} for {} already set to {}"".format( kwargs[""chain""], table, family, kwargs[""policy""] ) return ret if __opts__[""test""]: ret[""comment""] = ( ""iptables default policy for chain {} on table {} for {} needs to be set"" "" to {}"".format(kwargs[""chain""], table, family, kwargs[""policy""]) ) return ret if not __salt__[""iptables.set_policy""]( table, kwargs[""chain""], kwargs[""policy""], family ): ret[""changes""] = {""locale"": name} ret[""result""] = True ret[""comment""] = ""Set default policy for {} to {} family {}"".format( kwargs[""chain""], kwargs[""policy""], family ) if ""save"" in kwargs and kwargs[""save""]: if kwargs[""save""] is not True: filename = kwargs[""save""] else: filename = None __salt__[""iptables.save""](filename=filename, family=family) ret[ ""comment"" ] = ""Set and saved default policy for {} to {} family {}"".format( kwargs[""chain""], kwargs[""policy""], family ) return ret else: ret[""result""] = False ret[""comment""] = ""Failed to set iptables default policy"" return ret ``` ###Assistant : .. versionadded:: 2014.1.0 Sets the default policy for iptables firewall tables table The table that owns the chain that should be modified family Networking family, either ipv4 or ipv6 policy The requested table policy save If set to a true value, the new iptables rules for the given family will be saved to a file. See the ``append`` state for more details. " 2618,"def _after_start(self): delay = self.request.config.getoption('--qute-delay-start') if delay: with self.disable_capturing(): print(f""- waiting {delay}ms for quteprocess "" f""(PID: {self.proc.processId()})..."") time.sleep(delay / 1000) ","Wait before continuing if requested, e.g. for debugger attachment.",9,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _after_start(self): delay = self.request.config.getoption('--qute-delay-start') if delay: with self.disable_capturing(): print(f""- waiting {delay}ms for quteprocess "" f""(PID: {self.proc.processId()})..."") time.sleep(delay / 1000) ``` ###Assistant : Wait before continuing if requested, e.g. for debugger attachment. " 2619,"def list_secrets(path, default=None): if default is None: default = CommandExecutionError log.debug(""Listing vault secret keys for %s in %s"", __grains__[""id""], path) version2 = __utils__[""vault.is_v2""](path) if version2[""v2""]: path = version2[""metadata""] try: url = ""v1/{}"".format(path) response = __utils__[""vault.make_request""](""LIST"", url) if response.status_code != 200: response.raise_for_status() return response.json()[""data""] except Exception as err: # pylint: disable=broad-except if default is CommandExecutionError: raise CommandExecutionError( ""Failed to list secrets! {}: {}"".format(type(err).__name__, err) ) return default "," .. versionchanged:: 3001 The ``default`` argument has been added. When the path or path/key combination is not found, an exception will be raised, unless a default is provided. List secret keys at the path in vault. 
The vault policy used must allow this. The path should end with a trailing slash. CLI Example: .. code-block:: bash salt '*' vault.list_secrets ""secret/my/"" ",60,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list_secrets(path, default=None): if default is None: default = CommandExecutionError log.debug(""Listing vault secret keys for %s in %s"", __grains__[""id""], path) version2 = __utils__[""vault.is_v2""](path) if version2[""v2""]: path = version2[""metadata""] try: url = ""v1/{}"".format(path) response = __utils__[""vault.make_request""](""LIST"", url) if response.status_code != 200: response.raise_for_status() return response.json()[""data""] except Exception as err: # pylint: disable=broad-except if default is CommandExecutionError: raise CommandExecutionError( ""Failed to list secrets! {}: {}"".format(type(err).__name__, err) ) return default ``` ###Assistant : .. versionchanged:: 3001 The ``default`` argument has been added. When the path or path/key combination is not found, an exception will be raised, unless a default is provided. List secret keys at the path in vault. The vault policy used must allow this. The path should end with a trailing slash. CLI Example: .. code-block:: bash salt '*' vault.list_secrets ""secret/my/"" " 2620,"def test_custom_kwargs_sharded(tmpdir, cls): strategy = cls(reduce_fp16=True) strategy.model = Mock(spec=LightningModule) strategy.model.trainer = Mock() class_name = ""sharded"" if isinstance(strategy, DDPShardedStrategy) else ""sharded_spawn"" with mock.patch(f""pytorch_lightning.strategies.{class_name}.ShardedDataParallel"", autospec=True) as mock_sharded: strategy.configure_ddp() args, kwargs = mock_sharded.call_args assert ""reduce_fp16"" in kwargs assert kwargs[""reduce_fp16""] @RunIf(skip_windows=True, fairscale=True) @mock.patch(""pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers"", autospec=True) @pytest.mark.parametrize([""params"", ""expected_buffer_size""], [(dict(), 0), (dict(reduce_buffer_size=128), 128)]) @pytest.mark.parametrize(""num_nodes"", [1, 2])","Tests to ensure that if custom kwargs are passed, they are set correctly.",13,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_custom_kwargs_sharded(tmpdir, cls): strategy = cls(reduce_fp16=True) strategy.model = Mock(spec=LightningModule) strategy.model.trainer = Mock() class_name = ""sharded"" if isinstance(strategy, DDPShardedStrategy) else ""sharded_spawn"" with mock.patch(f""pytorch_lightning.strategies.{class_name}.ShardedDataParallel"", autospec=True) as mock_sharded: strategy.configure_ddp() args, kwargs = mock_sharded.call_args assert ""reduce_fp16"" in kwargs assert kwargs[""reduce_fp16""] @RunIf(skip_windows=True, fairscale=True) @mock.patch(""pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers"", autospec=True) @pytest.mark.parametrize([""params"", ""expected_buffer_size""], [(dict(), 0), (dict(reduce_buffer_size=128), 128)]) @pytest.mark.parametrize(""num_nodes"", [1, 2]) ``` ###Assistant : Tests to ensure that if custom kwargs are passed, they are set correctly. 
" 2621,"def versions_from_parentdir(parentdir_prefix, root, verbose): rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {""version"": dirname[len(parentdir_prefix):], ""full-revisionid"": None, ""dirty"": False, ""error"": None, ""date"": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print(""Tried directories %s but none started with prefix %s"" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod(""rootdir doesn't start with parentdir_prefix"") SHORT_VERSION_PY = %s ","Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory # This file was generated by 'versioneer.py' (0.21) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json version_json = # END VERSION_JSON ",84,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def versions_from_parentdir(parentdir_prefix, root, verbose): rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {""version"": dirname[len(parentdir_prefix):], ""full-revisionid"": None, ""dirty"": False, ""error"": None, ""date"": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print(""Tried directories %s but none started with prefix %s"" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod(""rootdir doesn't start with parentdir_prefix"") SHORT_VERSION_PY = %s ``` ###Assistant : Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory # This file was generated by 'versioneer.py' (0.21) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json version_json = # END VERSION_JSON " 2622,"def response_add(self, request, obj, post_url_continue=None): opts = obj._meta preserved_filters = self.get_preserved_filters(request) obj_url = reverse( ""admin:%s_%s_change"" % (opts.app_label, opts.model_name), args=(quote(obj.pk),), current_app=self.admin_site.name, ) # Add a link to the object's change form if the user can edit the obj. if self.has_change_permission(request, obj): obj_repr = format_html('{}', urlquote(obj_url), obj) else: obj_repr = str(obj) msg_dict = { ""name"": opts.verbose_name, ""obj"": obj_repr, } # Here, we distinguish between different save types by checking for # the presence of keys in request.POST. 
if IS_POPUP_VAR in request.POST: to_field = request.POST.get(TO_FIELD_VAR) if to_field: attr = str(to_field) else: attr = obj._meta.pk.attname value = obj.serializable_value(attr) popup_response_data = json.dumps( { ""value"": str(value), ""obj"": str(obj), } ) return TemplateResponse( request, self.popup_response_template or [ ""admin/%s/%s/popup_response.html"" % (opts.app_label, opts.model_name), ""admin/%s/popup_response.html"" % opts.app_label, ""admin/popup_response.html"", ], { ""popup_response_data"": popup_response_data, }, ) elif ""_continue"" in request.POST or ( # Redirecting after ""Save as new"". ""_saveasnew"" in request.POST and self.save_as_continue and self.has_change_permission(request, obj) ): msg = _(""The {name} “{obj}” was added successfully."") if self.has_change_permission(request, obj): msg += "" "" + _(""You may edit it again below."") self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS) if post_url_continue is None: post_url_continue = obj_url post_url_continue = add_preserved_filters( {""preserved_filters"": preserved_filters, ""opts"": opts}, post_url_continue, ) return HttpResponseRedirect(post_url_continue) elif ""_addanother"" in request.POST: msg = format_html( _( ""The {name} “{obj}” was added successfully. You may add another {name} below."" ), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters( {""preserved_filters"": preserved_filters, ""opts"": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) else: msg = format_html( _(""The {name} “{obj}” was added successfully.""), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_add(request, obj) "," Determine the HttpResponse for the add_view stage. ",7,248,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def response_add(self, request, obj, post_url_continue=None): opts = obj._meta preserved_filters = self.get_preserved_filters(request) obj_url = reverse( ""admin:%s_%s_change"" % (opts.app_label, opts.model_name), args=(quote(obj.pk),), current_app=self.admin_site.name, ) # Add a link to the object's change form if the user can edit the obj. if self.has_change_permission(request, obj): obj_repr = format_html('{}', urlquote(obj_url), obj) else: obj_repr = str(obj) msg_dict = { ""name"": opts.verbose_name, ""obj"": obj_repr, } # Here, we distinguish between different save types by checking for # the presence of keys in request.POST. if IS_POPUP_VAR in request.POST: to_field = request.POST.get(TO_FIELD_VAR) if to_field: attr = str(to_field) else: attr = obj._meta.pk.attname value = obj.serializable_value(attr) popup_response_data = json.dumps( { ""value"": str(value), ""obj"": str(obj), } ) return TemplateResponse( request, self.popup_response_template or [ ""admin/%s/%s/popup_response.html"" % (opts.app_label, opts.model_name), ""admin/%s/popup_response.html"" % opts.app_label, ""admin/popup_response.html"", ], { ""popup_response_data"": popup_response_data, }, ) elif ""_continue"" in request.POST or ( # Redirecting after ""Save as new"". 
""_saveasnew"" in request.POST and self.save_as_continue and self.has_change_permission(request, obj) ): msg = _(""The {name} “{obj}” was added successfully."") if self.has_change_permission(request, obj): msg += "" "" + _(""You may edit it again below."") self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS) if post_url_continue is None: post_url_continue = obj_url post_url_continue = add_preserved_filters( {""preserved_filters"": preserved_filters, ""opts"": opts}, post_url_continue, ) return HttpResponseRedirect(post_url_continue) elif ""_addanother"" in request.POST: msg = format_html( _( ""The {name} “{obj}” was added successfully. You may add another {name} below."" ), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters( {""preserved_filters"": preserved_filters, ""opts"": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) else: msg = format_html( _(""The {name} “{obj}” was added successfully.""), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_add(request, obj) ``` ###Assistant : Determine the HttpResponse for the add_view stage. " 2623,"def test_tika_parse_unreachable(self): html = '
    Some Text
    ' # Check if exception is raised when Tika cannot be reached. self.parser.tika_server = """" self.assertRaises(ParseError, self.parser.tika_parse, html) "," GIVEN: - Fresh start WHEN: - tika parsing is called but tika is not available THEN: - a ParseError Exception is thrown ",22,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_tika_parse_unreachable(self): html = '
    Some Text
    ' # Check if exception is raised when Tika cannot be reached. self.parser.tika_server = """" self.assertRaises(ParseError, self.parser.tika_parse, html) ``` ###Assistant : GIVEN: - Fresh start WHEN: - tika parsing is called but tika is not available THEN: - a ParseError Exception is thrown " 2624,"def test_read_config_file_2(): tpot_obj = TPOTRegressor() assert_raises(ValueError, tpot_obj._read_config_file, ""tests/test_config.py.bad"") ",Assert that _read_config_file rasies ValueError with wrong dictionary format,9,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_read_config_file_2(): tpot_obj = TPOTRegressor() assert_raises(ValueError, tpot_obj._read_config_file, ""tests/test_config.py.bad"") ``` ###Assistant : Assert that _read_config_file rasies ValueError with wrong dictionary format " 2625,"def pre_delete_handler(self, sender, instance, **kwargs): key = self.get_key_for_instance(instance) object_type = instance._meta.verbose_name # Delete an existing object logger.debug(f""[{self.branch}] Staging deletion of {object_type} {instance} (PK: {instance.pk})"") self.queue[key] = (ChangeActionChoices.ACTION_DELETE, None) "," Hooks to the pre_delete signal when a branch is active to queue delete actions. ",14,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pre_delete_handler(self, sender, instance, **kwargs): key = self.get_key_for_instance(instance) object_type = instance._meta.verbose_name # Delete an existing object logger.debug(f""[{self.branch}] Staging deletion of {object_type} {instance} (PK: {instance.pk})"") self.queue[key] = (ChangeActionChoices.ACTION_DELETE, None) ``` ###Assistant : Hooks to the pre_delete signal when a branch is active to queue delete actions. " 2626,"def _reset_state(self): self.cache = {} self.resolved_nodes = 0 self.finished_last_inference = True # maps DAGNode uuid to unique instance of a gradio block self.node_to_block: Dict[DAGNode, Any] = {} # maps InputAttributeNodes to unique instance of interactive gradio block self.input_key_to_blocks: Dict[int, Any] = {} ",Resets state for each new RayServeHandle representing a new DAG.,10,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _reset_state(self): self.cache = {} self.resolved_nodes = 0 self.finished_last_inference = True # maps DAGNode uuid to unique instance of a gradio block self.node_to_block: Dict[DAGNode, Any] = {} # maps InputAttributeNodes to unique instance of interactive gradio block self.input_key_to_blocks: Dict[int, Any] = {} ``` ###Assistant : Resets state for each new RayServeHandle representing a new DAG. " 2627,"def find_element(self, selector): return self.driver.find_element(By.CSS_SELECTOR, selector) ","find_element returns the first found element by the css `selector` shortcut to `driver.find_element(By.CSS_SELECTOR, ...)`.",14,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_element(self, selector): return self.driver.find_element(By.CSS_SELECTOR, selector) ``` ###Assistant : find_element returns the first found element by the css `selector` shortcut to `driver.find_element(By.CSS_SELECTOR, ...)`. 
" 2628,"def test_bad_persist_value(self): with self.assertRaises(StreamlitAPIException) as e: ",Throw an error if an invalid value is passed to 'persist'.,11,6,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_bad_persist_value(self): with self.assertRaises(StreamlitAPIException) as e: ``` ###Assistant : Throw an error if an invalid value is passed to 'persist'. " 2629,"def _getDataFileTagsOptionHelp(): return % "", "".join( ""'%s' (%s)"" % d for d in data_files_tags ) data_file_tags_option = data_group.add_option( ""--data-file-tags"", action=""append"", dest=""data_file_tags"", metavar=""DATA_TAGS"", default=[], ) parser.add_option_group(data_group) execute_group = OptionGroup(parser, ""Immediate execution after compilation"") execute_group.add_option( ""--run"", action=""store_true"", dest=""immediate_execution"", default=is_nuitka_run, help= % (""on"" if is_nuitka_run else ""off""), ) execute_group.add_option( ""--debugger"", ""--gdb"", action=""store_true"", dest=""debugger"", default=False, help=, ) execute_group.add_option( ""--execute-with-pythonpath"", action=""store_true"", dest=""keep_pythonpath"", default=False, help=, ) parser.add_option_group(execute_group) dump_group = OptionGroup(parser, ""Dump options for internal tree"") dump_group.add_option( ""--xml"", action=""store_true"", dest=""dump_xml"", default=False, help=""Dump the final result of optimization as XML, then exit."", ) parser.add_option_group(dump_group) codegen_group = OptionGroup(parser, ""Code generation choices"") codegen_group.add_option( ""--disable-bytecode-cache"", action=""store_true"", dest=""disable_bytecode_cache"", default=False, help=, ) codegen_group.add_option( ""--full-compat"", action=""store_false"", dest=""improved"", default=True, help=, ) codegen_group.add_option( ""--file-reference-choice"", action=""store"", dest=""file_reference_mode"", metavar=""MODE"", choices=(""original"", ""runtime"", ""frozen""), default=None, help=, ) codegen_group.add_option( ""--module-name-choice"", action=""store"", dest=""module_name_mode"", metavar=""MODE"", choices=(""original"", ""runtime""), default=None, help=, ) parser.add_option_group(codegen_group) output_group = OptionGroup(parser, ""Output choices"") output_group.add_option( ""-o"", action=""store"", dest=""output_filename"", metavar=""FILENAME"", default=None, help= % """" + ("".exe"" if getOS() == ""Windows"" else "".bin""), ) output_group.add_option( ""--output-dir"", action=""store"", dest=""output_dir"", metavar=""DIRECTORY"", default="""", help=, ) output_group.add_option( ""--remove-output"", action=""store_true"", dest=""remove_build"", default=False, help=, ) output_group.add_option( ""--no-pyi-file"", action=""store_false"", dest=""pyi_file"", default=True, help=, ) parser.add_option_group(output_group) debug_group = OptionGroup(parser, ""Debug features"") debug_group.add_option( ""--debug"", action=""store_true"", dest=""debug"", default=False, help=, ) debug_group.add_option( ""--unstripped"", action=""store_true"", dest=""unstripped"", default=False, help=, ) debug_group.add_option( ""--profile"", action=""store_true"", dest=""profile"", default=False, help=, ) debug_group.add_option( ""--internal-graph"", action=""store_true"", dest=""graph"", default=False, help=, ) debug_group.add_option( ""--trace-execution"", action=""store_true"", dest=""trace_execution"", default=False, help=, ) debug_group.add_option( ""--recompile-c-only"", action=""store_true"", 
dest=""recompile_c_only"", default=False, help=, ) debug_group.add_option( ""--generate-c-only"", action=""store_true"", dest=""generate_c_only"", default=False, help=, ) debug_group.add_option( ""--experimental"", action=""append"", dest=""experimental"", metavar=""FLAG"", default=[], help=, ) debug_group.add_option( ""--explain-imports"", action=""store_true"", dest=""explain_imports"", default=False, help=SUPPRESS_HELP, ) debug_group.add_option( ""--low-memory"", action=""store_true"", dest=""low_memory"", default=False, help=, ) if os.name == ""nt"": debug_group.add_option( ""--disable-dll-dependency-cache"", action=""store_true"", dest=""no_dependency_cache"", default=False, help=, ) debug_group.add_option( ""--force-dll-dependency-cache-update"", action=""store_true"", dest=""update_dependency_cache"", default=False, help=, ) # This is for testing framework, ""coverage.py"" hates to loose the process. And # we can use it to make sure it's not done unknowingly. parser.add_option( ""--must-not-re-execute"", action=""store_false"", dest=""allow_reexecute"", default=True, help=SUPPRESS_HELP, ) parser.add_option_group(debug_group) c_compiler_group = OptionGroup(parser, ""Backend C compiler choice"") c_compiler_group.add_option( ""--clang"", action=""store_true"", dest=""clang"", default=False, help=, ) c_compiler_group.add_option( ""--mingw64"", action=""store_true"", dest=""mingw64"", default=False, help=, ) c_compiler_group.add_option( ""--msvc"", action=""store"", dest=""msvc_version"", default=None, help=, ) c_compiler_group.add_option( ""-j"", ""--jobs"", action=""store"", dest=""jobs"", metavar=""N"", default=None, help=, ) c_compiler_group.add_option( ""--lto"", action=""store"", dest=""lto"", metavar=""choice"", default=""auto"", choices=(""yes"", ""no"", ""auto""), help=, ) c_compiler_group.add_option( ""--static-libpython"", action=""store"", dest=""static_libpython"", metavar=""choice"", default=""auto"", choices=(""yes"", ""no"", ""auto""), help=, ) c_compiler_group.add_option( ""--disable-ccache"", action=""store_true"", dest=""disable_ccache"", default=False, help=, ) parser.add_option_group(c_compiler_group) pgo_group = OptionGroup(parser, ""PGO compilation choices"") pgo_group.add_option( ""--pgo"", action=""store_true"", dest=""is_c_pgo"", default=False, help=, ) pgo_group.add_option( ""--pgo-python"", action=""store_true"", dest=""is_python_pgo"", default=False, help=SUPPRESS_HELP, ) pgo_group.add_option( ""--pgo-python-input"", action=""store"", dest=""python_pgo_input"", default=None, help=SUPPRESS_HELP, ) pgo_group.add_option( ""--pgo-python-policy-unused-module"", action=""store"", dest=""python_pgo_policy_unused_module"", choices=(""include"", ""exclude"", ""bytecode""), default=""include"", help=SUPPRESS_HELP, ) pgo_group.add_option( ""--pgo-args"", action=""store"", dest=""pgo_args"", default="""", help=, ) pgo_group.add_option( ""--pgo-executable"", action=""store"", dest=""pgo_executable"", default=None, help=, ) parser.add_option_group(pgo_group) tracing_group = OptionGroup(parser, ""Tracing features"") tracing_group.add_option( ""--quiet"", action=""store_true"", dest=""quiet"", default=False, help=, ) tracing_group.add_option( ""--show-scons"", action=""store_true"", dest=""show_scons"", default=False, help=, ) tracing_group.add_option( ""--show-progress"", action=""store_true"", dest=""show_progress"", default=False, help=, ) tracing_group.add_option( ""--no-progressbar"", action=""store_false"", dest=""progress_bar"", default=True, help=, ) 
tracing_group.add_option( ""--show-memory"", action=""store_true"", dest=""show_memory"", default=False, help=, ) tracing_group.add_option( ""--show-modules"", action=""store_true"", dest=""show_inclusion"", default=False, help=, ) tracing_group.add_option( ""--show-modules-output"", action=""store"", dest=""show_inclusion_output"", metavar=""PATH"", default=None, help=, ) tracing_group.add_option( ""--report"", action=""store"", dest=""compilation_report_filename"", default=None, help=, ) tracing_group.add_option( ""--verbose"", action=""store_true"", dest=""verbose"", default=False, help=, ) tracing_group.add_option( ""--verbose-output"", action=""store"", dest=""verbose_output"", metavar=""PATH"", default=None, help=, ) parser.add_option_group(tracing_group) windows_group = OptionGroup(parser, ""Windows specific controls"") windows_group.add_option( ""--windows-dependency-tool"", action=""store"", dest=""dependency_tool"", default=None, help=SUPPRESS_HELP, ) windows_group.add_option( ""--windows-disable-console"", action=""store_true"", dest=""disable_console"", default=False, help=, ) windows_group.add_option( ""--windows-icon-from-ico"", action=""append"", dest=""icon_path"", metavar=""ICON_PATH"", default=[], help=, ) windows_group.add_option( ""--windows-icon-from-exe"", action=""store"", dest=""icon_exe_path"", metavar=""ICON_EXE_PATH"", default=None, help=""Copy executable icons from this existing executable (Windows only)."", ) windows_group.add_option( ""--onefile-windows-splash-screen-image"", action=""store"", dest=""splash_screen_image"", default=None, help=, ) windows_group.add_option( ""--windows-uac-admin"", action=""store_true"", dest=""windows_uac_admin"", metavar=""WINDOWS_UAC_ADMIN"", default=False, help=""Request Windows User Control, to grant admin rights on execution. (Windows only). 
Defaults to off."", ) windows_group.add_option( ""--windows-uac-uiaccess"", action=""store_true"", dest=""windows_uac_uiaccess"", metavar=""WINDOWS_UAC_UIACCESS"", default=False, help=, ) windows_group.add_option( ""--windows-company-name"", action=""store"", dest=""windows_company_name"", metavar=""WINDOWS_COMPANY_NAME"", default=None, help=, ) windows_group.add_option( ""--windows-product-name"", action=""store"", dest=""windows_product_name"", metavar=""WINDOWS_PRODUCT_NAME"", default=None, help=, ) windows_group.add_option( ""--windows-file-version"", action=""store"", dest=""windows_file_version"", metavar=""WINDOWS_FILE_VERSION"", default=None, help=, ) windows_group.add_option( ""--windows-product-version"", action=""store"", dest=""windows_product_version"", metavar=""WINDOWS_PRODUCT_VERSION"", default=None, help=, ) windows_group.add_option( ""--windows-file-description"", action=""store"", dest=""windows_file_description"", metavar=""WINDOWS_FILE_DESCRIPTION"", default=None, help=, ) windows_group.add_option( ""--windows-onefile-tempdir"", ""--onefile-tempdir"", action=""store_true"", dest=""is_onefile_tempdir"", metavar=""ONEFILE_TEMPDIR"", default=False, help=SUPPRESS_HELP, ) windows_group.add_option( ""--windows-onefile-tempdir-spec"", ""--onefile-tempdir-spec"", action=""store"", dest=""onefile_tempdir_spec"", metavar=""ONEFILE_TEMPDIR_SPEC"", default=None, help=, ) windows_group.add_option( ""--windows-force-stdout-spec"", action=""store"", dest=""force_stdout_spec"", metavar=""WINDOWS_FORCE_STDOUT_SPEC"", default=None, help=, ) windows_group.add_option( ""--windows-force-stderr-spec"", action=""store"", dest=""force_stderr_spec"", metavar=""WINDOWS_FORCE_STDERR_SPEC"", default=None, help=, ) parser.add_option_group(windows_group) macos_group = OptionGroup(parser, ""macOS specific controls"") macos_group.add_option( ""--macos-target-arch"", action=""store"", dest=""macos_target_arch"", choices=(""universal"", ""arm64"", ""x86_64""), metavar=""MACOS_TARGET_ARCH"", default=None, help=, ) macos_group.add_option( ""--macos-disable-console"", ""--disable-console"", action=""store_true"", dest=""disable_console"", default=False, help=, ) macos_group.add_option( ""--macos-create-app-bundle"", action=""store_true"", dest=""macos_create_bundle"", default=False, help=, ) macos_group.add_option( ""--macos-onefile-icon"", action=""append"", dest=""icon_path"", metavar=""ICON_PATH"", default=[], help=""Add executable icon for binary to use. Can be given only one time. Defaults to Python icon if available."", ) macos_group.add_option( ""--macos-signed-app-name"", action=""store"", dest=""macos_signed_app_name"", metavar=""MACOS_SIGNED_APP_NAME"", default=None, help=, ) macos_group.add_option( ""--macos-app-name"", action=""store"", dest=""macos_app_name"", metavar=""MACOS_APP_NAME"", default=None, help=, ) macos_group.add_option( ""--macos-app-version"", action=""store"", dest=""macos_app_version"", metavar=""MACOS_APP_VERSION"", default=None, help=, ) parser.add_option_group(macos_group) linux_group = OptionGroup(parser, ""Linux specific controls"") linux_group.add_option( ""--linux-onefile-icon"", action=""append"", dest=""icon_path"", metavar=""ICON_PATH"", default=[], help=""Add executable icon for onefile binary to use. Can be given only one time. 
Defaults to Python icon if available."", ) linux_group.add_option( ""--linux-onefile-compression"", action=""store"", dest=""appimage_compression"", choices=(""gzip"", ""xz""), metavar=""COMPRESSION"", default=""gzip"", help=""Compression method to use for Linux onefile builds. Defaults to gzip for faster decompression"", ) parser.add_option_group(linux_group) plugin_group = OptionGroup(parser, ""Plugin control"") plugin_group.add_option( ""--enable-plugin"", ""--plugin-enable"", action=""append"", dest=""plugins_enabled"", metavar=""PLUGIN_NAME"", default=[], help=, ) plugin_group.add_option( ""--disable-plugin"", ""--plugin-disable"", action=""append"", dest=""plugins_disabled"", metavar=""PLUGIN_NAME"", default=[], help=, ) plugin_group.add_option( ""--plugin-no-detection"", action=""store_false"", dest=""detect_missing_plugins"", default=True, help=, ) plugin_group.add_option( ""--plugin-list"", action=""store_true"", dest=""list_plugins"", default=False, help=, ) parser.add_option_group(plugin_group) plugin_group.add_option( ""--user-plugin"", action=""append"", dest=""user_plugins"", metavar=""PATH"", default=[], help=""The file name of user plugin. Can be given multiple times. Default empty."", ) plugin_group.add_option( ""--persist-source-changes"", action=""store_true"", dest=""persist_source_changes"", default=False, help=, ) ","\ For included data files, special handlings can be chosen. With the commercial plugins, e.g. files can be included directly in the binary. The list is completed by some plugins. With the current list of plugins, these are available: %s. The default is empty.\ Execute immediately the created binary (or import the compiled module). Defaults to %s.\ Execute inside a debugger, e.g. ""gdb"" or ""lldb"" to automatically get a stack trace. Defaults to off.\ When immediately executing the created binary (--execute), don't reset PYTHONPATH. When all modules are successfully included, you ought to not need PYTHONPATH anymore.\ Do not reuse dependency analysis results for modules, esp. from standard library, that are included as bytecode.\ Enforce absolute compatibility with CPython. Do not even allow minor deviations from CPython behavior, e.g. not having better tracebacks or exception messages which are not really incompatible, but only different. This is intended for tests only and should not be used for normal use.\ Select what value ""__file__"" is going to be. With ""runtime"" (default for standalone binary mode and module mode), the created binaries and modules, use the location of themselves to deduct the value of ""__file__"". Included packages pretend to be in directories below that location. This allows you to include data files in deployments. If you merely seek acceleration, it's better for you to use the ""original"" value, where the source files location will be used. With ""frozen"" a notation """" is used. For compatibility reasons, the ""__file__"" value will always have "".py"" suffix independent of what it really is.\ Select what value ""__name__"" and ""__package__"" are going to be. With ""runtime"" (default for module mode), the created module, it uses the parent package to deduct the value of ""__package__"", to be fully compatible. This allows for more optimization to happen, but normally any package can be loaded into another one, but this will raise an import error when it detects that with ""original"" mode.\ Specify how the executable should be named. 
For extension modules there is no choice, also not for standalone mode and using it will be an error. This may include path information that needs to exist though. Defaults to '%s' on this platform. \ Specify where intermediate and final output files should be put. The DIRECTORY will be populated with C files, object files, etc. Defaults to current directory. \ Removes the build directory after producing the module or exe file. Defaults to off.\ Do not create a "".pyi"" file for extension modules created by Nuitka. This is used to detect implicit imports. Defaults to off.\ Executing all self checks possible to find errors in Nuitka, do not use for production. Defaults to off.\ Keep debug info in the resulting object file for better debugger interaction. Defaults to off.\ Enable vmprof based profiling of time spent. Not working currently. Defaults to off.\ Create graph of optimization process internals, do not use for whole programs, but only for small test cases. Defaults to off.\ Traced execution output, output the line of code before executing it. Defaults to off.\ This is not incremental compilation, but for Nuitka development only. Takes existing files and simply compile them as C again. Allows compiling edited C files for quick debugging changes to the generated source, e.g. to see if code is passed by, values output, etc, Defaults to off. Depends on compiling Python source to determine which files it should look at.\ Generate only C source code, and do not compile it to binary or module. This is for debugging and code coverage analysis that doesn't waste CPU. Defaults to off. Do not think you can use this directly.\ Use features declared as 'experimental'. May have no effect if no experimental features are present in the code. Uses secret tags (check source) per experimented feature.\ Attempt to use less memory, by forking less C compilation jobs and using options that use less memory. For use on embedded machines. Use this in case of out of memory problems. Defaults to off.\ Disable the dependency walker cache. Will result in much longer times to create the distribution folder, but might be used in case the cache is suspect to cause errors. \ For an update of the dependency walker cache. Will result in much longer times to create the distribution folder, but might be used in case the cache is suspect to cause errors or known to need an update. \ Enforce the use of clang. On Windows this requires a working Visual Studio version to piggy back on. Defaults to off.\ Enforce the use of MinGW64 on Windows. Defaults to off.\ Enforce the use of specific MSVC version on Windows. Allowed values are e.g. ""14.3"" (MSVC 2022) and other MSVC version numbers, specify ""list"" for a list of installed compilers, or use ""latest"". Defaults to latest MSVC being used if installed, otherwise MinGW64 is used.\ Specify the allowed number of parallel C compiler jobs. Defaults to the system CPU count.\ Use link time optimizations (MSVC, gcc, clang). Allowed values are ""yes"", ""no"", and ""auto"" (when it's known to work). Defaults to ""auto"".\ Use static link library of Python. Allowed values are ""yes"", ""no"", and ""auto"" (when it's known to work). Defaults to ""auto"".\ Do not attempt to use ccache (gcc, clang, etc.) or clcache (MSVC, clangcl).\ Enables C level profile guided optimization (PGO), by executing a dedicated build first for a profiling run, and then using the result to feedback into the C compilation. Note: This is experimental and not working with standalone modes of Nuitka yet. 
Defaults to off.\ Arguments to be passed in case of profile guided optimization. These are passed to the special built executable during the PGO profiling run. Default empty.\ Command to execute when collecting profile information. Use this only, if you need to launch it through a script that prepares it to run. Default use created program.\ Disable all information outputs, but show warnings. Defaults to off.\ Operate Scons in non-quiet mode, showing the executed commands. Defaults to off.Provide progress information and statistics. Defaults to off.Disable progress bar outputs (if tqdm is installed). Defaults to off.Provide memory information and statistics. Defaults to off.\ Provide information for included modules and DLLs Defaults to off.\ Where to output --show-modules, should be a filename. Default is standard output.\ Report module inclusion in an XML output file. Default is off.\ Output details of actions taken, esp. in optimizations. Can become a lot. Defaults to off.\ Where to output --verbose, should be a filename. Default is standard output.\ When compiling for Windows, disable the console window. Defaults to off.\ Add executable icon. Can be given multiple times for different resolutions or files with multiple icons inside. In the later case, you may also suffix with # where n is an integer index starting from 1, specifying a specific icon to be included, and all others to be ignored.\ When compiling for Windows and onefile, show this while loading the application. Defaults to off.\ Request Windows User Control, to enforce running from a few folders only, remote desktop access. (Windows only). Defaults to off.\ Name of the company to use in Windows Version information. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Name of the product to use in Windows Version information. Defaults to base filename of the binary.\ File version to use in Windows Version information. Must be a sequence of up to 4 numbers, e.g. 1.0.0.0, only this format is allowed. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Product version to use in Windows Version information. Must be a sequence of up to 4 numbers, e.g. 1.0.0.0, only this format is allowed. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Description of the file use in Windows Version information. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to nonsense.\ Use this as a temporary folder. Defaults to '%TEMP%\\onefile_%PID%_%TIME%', i.e. system temporary directory.\ Force standard output of the program to go to this location. Useful for programs with disabled console and programs using the Windows Services Plugin of Nuitka. Defaults to not active, use e.g. '%PROGRAM%.out.txt', i.e. file near your program.\ Force standard error of the program to go to this location. Useful for programs with disabled console and programs using the Windows Services Plugin of Nuitka. Defaults to not active, use e.g. '%PROGRAM%.err.txt', i.e. file near your program.\ What architectures is this to supposed to run on. Default and limit is what the running Python allows for. 
Default is ""native"" which is the architecture the Python is run with.\ When compiling for macOS, disable the console window and create a GUI application. Defaults to off.\ When compiling for macOS, create a bundle rather than a plain binary application. Currently experimental and incomplete. Currently this is the only way to unlock disabling of console.Defaults to off.\ Name of the application to use for macOS signing. Follow com.yourcompany.appname naming results for best results, as these have to be globally unique, and will grant protected API accesses.\ Name of the product to use in macOS bundle information. Defaults to base filename of the binary.\ Product version to use in macOS bundle information. Defaults to 1.0 if not given.\ Enabled plugins. Must be plug-in names. Use --plugin-list to query the full list and exit. Default empty.\ Disabled plugins. Must be plug-in names. Use --plugin-list to query the full list and exit. Default empty.\ Plugins can detect if they might be used, and the you can disable the warning via ""--disable-plugin=plugin-that-warned"", or you can use this option to disable the mechanism entirely, which also speeds up compilation slightly of course as this detection code is run in vain once you are certain of which plugins to use. Defaults to off.\ Show list of all available plugins and exit. Defaults to off.\ Write source changes to original Python files. Use with care. May need permissions, best for use in a virtualenv to debug if plugin code changes work with standard Python or to benefit from bloat removal even with pure Python. Default False.",1740,859,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _getDataFileTagsOptionHelp(): return % "", "".join( ""'%s' (%s)"" % d for d in data_files_tags ) data_file_tags_option = data_group.add_option( ""--data-file-tags"", action=""append"", dest=""data_file_tags"", metavar=""DATA_TAGS"", default=[], ) parser.add_option_group(data_group) execute_group = OptionGroup(parser, ""Immediate execution after compilation"") execute_group.add_option( ""--run"", action=""store_true"", dest=""immediate_execution"", default=is_nuitka_run, help= % (""on"" if is_nuitka_run else ""off""), ) execute_group.add_option( ""--debugger"", ""--gdb"", action=""store_true"", dest=""debugger"", default=False, help=, ) execute_group.add_option( ""--execute-with-pythonpath"", action=""store_true"", dest=""keep_pythonpath"", default=False, help=, ) parser.add_option_group(execute_group) dump_group = OptionGroup(parser, ""Dump options for internal tree"") dump_group.add_option( ""--xml"", action=""store_true"", dest=""dump_xml"", default=False, help=""Dump the final result of optimization as XML, then exit."", ) parser.add_option_group(dump_group) codegen_group = OptionGroup(parser, ""Code generation choices"") codegen_group.add_option( ""--disable-bytecode-cache"", action=""store_true"", dest=""disable_bytecode_cache"", default=False, help=, ) codegen_group.add_option( ""--full-compat"", action=""store_false"", dest=""improved"", default=True, help=, ) codegen_group.add_option( ""--file-reference-choice"", action=""store"", dest=""file_reference_mode"", metavar=""MODE"", choices=(""original"", ""runtime"", ""frozen""), default=None, help=, ) codegen_group.add_option( ""--module-name-choice"", action=""store"", dest=""module_name_mode"", metavar=""MODE"", choices=(""original"", ""runtime""), default=None, help=, ) parser.add_option_group(codegen_group) output_group = 
OptionGroup(parser, ""Output choices"") output_group.add_option( ""-o"", action=""store"", dest=""output_filename"", metavar=""FILENAME"", default=None, help= % """" + ("".exe"" if getOS() == ""Windows"" else "".bin""), ) output_group.add_option( ""--output-dir"", action=""store"", dest=""output_dir"", metavar=""DIRECTORY"", default="""", help=, ) output_group.add_option( ""--remove-output"", action=""store_true"", dest=""remove_build"", default=False, help=, ) output_group.add_option( ""--no-pyi-file"", action=""store_false"", dest=""pyi_file"", default=True, help=, ) parser.add_option_group(output_group) debug_group = OptionGroup(parser, ""Debug features"") debug_group.add_option( ""--debug"", action=""store_true"", dest=""debug"", default=False, help=, ) debug_group.add_option( ""--unstripped"", action=""store_true"", dest=""unstripped"", default=False, help=, ) debug_group.add_option( ""--profile"", action=""store_true"", dest=""profile"", default=False, help=, ) debug_group.add_option( ""--internal-graph"", action=""store_true"", dest=""graph"", default=False, help=, ) debug_group.add_option( ""--trace-execution"", action=""store_true"", dest=""trace_execution"", default=False, help=, ) debug_group.add_option( ""--recompile-c-only"", action=""store_true"", dest=""recompile_c_only"", default=False, help=, ) debug_group.add_option( ""--generate-c-only"", action=""store_true"", dest=""generate_c_only"", default=False, help=, ) debug_group.add_option( ""--experimental"", action=""append"", dest=""experimental"", metavar=""FLAG"", default=[], help=, ) debug_group.add_option( ""--explain-imports"", action=""store_true"", dest=""explain_imports"", default=False, help=SUPPRESS_HELP, ) debug_group.add_option( ""--low-memory"", action=""store_true"", dest=""low_memory"", default=False, help=, ) if os.name == ""nt"": debug_group.add_option( ""--disable-dll-dependency-cache"", action=""store_true"", dest=""no_dependency_cache"", default=False, help=, ) debug_group.add_option( ""--force-dll-dependency-cache-update"", action=""store_true"", dest=""update_dependency_cache"", default=False, help=, ) # This is for testing framework, ""coverage.py"" hates to loose the process. And # we can use it to make sure it's not done unknowingly. 
parser.add_option( ""--must-not-re-execute"", action=""store_false"", dest=""allow_reexecute"", default=True, help=SUPPRESS_HELP, ) parser.add_option_group(debug_group) c_compiler_group = OptionGroup(parser, ""Backend C compiler choice"") c_compiler_group.add_option( ""--clang"", action=""store_true"", dest=""clang"", default=False, help=, ) c_compiler_group.add_option( ""--mingw64"", action=""store_true"", dest=""mingw64"", default=False, help=, ) c_compiler_group.add_option( ""--msvc"", action=""store"", dest=""msvc_version"", default=None, help=, ) c_compiler_group.add_option( ""-j"", ""--jobs"", action=""store"", dest=""jobs"", metavar=""N"", default=None, help=, ) c_compiler_group.add_option( ""--lto"", action=""store"", dest=""lto"", metavar=""choice"", default=""auto"", choices=(""yes"", ""no"", ""auto""), help=, ) c_compiler_group.add_option( ""--static-libpython"", action=""store"", dest=""static_libpython"", metavar=""choice"", default=""auto"", choices=(""yes"", ""no"", ""auto""), help=, ) c_compiler_group.add_option( ""--disable-ccache"", action=""store_true"", dest=""disable_ccache"", default=False, help=, ) parser.add_option_group(c_compiler_group) pgo_group = OptionGroup(parser, ""PGO compilation choices"") pgo_group.add_option( ""--pgo"", action=""store_true"", dest=""is_c_pgo"", default=False, help=, ) pgo_group.add_option( ""--pgo-python"", action=""store_true"", dest=""is_python_pgo"", default=False, help=SUPPRESS_HELP, ) pgo_group.add_option( ""--pgo-python-input"", action=""store"", dest=""python_pgo_input"", default=None, help=SUPPRESS_HELP, ) pgo_group.add_option( ""--pgo-python-policy-unused-module"", action=""store"", dest=""python_pgo_policy_unused_module"", choices=(""include"", ""exclude"", ""bytecode""), default=""include"", help=SUPPRESS_HELP, ) pgo_group.add_option( ""--pgo-args"", action=""store"", dest=""pgo_args"", default="""", help=, ) pgo_group.add_option( ""--pgo-executable"", action=""store"", dest=""pgo_executable"", default=None, help=, ) parser.add_option_group(pgo_group) tracing_group = OptionGroup(parser, ""Tracing features"") tracing_group.add_option( ""--quiet"", action=""store_true"", dest=""quiet"", default=False, help=, ) tracing_group.add_option( ""--show-scons"", action=""store_true"", dest=""show_scons"", default=False, help=, ) tracing_group.add_option( ""--show-progress"", action=""store_true"", dest=""show_progress"", default=False, help=, ) tracing_group.add_option( ""--no-progressbar"", action=""store_false"", dest=""progress_bar"", default=True, help=, ) tracing_group.add_option( ""--show-memory"", action=""store_true"", dest=""show_memory"", default=False, help=, ) tracing_group.add_option( ""--show-modules"", action=""store_true"", dest=""show_inclusion"", default=False, help=, ) tracing_group.add_option( ""--show-modules-output"", action=""store"", dest=""show_inclusion_output"", metavar=""PATH"", default=None, help=, ) tracing_group.add_option( ""--report"", action=""store"", dest=""compilation_report_filename"", default=None, help=, ) tracing_group.add_option( ""--verbose"", action=""store_true"", dest=""verbose"", default=False, help=, ) tracing_group.add_option( ""--verbose-output"", action=""store"", dest=""verbose_output"", metavar=""PATH"", default=None, help=, ) parser.add_option_group(tracing_group) windows_group = OptionGroup(parser, ""Windows specific controls"") windows_group.add_option( ""--windows-dependency-tool"", action=""store"", dest=""dependency_tool"", default=None, help=SUPPRESS_HELP, ) 
windows_group.add_option( ""--windows-disable-console"", action=""store_true"", dest=""disable_console"", default=False, help=, ) windows_group.add_option( ""--windows-icon-from-ico"", action=""append"", dest=""icon_path"", metavar=""ICON_PATH"", default=[], help=, ) windows_group.add_option( ""--windows-icon-from-exe"", action=""store"", dest=""icon_exe_path"", metavar=""ICON_EXE_PATH"", default=None, help=""Copy executable icons from this existing executable (Windows only)."", ) windows_group.add_option( ""--onefile-windows-splash-screen-image"", action=""store"", dest=""splash_screen_image"", default=None, help=, ) windows_group.add_option( ""--windows-uac-admin"", action=""store_true"", dest=""windows_uac_admin"", metavar=""WINDOWS_UAC_ADMIN"", default=False, help=""Request Windows User Control, to grant admin rights on execution. (Windows only). Defaults to off."", ) windows_group.add_option( ""--windows-uac-uiaccess"", action=""store_true"", dest=""windows_uac_uiaccess"", metavar=""WINDOWS_UAC_UIACCESS"", default=False, help=, ) windows_group.add_option( ""--windows-company-name"", action=""store"", dest=""windows_company_name"", metavar=""WINDOWS_COMPANY_NAME"", default=None, help=, ) windows_group.add_option( ""--windows-product-name"", action=""store"", dest=""windows_product_name"", metavar=""WINDOWS_PRODUCT_NAME"", default=None, help=, ) windows_group.add_option( ""--windows-file-version"", action=""store"", dest=""windows_file_version"", metavar=""WINDOWS_FILE_VERSION"", default=None, help=, ) windows_group.add_option( ""--windows-product-version"", action=""store"", dest=""windows_product_version"", metavar=""WINDOWS_PRODUCT_VERSION"", default=None, help=, ) windows_group.add_option( ""--windows-file-description"", action=""store"", dest=""windows_file_description"", metavar=""WINDOWS_FILE_DESCRIPTION"", default=None, help=, ) windows_group.add_option( ""--windows-onefile-tempdir"", ""--onefile-tempdir"", action=""store_true"", dest=""is_onefile_tempdir"", metavar=""ONEFILE_TEMPDIR"", default=False, help=SUPPRESS_HELP, ) windows_group.add_option( ""--windows-onefile-tempdir-spec"", ""--onefile-tempdir-spec"", action=""store"", dest=""onefile_tempdir_spec"", metavar=""ONEFILE_TEMPDIR_SPEC"", default=None, help=, ) windows_group.add_option( ""--windows-force-stdout-spec"", action=""store"", dest=""force_stdout_spec"", metavar=""WINDOWS_FORCE_STDOUT_SPEC"", default=None, help=, ) windows_group.add_option( ""--windows-force-stderr-spec"", action=""store"", dest=""force_stderr_spec"", metavar=""WINDOWS_FORCE_STDERR_SPEC"", default=None, help=, ) parser.add_option_group(windows_group) macos_group = OptionGroup(parser, ""macOS specific controls"") macos_group.add_option( ""--macos-target-arch"", action=""store"", dest=""macos_target_arch"", choices=(""universal"", ""arm64"", ""x86_64""), metavar=""MACOS_TARGET_ARCH"", default=None, help=, ) macos_group.add_option( ""--macos-disable-console"", ""--disable-console"", action=""store_true"", dest=""disable_console"", default=False, help=, ) macos_group.add_option( ""--macos-create-app-bundle"", action=""store_true"", dest=""macos_create_bundle"", default=False, help=, ) macos_group.add_option( ""--macos-onefile-icon"", action=""append"", dest=""icon_path"", metavar=""ICON_PATH"", default=[], help=""Add executable icon for binary to use. Can be given only one time. 
Defaults to Python icon if available."", ) macos_group.add_option( ""--macos-signed-app-name"", action=""store"", dest=""macos_signed_app_name"", metavar=""MACOS_SIGNED_APP_NAME"", default=None, help=, ) macos_group.add_option( ""--macos-app-name"", action=""store"", dest=""macos_app_name"", metavar=""MACOS_APP_NAME"", default=None, help=, ) macos_group.add_option( ""--macos-app-version"", action=""store"", dest=""macos_app_version"", metavar=""MACOS_APP_VERSION"", default=None, help=, ) parser.add_option_group(macos_group) linux_group = OptionGroup(parser, ""Linux specific controls"") linux_group.add_option( ""--linux-onefile-icon"", action=""append"", dest=""icon_path"", metavar=""ICON_PATH"", default=[], help=""Add executable icon for onefile binary to use. Can be given only one time. Defaults to Python icon if available."", ) linux_group.add_option( ""--linux-onefile-compression"", action=""store"", dest=""appimage_compression"", choices=(""gzip"", ""xz""), metavar=""COMPRESSION"", default=""gzip"", help=""Compression method to use for Linux onefile builds. Defaults to gzip for faster decompression"", ) parser.add_option_group(linux_group) plugin_group = OptionGroup(parser, ""Plugin control"") plugin_group.add_option( ""--enable-plugin"", ""--plugin-enable"", action=""append"", dest=""plugins_enabled"", metavar=""PLUGIN_NAME"", default=[], help=, ) plugin_group.add_option( ""--disable-plugin"", ""--plugin-disable"", action=""append"", dest=""plugins_disabled"", metavar=""PLUGIN_NAME"", default=[], help=, ) plugin_group.add_option( ""--plugin-no-detection"", action=""store_false"", dest=""detect_missing_plugins"", default=True, help=, ) plugin_group.add_option( ""--plugin-list"", action=""store_true"", dest=""list_plugins"", default=False, help=, ) parser.add_option_group(plugin_group) plugin_group.add_option( ""--user-plugin"", action=""append"", dest=""user_plugins"", metavar=""PATH"", default=[], help=""The file name of user plugin. Can be given multiple times. Default empty."", ) plugin_group.add_option( ""--persist-source-changes"", action=""store_true"", dest=""persist_source_changes"", default=False, help=, ) ``` ###Assistant : \ For included data files, special handlings can be chosen. With the commercial plugins, e.g. files can be included directly in the binary. The list is completed by some plugins. With the current list of plugins, these are available: %s. The default is empty.\ Execute immediately the created binary (or import the compiled module). Defaults to %s.\ Execute inside a debugger, e.g. ""gdb"" or ""lldb"" to automatically get a stack trace. Defaults to off.\ When immediately executing the created binary (--execute), don't reset PYTHONPATH. When all modules are successfully included, you ought to not need PYTHONPATH anymore.\ Do not reuse dependency analysis results for modules, esp. from standard library, that are included as bytecode.\ Enforce absolute compatibility with CPython. Do not even allow minor deviations from CPython behavior, e.g. not having better tracebacks or exception messages which are not really incompatible, but only different. This is intended for tests only and should not be used for normal use.\ Select what value ""__file__"" is going to be. With ""runtime"" (default for standalone binary mode and module mode), the created binaries and modules, use the location of themselves to deduct the value of ""__file__"". Included packages pretend to be in directories below that location. This allows you to include data files in deployments. 
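The "runtime" __file__ mode described here means compiled binaries locate data files relative to their own location. Below is a small, generic sketch of the lookup such deployments rely on; the file name config.json is only an example.

```Python
# Generic sketch of resolving a bundled data file relative to __file__ at run
# time; "config.json" is just an example name, not part of the original code.
import os

def data_path(name):
    # Directory of this module/binary at run time, then the data file next to it.
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), name)

print(data_path("config.json"))
```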
If you merely seek acceleration, it's better for you to use the ""original"" value, where the source files location will be used. With ""frozen"" a notation """" is used. For compatibility reasons, the ""__file__"" value will always have "".py"" suffix independent of what it really is.\ Select what value ""__name__"" and ""__package__"" are going to be. With ""runtime"" (default for module mode), the created module, it uses the parent package to deduct the value of ""__package__"", to be fully compatible. This allows for more optimization to happen, but normally any package can be loaded into another one, but this will raise an import error when it detects that with ""original"" mode.\ Specify how the executable should be named. For extension modules there is no choice, also not for standalone mode and using it will be an error. This may include path information that needs to exist though. Defaults to '%s' on this platform. \ Specify where intermediate and final output files should be put. The DIRECTORY will be populated with C files, object files, etc. Defaults to current directory. \ Removes the build directory after producing the module or exe file. Defaults to off.\ Do not create a "".pyi"" file for extension modules created by Nuitka. This is used to detect implicit imports. Defaults to off.\ Executing all self checks possible to find errors in Nuitka, do not use for production. Defaults to off.\ Keep debug info in the resulting object file for better debugger interaction. Defaults to off.\ Enable vmprof based profiling of time spent. Not working currently. Defaults to off.\ Create graph of optimization process internals, do not use for whole programs, but only for small test cases. Defaults to off.\ Traced execution output, output the line of code before executing it. Defaults to off.\ This is not incremental compilation, but for Nuitka development only. Takes existing files and simply compile them as C again. Allows compiling edited C files for quick debugging changes to the generated source, e.g. to see if code is passed by, values output, etc, Defaults to off. Depends on compiling Python source to determine which files it should look at.\ Generate only C source code, and do not compile it to binary or module. This is for debugging and code coverage analysis that doesn't waste CPU. Defaults to off. Do not think you can use this directly.\ Use features declared as 'experimental'. May have no effect if no experimental features are present in the code. Uses secret tags (check source) per experimented feature.\ Attempt to use less memory, by forking less C compilation jobs and using options that use less memory. For use on embedded machines. Use this in case of out of memory problems. Defaults to off.\ Disable the dependency walker cache. Will result in much longer times to create the distribution folder, but might be used in case the cache is suspect to cause errors. \ For an update of the dependency walker cache. Will result in much longer times to create the distribution folder, but might be used in case the cache is suspect to cause errors or known to need an update. \ Enforce the use of clang. On Windows this requires a working Visual Studio version to piggy back on. Defaults to off.\ Enforce the use of MinGW64 on Windows. Defaults to off.\ Enforce the use of specific MSVC version on Windows. Allowed values are e.g. ""14.3"" (MSVC 2022) and other MSVC version numbers, specify ""list"" for a list of installed compilers, or use ""latest"". 
Defaults to latest MSVC being used if installed, otherwise MinGW64 is used.\ Specify the allowed number of parallel C compiler jobs. Defaults to the system CPU count.\ Use link time optimizations (MSVC, gcc, clang). Allowed values are ""yes"", ""no"", and ""auto"" (when it's known to work). Defaults to ""auto"".\ Use static link library of Python. Allowed values are ""yes"", ""no"", and ""auto"" (when it's known to work). Defaults to ""auto"".\ Do not attempt to use ccache (gcc, clang, etc.) or clcache (MSVC, clangcl).\ Enables C level profile guided optimization (PGO), by executing a dedicated build first for a profiling run, and then using the result to feedback into the C compilation. Note: This is experimental and not working with standalone modes of Nuitka yet. Defaults to off.\ Arguments to be passed in case of profile guided optimization. These are passed to the special built executable during the PGO profiling run. Default empty.\ Command to execute when collecting profile information. Use this only, if you need to launch it through a script that prepares it to run. Default use created program.\ Disable all information outputs, but show warnings. Defaults to off.\ Operate Scons in non-quiet mode, showing the executed commands. Defaults to off.Provide progress information and statistics. Defaults to off.Disable progress bar outputs (if tqdm is installed). Defaults to off.Provide memory information and statistics. Defaults to off.\ Provide information for included modules and DLLs Defaults to off.\ Where to output --show-modules, should be a filename. Default is standard output.\ Report module inclusion in an XML output file. Default is off.\ Output details of actions taken, esp. in optimizations. Can become a lot. Defaults to off.\ Where to output --verbose, should be a filename. Default is standard output.\ When compiling for Windows, disable the console window. Defaults to off.\ Add executable icon. Can be given multiple times for different resolutions or files with multiple icons inside. In the later case, you may also suffix with # where n is an integer index starting from 1, specifying a specific icon to be included, and all others to be ignored.\ When compiling for Windows and onefile, show this while loading the application. Defaults to off.\ Request Windows User Control, to enforce running from a few folders only, remote desktop access. (Windows only). Defaults to off.\ Name of the company to use in Windows Version information. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Name of the product to use in Windows Version information. Defaults to base filename of the binary.\ File version to use in Windows Version information. Must be a sequence of up to 4 numbers, e.g. 1.0.0.0, only this format is allowed. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Product version to use in Windows Version information. Must be a sequence of up to 4 numbers, e.g. 1.0.0.0, only this format is allowed. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. Defaults to unused.\ Description of the file use in Windows Version information. One of file or product version is required, when a version resource needs to be added, e.g. to specify product name, or company name. 
Defaults to nonsense.\ Use this as a temporary folder. Defaults to '%TEMP%\\onefile_%PID%_%TIME%', i.e. system temporary directory.\ Force standard output of the program to go to this location. Useful for programs with disabled console and programs using the Windows Services Plugin of Nuitka. Defaults to not active, use e.g. '%PROGRAM%.out.txt', i.e. file near your program.\ Force standard error of the program to go to this location. Useful for programs with disabled console and programs using the Windows Services Plugin of Nuitka. Defaults to not active, use e.g. '%PROGRAM%.err.txt', i.e. file near your program.\ What architectures is this to supposed to run on. Default and limit is what the running Python allows for. Default is ""native"" which is the architecture the Python is run with.\ When compiling for macOS, disable the console window and create a GUI application. Defaults to off.\ When compiling for macOS, create a bundle rather than a plain binary application. Currently experimental and incomplete. Currently this is the only way to unlock disabling of console.Defaults to off.\ Name of the application to use for macOS signing. Follow com.yourcompany.appname naming results for best results, as these have to be globally unique, and will grant protected API accesses.\ Name of the product to use in macOS bundle information. Defaults to base filename of the binary.\ Product version to use in macOS bundle information. Defaults to 1.0 if not given.\ Enabled plugins. Must be plug-in names. Use --plugin-list to query the full list and exit. Default empty.\ Disabled plugins. Must be plug-in names. Use --plugin-list to query the full list and exit. Default empty.\ Plugins can detect if they might be used, and the you can disable the warning via ""--disable-plugin=plugin-that-warned"", or you can use this option to disable the mechanism entirely, which also speeds up compilation slightly of course as this detection code is run in vain once you are certain of which plugins to use. Defaults to off.\ Show list of all available plugins and exit. Defaults to off.\ Write source changes to original Python files. Use with care. May need permissions, best for use in a virtualenv to debug if plugin code changes work with standard Python or to benefit from bloat removal even with pure Python. Default False. " 2630,"def test_collect_workflow_action_data_post(self): response = self.client.post( reverse( ""wagtailadmin_pages:collect_workflow_action_data"", args=( self.page.id, ""approve"", self.page.current_workflow_task_state.id, ), ), {""comment"": ""This is my comment""}, ) self.assertEqual(response.status_code, 200) response_json = json.loads(response.content) self.assertEqual(response_json[""step""], ""success"") self.assertEqual( response_json[""cleaned_data""], {""comment"": ""This is my comment""} ) "," This tests that a POST request to the collect_workflow_action_data view (for the approve action) returns a modal response with the validated data ",22,34,Python," ###User : Below is a Python method which does a task. 
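The long Nuitka entry above builds its command line from optparse option groups. A minimal, self-contained sketch of that grouping pattern is shown below; the option names are invented, not real Nuitka flags.

```Python
# Minimal sketch of the OptionGroup/add_option pattern used by the Nuitka
# option parsing above; option names here are illustrative only.
from optparse import OptionGroup, OptionParser, SUPPRESS_HELP

parser = OptionParser()
demo_group = OptionGroup(parser, "Demo choices")
demo_group.add_option(
    "--demo-flag",
    action="store_true",
    dest="demo_flag",
    default=False,
    help="Example boolean switch. Defaults to off.",
)
demo_group.add_option(
    "--demo-hidden",
    action="store",
    dest="demo_hidden",
    default=None,
    help=SUPPRESS_HELP,  # hidden from --help output, as done for internal options above
)
parser.add_option_group(demo_group)

options, _ = parser.parse_args(["--demo-flag"])
assert options.demo_flag is True
```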
Create a documentation for the below code : ```Python def test_collect_workflow_action_data_post(self): response = self.client.post( reverse( ""wagtailadmin_pages:collect_workflow_action_data"", args=( self.page.id, ""approve"", self.page.current_workflow_task_state.id, ), ), {""comment"": ""This is my comment""}, ) self.assertEqual(response.status_code, 200) response_json = json.loads(response.content) self.assertEqual(response_json[""step""], ""success"") self.assertEqual( response_json[""cleaned_data""], {""comment"": ""This is my comment""} ) ``` ###Assistant : This tests that a POST request to the collect_workflow_action_data view (for the approve action) returns a modal response with the validated data " 2631,"def test_02_train_predictor(self): query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.OK) "," CREATE PREDICTOR {self.test_model_1} FROM {PG_HANDLER_NAME} (SELECT * FROM {self.data_table_1} limit 50) PREDICT rental_price ",13,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_02_train_predictor(self): query = f response = self.handler.native_query(query) self.assertTrue(response.type == RESPONSE_TYPE.OK) ``` ###Assistant : CREATE PREDICTOR {self.test_model_1} FROM {PG_HANDLER_NAME} (SELECT * FROM {self.data_table_1} limit 50) PREDICT rental_price " 2632,"def __call__(self, results): img = results['img'] if self.to_float32: img = img.astype(np.float32) results['img_path'] = None results['img'] = img height, width = img.shape[:2] results['height'] = height results['width'] = width results['ori_height'] = height results['ori_width'] = width return results @TRANSFORMS.register_module()","Call functions to add image meta information. Args: results (dict): Result dict with Webcam read image in ``results['img']``. Returns: dict: The dict contains loaded image and meta information. ",28,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self, results): img = results['img'] if self.to_float32: img = img.astype(np.float32) results['img_path'] = None results['img'] = img height, width = img.shape[:2] results['height'] = height results['width'] = width results['ori_height'] = height results['ori_width'] = width return results @TRANSFORMS.register_module() ``` ###Assistant : Call functions to add image meta information. Args: results (dict): Result dict with Webcam read image in ``results['img']``. Returns: dict: The dict contains loaded image and meta information. " 2633,"def test_jemalloc_env_var_propagate(): gcs_ptype = ray.ray_constants.PROCESS_TYPE_GCS_SERVER expected = {} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path="""", jemalloc_conf="""", jemalloc_comps=[], process_type=gcs_ptype ) assert actual == expected actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=None, jemalloc_conf=""a,b,c"", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected library_path = ""/abc"" expected = {""LD_PRELOAD"": library_path} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="""", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected # comps should be a list type. 
with pytest.raises(AssertionError): ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="""", jemalloc_comps=""ray.ray_constants.PROCESS_TYPE_GCS_SERVER,"", process_type=gcs_ptype, ) # When comps don't match the process_type, it should return an empty dict. expected = {} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="""", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_RAYLET], process_type=gcs_ptype, ) library_path = ""/abc"" malloc_conf = ""a,b,c"" expected = {""LD_PRELOAD"": library_path, ""MALLOC_CONF"": malloc_conf} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf=malloc_conf, jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected ","Test `propagate_jemalloc_env_var` If the shared library path is not specified, it should return an empty dict. When the shared library is specified When the malloc config is specified ",28,114,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_jemalloc_env_var_propagate(): gcs_ptype = ray.ray_constants.PROCESS_TYPE_GCS_SERVER expected = {} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path="""", jemalloc_conf="""", jemalloc_comps=[], process_type=gcs_ptype ) assert actual == expected actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=None, jemalloc_conf=""a,b,c"", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected library_path = ""/abc"" expected = {""LD_PRELOAD"": library_path} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="""", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected # comps should be a list type. with pytest.raises(AssertionError): ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="""", jemalloc_comps=""ray.ray_constants.PROCESS_TYPE_GCS_SERVER,"", process_type=gcs_ptype, ) # When comps don't match the process_type, it should return an empty dict. expected = {} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf="""", jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_RAYLET], process_type=gcs_ptype, ) library_path = ""/abc"" malloc_conf = ""a,b,c"" expected = {""LD_PRELOAD"": library_path, ""MALLOC_CONF"": malloc_conf} actual = ray._private.services.propagate_jemalloc_env_var( jemalloc_path=library_path, jemalloc_conf=malloc_conf, jemalloc_comps=[ray.ray_constants.PROCESS_TYPE_GCS_SERVER], process_type=gcs_ptype, ) assert actual == expected ``` ###Assistant : Test `propagate_jemalloc_env_var` If the shared library path is not specified, it should return an empty dict. 
When the shared library is specified When the malloc config is specified " 2634,"def aug_test_bboxes(self, feats, img_metas, rescale=False): # check with_nms argument gb_sig = signature(self.get_results) gb_args = [p.name for p in gb_sig.parameters.values()] gbs_sig = signature(self._get_results_single) gbs_args = [p.name for p in gbs_sig.parameters.values()] assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ f'{self.__class__.__name__}' \ ' does not support test-time augmentation' aug_bboxes = [] aug_scores = [] aug_labels = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch outs = self.forward(x) bbox_outputs = self.get_results( *outs, img_metas=img_meta, cfg=self.test_cfg, rescale=False, with_nms=False)[0] aug_bboxes.append(bbox_outputs.bboxes) aug_scores.append(bbox_outputs.scores) if len(bbox_outputs) >= 3: aug_labels.append(bbox_outputs.labels) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = self.merge_aug_bboxes( aug_bboxes, aug_scores, img_metas) merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None if merged_bboxes.numel() == 0: det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1) return [ (det_bboxes, merged_labels), ] det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, merged_labels, self.test_cfg.nms) det_bboxes = det_bboxes[:self.test_cfg.max_per_img] det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img] if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) results = InstanceData() results.bboxes = _det_bboxes[:, :4] results.scores = _det_bboxes[:, 4] results.labels = det_labels return [results] ","Test det bboxes with test time augmentation, can be applied in DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,). The length of list should always be 1. ",131,171,Python," ###User : Below is a Python method which does a task. 
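A simplified, standalone sketch of the propagation rule that the jemalloc test above asserts; this is not Ray's real implementation, only the behaviour encoded by the expected/actual pairs in the test.

```Python
# Simplified stand-in for ray._private.services.propagate_jemalloc_env_var,
# mirroring only the behaviour asserted by the test above.
def propagate_jemalloc_env_var(jemalloc_path, jemalloc_conf, jemalloc_comps, process_type):
    # No library path, or a process type outside the configured components: no env vars.
    if not jemalloc_path or process_type not in jemalloc_comps:
        return {}
    env = {"LD_PRELOAD": jemalloc_path}
    if jemalloc_conf:
        env["MALLOC_CONF"] = jemalloc_conf
    return env

assert propagate_jemalloc_env_var("", "", [], "gcs_server") == {}
assert propagate_jemalloc_env_var("/abc", "", ["gcs_server"], "gcs_server") == {"LD_PRELOAD": "/abc"}
assert propagate_jemalloc_env_var("/abc", "a,b,c", ["raylet"], "gcs_server") == {}
assert propagate_jemalloc_env_var("/abc", "a,b,c", ["gcs_server"], "gcs_server") == {
    "LD_PRELOAD": "/abc",
    "MALLOC_CONF": "a,b,c",
}
```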
Create a documentation for the below code : ```Python def aug_test_bboxes(self, feats, img_metas, rescale=False): # check with_nms argument gb_sig = signature(self.get_results) gb_args = [p.name for p in gb_sig.parameters.values()] gbs_sig = signature(self._get_results_single) gbs_args = [p.name for p in gbs_sig.parameters.values()] assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ f'{self.__class__.__name__}' \ ' does not support test-time augmentation' aug_bboxes = [] aug_scores = [] aug_labels = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch outs = self.forward(x) bbox_outputs = self.get_results( *outs, img_metas=img_meta, cfg=self.test_cfg, rescale=False, with_nms=False)[0] aug_bboxes.append(bbox_outputs.bboxes) aug_scores.append(bbox_outputs.scores) if len(bbox_outputs) >= 3: aug_labels.append(bbox_outputs.labels) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = self.merge_aug_bboxes( aug_bboxes, aug_scores, img_metas) merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None if merged_bboxes.numel() == 0: det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1) return [ (det_bboxes, merged_labels), ] det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, merged_labels, self.test_cfg.nms) det_bboxes = det_bboxes[:self.test_cfg.max_per_img] det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img] if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) results = InstanceData() results.bboxes = _det_bboxes[:, :4] results.scores = _det_bboxes[:, 4] results.labels = det_labels return [results] ``` ###Assistant : Test det bboxes with test time augmentation, can be applied in DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,). The length of list should always be 1. " 2635,"def pauseProducing(self) -> None: logger.info(""[%s] Pause producing"", self.id()) self.state = ConnectionStates.PAUSED ","This is called when both the kernel send buffer and the twisted tcp connection send buffers have become full. We don't actually have any control over those sizes, so we buffer some commands ourselves before knifing the connection due to the remote failing to keep up. ",46,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pauseProducing(self) -> None: logger.info(""[%s] Pause producing"", self.id()) self.state = ConnectionStates.PAUSED ``` ###Assistant : This is called when both the kernel send buffer and the twisted tcp connection send buffers have become full. 
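The aug_test_bboxes entry above starts by introspecting method signatures to confirm that with_nms is accepted before running test-time augmentation. A tiny standalone sketch of that check; the get_results stub below has an invented signature.

```Python
# Standalone sketch of the signature check used at the top of aug_test_bboxes;
# get_results here is a stub with an invented signature.
from inspect import signature

def get_results(*outs, img_metas=None, cfg=None, rescale=False, with_nms=True):
    ...

arg_names = [p.name for p in signature(get_results).parameters.values()]
assert "with_nms" in arg_names
```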
We don't actually have any control over those sizes, so we buffer some commands ourselves before knifing the connection due to the remote failing to keep up. " 2636,"def throw(self, typ, val=None, tb=None): if val is None: if tb is None: raise typ val = typ() if tb is not None: val = val.with_traceback(tb) raise val ","Raise an exception in the coroutine. Return next yielded value or raise StopIteration. ",13,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def throw(self, typ, val=None, tb=None): if val is None: if tb is None: raise typ val = typ() if tb is not None: val = val.with_traceback(tb) raise val ``` ###Assistant : Raise an exception in the coroutine. Return next yielded value or raise StopIteration. " 2637,"def _exit_buffer(self) -> None: self._buffer_index -= 1 self._check_buffer() ","Leave buffer context, and render content if required.",8,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _exit_buffer(self) -> None: self._buffer_index -= 1 self._check_buffer() ``` ###Assistant : Leave buffer context, and render content if required. " 2638,"def flatten_sensors_data(sensor): if ""temp"" in sensor[""data""]: sensor[""data""][""temperature""] = sensor[""data""][""temp""][""c""] return sensor ",Deconstruct SwitchBot library temp object C/Fº readings from dictionary.,9,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def flatten_sensors_data(sensor): if ""temp"" in sensor[""data""]: sensor[""data""][""temperature""] = sensor[""data""][""temp""][""c""] return sensor ``` ###Assistant : Deconstruct SwitchBot library temp object C/Fº readings from dictionary. " 2639,"def test_shared_embedding_column_with_non_sequence_categorical(self): with tf.Graph().as_default(): vocabulary_size = 3 sparse_input_a = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) sparse_input_b = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) categorical_column_a = ( tf.feature_column.categorical_column_with_identity( key=""aaa"", num_buckets=vocabulary_size ) ) categorical_column_b = ( tf.feature_column.categorical_column_with_identity( key=""bbb"", num_buckets=vocabulary_size ) ) shared_embedding_columns = tf.feature_column.shared_embeddings( [categorical_column_a, categorical_column_b], dimension=2 ) sequence_input_layer = ksfc.SequenceFeatures( shared_embedding_columns ) with self.assertRaisesRegex( ValueError, r""In embedding_column: aaa_shared_embedding\. "" r""categorical_column must "" r""be of type SequenceCategoricalColumn to use "" r""SequenceFeatures\."", ): _, _ = sequence_input_layer( {""aaa"": sparse_input_a, ""bbb"": sparse_input_b} ) ","Tests that error is raised for non-sequence shared embedding column.",10,115,Python," ###User : Below is a Python method which does a task. 
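A quick usage check for the flatten_sensors_data helper shown above; the sensor payload is a made-up sample rather than real SwitchBot output.

```Python
# Example call of flatten_sensors_data from the entry above; the payload below
# is a made-up sample, not actual SwitchBot data.
def flatten_sensors_data(sensor):
    if "temp" in sensor["data"]:
        sensor["data"]["temperature"] = sensor["data"]["temp"]["c"]
    return sensor

sensor = {"data": {"temp": {"c": 21.5, "f": 70.7}, "humidity": 40}}
assert flatten_sensors_data(sensor)["data"]["temperature"] == 21.5
```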
Create a documentation for the below code : ```Python def test_shared_embedding_column_with_non_sequence_categorical(self): with tf.Graph().as_default(): vocabulary_size = 3 sparse_input_a = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) sparse_input_b = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) categorical_column_a = ( tf.feature_column.categorical_column_with_identity( key=""aaa"", num_buckets=vocabulary_size ) ) categorical_column_b = ( tf.feature_column.categorical_column_with_identity( key=""bbb"", num_buckets=vocabulary_size ) ) shared_embedding_columns = tf.feature_column.shared_embeddings( [categorical_column_a, categorical_column_b], dimension=2 ) sequence_input_layer = ksfc.SequenceFeatures( shared_embedding_columns ) with self.assertRaisesRegex( ValueError, r""In embedding_column: aaa_shared_embedding\. "" r""categorical_column must "" r""be of type SequenceCategoricalColumn to use "" r""SequenceFeatures\."", ): _, _ = sequence_input_layer( {""aaa"": sparse_input_a, ""bbb"": sparse_input_b} ) ``` ###Assistant : Tests that error is raised for non-sequence shared embedding column. " 2640,"def get_next_stock_market_days(last_stock_day, n_next_days) -> list: n_days = 0 l_pred_days = [] years: list = [] holidays: list = [] if isinstance(last_stock_day, datetime): while n_days < n_next_days: last_stock_day += timedelta(hours=24) year = last_stock_day.date().year if year not in years: years.append(year) holidays += us_market_holidays(year) # Check if it is a weekend if last_stock_day.date().weekday() > 4: continue # Check if it is a holiday if last_stock_day.strftime(""%Y-%m-%d"") in holidays: continue # Otherwise stock market is open n_days += 1 l_pred_days.append(last_stock_day) else: while n_days < n_next_days: l_pred_days.append(last_stock_day + 1 + n_days) n_days += 1 return l_pred_days ",Gets the next stock market day. Checks against weekends and holidays,11,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_next_stock_market_days(last_stock_day, n_next_days) -> list: n_days = 0 l_pred_days = [] years: list = [] holidays: list = [] if isinstance(last_stock_day, datetime): while n_days < n_next_days: last_stock_day += timedelta(hours=24) year = last_stock_day.date().year if year not in years: years.append(year) holidays += us_market_holidays(year) # Check if it is a weekend if last_stock_day.date().weekday() > 4: continue # Check if it is a holiday if last_stock_day.strftime(""%Y-%m-%d"") in holidays: continue # Otherwise stock market is open n_days += 1 l_pred_days.append(last_stock_day) else: while n_days < n_next_days: l_pred_days.append(last_stock_day + 1 + n_days) n_days += 1 return l_pred_days ``` ###Assistant : Gets the next stock market day. 
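A hedged, standalone illustration of the weekend/holiday skipping loop inside get_next_stock_market_days above; the holidays tuple passed in is a stand-in for the real us_market_holidays() output.

```Python
# Simplified version of the weekend/holiday skipping loop in
# get_next_stock_market_days; the holidays tuple is a stand-in, not real data.
from datetime import datetime, timedelta

def next_open_days(last_day, n, holidays=()):
    days, current = [], last_day
    while len(days) < n:
        current += timedelta(days=1)
        if current.weekday() > 4:  # Saturday (5) or Sunday (6)
            continue
        if current.strftime("%Y-%m-%d") in holidays:
            continue
        days.append(current)
    return days

result = next_open_days(datetime(2021, 12, 31), 3, holidays=("2022-01-03",))
assert [d.day for d in result] == [4, 5, 6]  # Jan 1/2 are a weekend, Jan 3 is a holiday here
```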
Checks against weekends and holidays " 2641,"async def test_max_concurrent_in_progress_functions(extra_req_num): max_req = 10 a = A(max_num_call=max_req) # Run more than allowed concurrent async functions should trigger rate limiting res_arr = await asyncio.gather( *[a.fn1() if i % 2 == 0 else a.fn2() for i in range(max_req + extra_req_num)] ) fail_cnt = 0 for ok in res_arr: fail_cnt += 0 if ok else 1 expected_fail_cnt = max(0, extra_req_num) assert fail_cnt == expected_fail_cnt, ( f""{expected_fail_cnt} out of {max_req + extra_req_num} "" f""concurrent runs should fail with max={max_req} but {fail_cnt}."" ) assert a.num_call_ == 0, ""All requests should be done"" @pytest.mark.asyncio @pytest.mark.parametrize( ""failures"", [ [True, True, True, True, True], [False, False, False, False, False], [False, True, False, True, False], [False, False, False, True, True], [True, True, False, False, False], ], )",Test rate limiting for concurrent in-progress requests on StateHead,9,120,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_max_concurrent_in_progress_functions(extra_req_num): max_req = 10 a = A(max_num_call=max_req) # Run more than allowed concurrent async functions should trigger rate limiting res_arr = await asyncio.gather( *[a.fn1() if i % 2 == 0 else a.fn2() for i in range(max_req + extra_req_num)] ) fail_cnt = 0 for ok in res_arr: fail_cnt += 0 if ok else 1 expected_fail_cnt = max(0, extra_req_num) assert fail_cnt == expected_fail_cnt, ( f""{expected_fail_cnt} out of {max_req + extra_req_num} "" f""concurrent runs should fail with max={max_req} but {fail_cnt}."" ) assert a.num_call_ == 0, ""All requests should be done"" @pytest.mark.asyncio @pytest.mark.parametrize( ""failures"", [ [True, True, True, True, True], [False, False, False, False, False], [False, True, False, True, False], [False, False, False, True, True], [True, True, False, False, False], ], ) ``` ###Assistant : Test rate limiting for concurrent in-progress requests on StateHead " 2642,"def transpose_qkv(X, num_heads): # Shape of input `X`: # (`batch_size`, no. of queries or key-value pairs, `num_hiddens`). # Shape of output `X`: # (`batch_size`, no. of queries or key-value pairs, `num_heads`, # `num_hiddens` / `num_heads`) X = X.reshape(X.shape[0], X.shape[1], num_heads, -1) # Shape of output `X`: # (`batch_size`, `num_heads`, no. of queries or key-value pairs, # `num_hiddens` / `num_heads`) X = X.transpose(0, 2, 1, 3) # Shape of `output`: # (`batch_size` * `num_heads`, no. of queries or key-value pairs, # `num_hiddens` / `num_heads`) return X.reshape(-1, X.shape[2], X.shape[3]) ","Transposition for parallel computation of multiple attention heads. Defined in :numref:`sec_multihead-attention`",11,87,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transpose_qkv(X, num_heads): # Shape of input `X`: # (`batch_size`, no. of queries or key-value pairs, `num_hiddens`). # Shape of output `X`: # (`batch_size`, no. of queries or key-value pairs, `num_heads`, # `num_hiddens` / `num_heads`) X = X.reshape(X.shape[0], X.shape[1], num_heads, -1) # Shape of output `X`: # (`batch_size`, `num_heads`, no. of queries or key-value pairs, # `num_hiddens` / `num_heads`) X = X.transpose(0, 2, 1, 3) # Shape of `output`: # (`batch_size` * `num_heads`, no. 
of queries or key-value pairs, # `num_hiddens` / `num_heads`) return X.reshape(-1, X.shape[2], X.shape[3]) ``` ###Assistant : Transposition for parallel computation of multiple attention heads. Defined in :numref:`sec_multihead-attention` " 2643,"def current_state(self, session=NEW_SESSION) -> str: return ( session.query(TaskInstance.state) .filter( TaskInstance.dag_id == self.dag_id, TaskInstance.task_id == self.task_id, TaskInstance.run_id == self.run_id, ) .scalar() ) "," Get the very latest state from the database, if a session is passed, we use and looking up the state becomes part of the session, otherwise a new session is used. :param session: SQLAlchemy ORM Session ",36,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def current_state(self, session=NEW_SESSION) -> str: return ( session.query(TaskInstance.state) .filter( TaskInstance.dag_id == self.dag_id, TaskInstance.task_id == self.task_id, TaskInstance.run_id == self.run_id, ) .scalar() ) ``` ###Assistant : Get the very latest state from the database, if a session is passed, we use and looking up the state becomes part of the session, otherwise a new session is used. :param session: SQLAlchemy ORM Session " 2644,"def house_graph(create_using=None): description = [ ""adjacencylist"", ""House Graph"", 5, [[2, 3], [1, 4], [1, 4, 5], [2, 3, 5], [3, 4]], ] G = make_small_undirected_graph(description, create_using) return G "," Returns the House graph (square with triangle on top) The house graph is a simple undirected graph with 5 nodes and 6 edges [1]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph House graph in the form of a square with a triangle on top References ---------- .. [1] https://mathworld.wolfram.com/HouseGraph.html ",68,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def house_graph(create_using=None): description = [ ""adjacencylist"", ""House Graph"", 5, [[2, 3], [1, 4], [1, 4, 5], [2, 3, 5], [3, 4]], ] G = make_small_undirected_graph(description, create_using) return G ``` ###Assistant : Returns the House graph (square with triangle on top) The house graph is a simple undirected graph with 5 nodes and 6 edges [1]_. Parameters ---------- create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Returns ------- G : networkx Graph House graph in the form of a square with a triangle on top References ---------- .. [1] https://mathworld.wolfram.com/HouseGraph.html " 2645,"def check_header_validity(header): name, value = header for part in header: if type(part) not in HEADER_VALIDATORS: raise InvalidHeader( f""Header part ({part!r}) from {{{name!r}: {value!r}}} must be "" f""of type str or bytes, not {type(part)}"" ) _validate_header_part(name, ""name"", HEADER_VALIDATORS[type(name)][0]) _validate_header_part(value, ""value"", HEADER_VALIDATORS[type(value)][1]) ","Verifies that header parts don't contain leading whitespace reserved characters, or return characters. :param header: tuple, in the format (name, value). ",21,40,Python," ###User : Below is a Python method which does a task. 
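A shape check for the transpose_qkv helper above, using NumPy arrays; the original targets the d2l NumPy-style API, and only the shapes matter here.

```Python
# Shape check for transpose_qkv using NumPy; mirrors the reshapes/transposes above.
import numpy as np

def transpose_qkv(X, num_heads):
    X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)
    X = X.transpose(0, 2, 1, 3)
    return X.reshape(-1, X.shape[2], X.shape[3])

batch_size, num_queries, num_hiddens, num_heads = 2, 4, 8, 2
out = transpose_qkv(np.zeros((batch_size, num_queries, num_hiddens)), num_heads)
assert out.shape == (batch_size * num_heads, num_queries, num_hiddens // num_heads)
```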
Create a documentation for the below code : ```Python def check_header_validity(header): name, value = header for part in header: if type(part) not in HEADER_VALIDATORS: raise InvalidHeader( f""Header part ({part!r}) from {{{name!r}: {value!r}}} must be "" f""of type str or bytes, not {type(part)}"" ) _validate_header_part(name, ""name"", HEADER_VALIDATORS[type(name)][0]) _validate_header_part(value, ""value"", HEADER_VALIDATORS[type(value)][1]) ``` ###Assistant : Verifies that header parts don't contain leading whitespace reserved characters, or return characters. :param header: tuple, in the format (name, value). " 2646,"def test_orderline_query(staff_api_client, permission_manage_orders, fulfilled_order): order = fulfilled_order query = line = order.lines.first() metadata_key = ""md key"" metadata_value = ""md value"" line.store_value_in_private_metadata({metadata_key: metadata_value}) line.store_value_in_metadata({metadata_key: metadata_value}) line.save() staff_api_client.user.user_permissions.add(permission_manage_orders) response = staff_api_client.post_graphql(query) content = get_graphql_content(response) order_data = content[""data""][""orders""][""edges""][0][""node""] first_order_data_line = order_data[""lines""][0] variant_id = graphene.Node.to_global_id(""ProductVariant"", line.variant.pk) assert first_order_data_line[""thumbnail""] is None assert first_order_data_line[""variant""][""id""] == variant_id assert first_order_data_line[""quantity""] == line.quantity assert first_order_data_line[""unitPrice""][""currency""] == line.unit_price.currency assert first_order_data_line[""metadata""] == [ {""key"": metadata_key, ""value"": metadata_value} ] assert first_order_data_line[""privateMetadata""] == [ {""key"": metadata_key, ""value"": metadata_value} ] expected_unit_price = Money( amount=str(first_order_data_line[""unitPrice""][""gross""][""amount""]), currency=""USD"", ) assert first_order_data_line[""totalPrice""][""currency""] == line.unit_price.currency assert expected_unit_price == line.unit_price.gross expected_total_price = Money( amount=str(first_order_data_line[""totalPrice""][""gross""][""amount""]), currency=""USD"", ) assert expected_total_price == line.unit_price.gross * line.quantity allocation = line.allocations.first() allocation_id = graphene.Node.to_global_id(""Allocation"", allocation.pk) warehouse_id = graphene.Node.to_global_id( ""Warehouse"", allocation.stock.warehouse.pk ) assert first_order_data_line[""allocations""] == [ { ""id"": allocation_id, ""quantity"": allocation.quantity_allocated, ""warehouse"": {""id"": warehouse_id}, } ] "," query OrdersQuery { orders(first: 1) { edges { node { lines { thumbnail(size: 540) { url } variant { id } quantity allocations { id quantity warehouse { id } } unitPrice { currency gross { amount } } totalPrice { currency gross { amount } } metadata { key value } privateMetadata { key value } } } } } } ",62,129,Python," ###User : Below is a Python method which does a task. 
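A quick check of the House graph generator documented a little above, called through the top-level networkx namespace.

```Python
# The networkx generator for the House graph documented above:
# a square with a triangle on top, so 5 nodes and 6 edges.
import networkx as nx

G = nx.house_graph()
assert G.number_of_nodes() == 5
assert G.number_of_edges() == 6
```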
Create a documentation for the below code : ```Python def test_orderline_query(staff_api_client, permission_manage_orders, fulfilled_order): order = fulfilled_order query = line = order.lines.first() metadata_key = ""md key"" metadata_value = ""md value"" line.store_value_in_private_metadata({metadata_key: metadata_value}) line.store_value_in_metadata({metadata_key: metadata_value}) line.save() staff_api_client.user.user_permissions.add(permission_manage_orders) response = staff_api_client.post_graphql(query) content = get_graphql_content(response) order_data = content[""data""][""orders""][""edges""][0][""node""] first_order_data_line = order_data[""lines""][0] variant_id = graphene.Node.to_global_id(""ProductVariant"", line.variant.pk) assert first_order_data_line[""thumbnail""] is None assert first_order_data_line[""variant""][""id""] == variant_id assert first_order_data_line[""quantity""] == line.quantity assert first_order_data_line[""unitPrice""][""currency""] == line.unit_price.currency assert first_order_data_line[""metadata""] == [ {""key"": metadata_key, ""value"": metadata_value} ] assert first_order_data_line[""privateMetadata""] == [ {""key"": metadata_key, ""value"": metadata_value} ] expected_unit_price = Money( amount=str(first_order_data_line[""unitPrice""][""gross""][""amount""]), currency=""USD"", ) assert first_order_data_line[""totalPrice""][""currency""] == line.unit_price.currency assert expected_unit_price == line.unit_price.gross expected_total_price = Money( amount=str(first_order_data_line[""totalPrice""][""gross""][""amount""]), currency=""USD"", ) assert expected_total_price == line.unit_price.gross * line.quantity allocation = line.allocations.first() allocation_id = graphene.Node.to_global_id(""Allocation"", allocation.pk) warehouse_id = graphene.Node.to_global_id( ""Warehouse"", allocation.stock.warehouse.pk ) assert first_order_data_line[""allocations""] == [ { ""id"": allocation_id, ""quantity"": allocation.quantity_allocated, ""warehouse"": {""id"": warehouse_id}, } ] ``` ###Assistant : query OrdersQuery { orders(first: 1) { edges { node { lines { thumbnail(size: 540) { url } variant { id } quantity allocations { id quantity warehouse { id } } unitPrice { currency gross { amount } } totalPrice { currency gross { amount } } metadata { key value } privateMetadata { key value } } } } } } " 2647,"def in4_pseudoheader(proto, u, plen): # type: (int, IP, int) -> bytes if u.len is not None: if u.ihl is None: olen = sum(len(x) for x in u.options) ihl = 5 + olen // 4 + (1 if olen % 4 else 0) else: ihl = u.ihl ln = max(u.len - 4 * ihl, 0) else: ln = plen # Filter out IPOption_LSRR and IPOption_SSRR sr_options = [opt for opt in u.options if isinstance(opt, IPOption_LSRR) or isinstance(opt, IPOption_SSRR)] len_sr_options = len(sr_options) if len_sr_options == 1 and len(sr_options[0].routers): # The checksum must be computed using the final # destination address u.dst = sr_options[0].routers[-1] elif len_sr_options > 1: message = ""Found %d Source Routing Options! "" message += ""Falling back to IP.dst for checksum computation."" warning(message, len_sr_options) return struct.pack(""!4s4sHH"", inet_pton(socket.AF_INET, u.src), inet_pton(socket.AF_INET, u.dst), proto, ln) ","IPv4 Pseudo Header as defined in RFC793 as bytes :param proto: value of upper layer protocol :param u: IP layer instance :param plen: the length of the upper layer and payload ",31,132,Python," ###User : Below is a Python method which does a task. 
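A standalone illustration of the struct layout that in4_pseudoheader above packs; addresses and lengths are arbitrary example values.

```Python
# Packing an RFC 793 style IPv4 pseudo header, matching the "!4s4sHH" layout
# used by in4_pseudoheader above; addresses and lengths are example values.
import socket
import struct

src, dst = "192.0.2.1", "192.0.2.2"       # example addresses (TEST-NET-1)
proto, upper_len = socket.IPPROTO_TCP, 20  # protocol number 6, 20-byte upper layer

pseudo = struct.pack(
    "!4s4sHH",
    socket.inet_pton(socket.AF_INET, src),  # 4-byte source address
    socket.inet_pton(socket.AF_INET, dst),  # 4-byte destination address
    proto,                                  # upper layer protocol
    upper_len,                              # upper layer length
)
assert len(pseudo) == 12
```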
Create a documentation for the below code : ```Python def in4_pseudoheader(proto, u, plen): # type: (int, IP, int) -> bytes if u.len is not None: if u.ihl is None: olen = sum(len(x) for x in u.options) ihl = 5 + olen // 4 + (1 if olen % 4 else 0) else: ihl = u.ihl ln = max(u.len - 4 * ihl, 0) else: ln = plen # Filter out IPOption_LSRR and IPOption_SSRR sr_options = [opt for opt in u.options if isinstance(opt, IPOption_LSRR) or isinstance(opt, IPOption_SSRR)] len_sr_options = len(sr_options) if len_sr_options == 1 and len(sr_options[0].routers): # The checksum must be computed using the final # destination address u.dst = sr_options[0].routers[-1] elif len_sr_options > 1: message = ""Found %d Source Routing Options! "" message += ""Falling back to IP.dst for checksum computation."" warning(message, len_sr_options) return struct.pack(""!4s4sHH"", inet_pton(socket.AF_INET, u.src), inet_pton(socket.AF_INET, u.dst), proto, ln) ``` ###Assistant : IPv4 Pseudo Header as defined in RFC793 as bytes :param proto: value of upper layer protocol :param u: IP layer instance :param plen: the length of the upper layer and payload " 2648,"def doc_resample_fillna(method, refer_to, params=None, overwrite_template_params=False): action = f""fill missing values in each group independently using {method} method"" params_substitution = ""limit : int\n"" if params: params_substitution = ( params if overwrite_template_params else format_string( ""{params}\n{params_substitution}"", params=params, params_substitution=params_substitution, ) ) build_rules = ""- QueryCompiler contains unsampled data with missing values filled."" return doc_resample( action=action, extra_params=params_substitution, build_rules=build_rules, refer_to=refer_to, ) doc_dt = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name=""Series.dt"", ) doc_dt_timestamp = partial(doc_dt, dt_type=""datetime"") doc_dt_interval = partial(doc_dt, dt_type=""interval"") doc_dt_period = partial(doc_dt, dt_type=""period"") doc_dt_round = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name=""Series.dt"", ) doc_str_method = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name=""Series.str"", ) "," Build decorator which adds docstring for the resample fillna query compiler method. Parameters ---------- method : str Fillna method name. refer_to : str Method name in ``modin.pandas.resample.Resampler`` module to refer to for more information about parameters and output format. params : str, optional Method parameters in the NumPy docstyle format to substitute to the docstring template. overwrite_template_params : bool, default: False If `params` is specified indicates whether to overwrite method parameters in the docstring template or append then at the end. Returns ------- callable Get {prop} for each {dt_type} value. {params} Returns ------- BaseQueryCompiler New QueryCompiler with the same shape as `self`, where each element is {prop} for the corresponding {dt_type} value. Perform {refer_to} operation on the underlying time-series data to the specified `freq`. Parameters ---------- freq : str ambiguous : {{""raise"", ""infer"", ""NaT""}} or bool mask, default: ""raise"" nonexistent : {{""raise"", ""shift_forward"", ""shift_backward"", ""NaT""}} or timedelta, default: ""raise"" Returns ------- BaseQueryCompiler New QueryCompiler with performed {refer_to} operation on every element. Apply ""{refer_to}"" function to each string value in QueryCompiler. 
{params} Returns ------- BaseQueryCompiler New QueryCompiler containing the result of execution of the ""{refer_to}"" function against each string element. ",189,91,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def doc_resample_fillna(method, refer_to, params=None, overwrite_template_params=False): action = f""fill missing values in each group independently using {method} method"" params_substitution = ""limit : int\n"" if params: params_substitution = ( params if overwrite_template_params else format_string( ""{params}\n{params_substitution}"", params=params, params_substitution=params_substitution, ) ) build_rules = ""- QueryCompiler contains unsampled data with missing values filled."" return doc_resample( action=action, extra_params=params_substitution, build_rules=build_rules, refer_to=refer_to, ) doc_dt = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name=""Series.dt"", ) doc_dt_timestamp = partial(doc_dt, dt_type=""datetime"") doc_dt_interval = partial(doc_dt, dt_type=""interval"") doc_dt_period = partial(doc_dt, dt_type=""period"") doc_dt_round = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name=""Series.dt"", ) doc_str_method = partial( doc_qc_method, template=, one_column_method=True, refer_to_module_name=""Series.str"", ) ``` ###Assistant : Build decorator which adds docstring for the resample fillna query compiler method. Parameters ---------- method : str Fillna method name. refer_to : str Method name in ``modin.pandas.resample.Resampler`` module to refer to for more information about parameters and output format. params : str, optional Method parameters in the NumPy docstyle format to substitute to the docstring template. overwrite_template_params : bool, default: False If `params` is specified indicates whether to overwrite method parameters in the docstring template or append then at the end. Returns ------- callable Get {prop} for each {dt_type} value. {params} Returns ------- BaseQueryCompiler New QueryCompiler with the same shape as `self`, where each element is {prop} for the corresponding {dt_type} value. Perform {refer_to} operation on the underlying time-series data to the specified `freq`. Parameters ---------- freq : str ambiguous : {{""raise"", ""infer"", ""NaT""}} or bool mask, default: ""raise"" nonexistent : {{""raise"", ""shift_forward"", ""shift_backward"", ""NaT""}} or timedelta, default: ""raise"" Returns ------- BaseQueryCompiler New QueryCompiler with performed {refer_to} operation on every element. Apply ""{refer_to}"" function to each string value in QueryCompiler. {params} Returns ------- BaseQueryCompiler New QueryCompiler containing the result of execution of the ""{refer_to}"" function against each string element. " 2649,"def delete_events(ref_type, ref_name): events = ( frappe.db.sql_list( , (ref_type, ref_name), ) or [] ) if events: frappe.delete_doc(""Event"", events, for_reload=True) "," SELECT distinct `tabEvent`.name from `tabEvent`, `tabEvent Participants` where `tabEvent`.name = `tabEvent Participants`.parent and `tabEvent Participants`.reference_doctype = %s and `tabEvent Participants`.reference_docname = %s ",22,19,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def delete_events(ref_type, ref_name): events = ( frappe.db.sql_list( , (ref_type, ref_name), ) or [] ) if events: frappe.delete_doc(""Event"", events, for_reload=True) ``` ###Assistant : SELECT distinct `tabEvent`.name from `tabEvent`, `tabEvent Participants` where `tabEvent`.name = `tabEvent Participants`.parent and `tabEvent Participants`.reference_doctype = %s and `tabEvent Participants`.reference_docname = %s " 2650,"def check_started(self) -> ReplicaStartupStatus: status, version = self._actor.check_ready() if status == ReplicaStartupStatus.SUCCEEDED: # Re-assign DeploymentVersion if start / update / recover succeeded # by reading re-computed version in RayServeReplica if version is not None: self._version = version return status ","Check if the replica has started. If so, transition to RUNNING. Should handle the case where the replica has already stopped. Returns: status: Most recent state of replica by querying actor obj ref ",33,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_started(self) -> ReplicaStartupStatus: status, version = self._actor.check_ready() if status == ReplicaStartupStatus.SUCCEEDED: # Re-assign DeploymentVersion if start / update / recover succeeded # by reading re-computed version in RayServeReplica if version is not None: self._version = version return status ``` ###Assistant : Check if the replica has started. If so, transition to RUNNING. Should handle the case where the replica has already stopped. Returns: status: Most recent state of replica by querying actor obj ref " 2651,"async def test_battery_low(hass, utcnow): helper = await setup_test_component( hass, create_battery_level_sensor, suffix=""battery"" ) state = await helper.async_update( ServicesTypes.BATTERY_SERVICE, { CharacteristicsTypes.BATTERY_LEVEL: 1, CharacteristicsTypes.STATUS_LO_BATT: 0, }, ) assert state.attributes[""icon""] == ""mdi:battery-10"" state = await helper.async_update( ServicesTypes.BATTERY_SERVICE, { CharacteristicsTypes.BATTERY_LEVEL: 1, CharacteristicsTypes.STATUS_LO_BATT: 1, }, ) assert state.attributes[""icon""] == ""mdi:battery-alert"" ",Test reading the state of a HomeKit battery's low state.,10,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_battery_low(hass, utcnow): helper = await setup_test_component( hass, create_battery_level_sensor, suffix=""battery"" ) state = await helper.async_update( ServicesTypes.BATTERY_SERVICE, { CharacteristicsTypes.BATTERY_LEVEL: 1, CharacteristicsTypes.STATUS_LO_BATT: 0, }, ) assert state.attributes[""icon""] == ""mdi:battery-10"" state = await helper.async_update( ServicesTypes.BATTERY_SERVICE, { CharacteristicsTypes.BATTERY_LEVEL: 1, CharacteristicsTypes.STATUS_LO_BATT: 1, }, ) assert state.attributes[""icon""] == ""mdi:battery-alert"" ``` ###Assistant : Test reading the state of a HomeKit battery's low state. " 2652,"def _hyab(self, y_true, y_pred): delta = y_true - y_pred root = K.sqrt(K.clip(K.pow(delta[..., 0:1], 2), self._epsilon, None)) delta_norm = frobenius_norm(delta[..., 1:3]) return root + delta_norm "," Compute the HyAB distance between true and predicted images. 
Parameters ---------- y_true: :class:`plaidml.tile.Value` The ground truth batch of images in standard or Hunt-adjusted L*A*B* color space y_pred: :class:`plaidml.tile.Value` The predicted batch of images in standard or Hunt-adjusted L*A*B* color space Returns ------- :class:`plaidml.tile.Value` image tensor containing the per-pixel HyAB distances between true and predicted images ",56,24,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _hyab(self, y_true, y_pred): delta = y_true - y_pred root = K.sqrt(K.clip(K.pow(delta[..., 0:1], 2), self._epsilon, None)) delta_norm = frobenius_norm(delta[..., 1:3]) return root + delta_norm ``` ###Assistant : Compute the HyAB distance between true and predicted images. Parameters ---------- y_true: :class:`plaidml.tile.Value` The ground truth batch of images in standard or Hunt-adjusted L*A*B* color space y_pred: :class:`plaidml.tile.Value` The predicted batch of images in standard or Hunt-adjusted L*A*B* color space Returns ------- :class:`plaidml.tile.Value` image tensor containing the per-pixel HyAB distances between true and predicted images " 2653,"def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected): args = self.parser.parse_args( [ 'db', 'clean', '--clean-before-timestamp', '2021-01-01', *dry_run_arg, ] ) db_command.cleanup_tables(args) run_cleanup_mock.assert_called_once_with( table_names=None, dry_run=expected, clean_before_timestamp=pendulum.parse('2021-01-01 00:00:00Z'), verbose=False, confirm=True, skip_archive=False, ) "," When tz is included in the string, the default timezone should not be used. ",13,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected): args = self.parser.parse_args( [ 'db', 'clean', '--clean-before-timestamp', '2021-01-01', *dry_run_arg, ] ) db_command.cleanup_tables(args) run_cleanup_mock.assert_called_once_with( table_names=None, dry_run=expected, clean_before_timestamp=pendulum.parse('2021-01-01 00:00:00Z'), verbose=False, confirm=True, skip_archive=False, ) ``` ###Assistant : When tz is included in the string, the default timezone should not be used. " 2654,"def async_dismiss_setup_message(hass, entry_id): persistent_notification.async_dismiss(hass, entry_id) ",Dismiss persistent notification and remove QR code.,7,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def async_dismiss_setup_message(hass, entry_id): persistent_notification.async_dismiss(hass, entry_id) ``` ###Assistant : Dismiss persistent notification and remove QR code. " 2655,"def get_containing_app_config(self, object_name): self.check_apps_ready() candidates = [] for app_config in self.app_configs.values(): if object_name.startswith(app_config.name): subpath = object_name[len(app_config.name) :] if subpath == """" or subpath[0] == ""."": candidates.append(app_config) if candidates: return sorted(candidates, key=lambda ac: -len(ac.name))[0] "," Look for an app config containing a given object. object_name is the dotted Python path to the object. Return the app config for the inner application in case of nesting. Return None if the object isn't in any registered app config. ",41,33,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_containing_app_config(self, object_name): self.check_apps_ready() candidates = [] for app_config in self.app_configs.values(): if object_name.startswith(app_config.name): subpath = object_name[len(app_config.name) :] if subpath == """" or subpath[0] == ""."": candidates.append(app_config) if candidates: return sorted(candidates, key=lambda ac: -len(ac.name))[0] ``` ###Assistant : Look for an app config containing a given object. object_name is the dotted Python path to the object. Return the app config for the inner application in case of nesting. Return None if the object isn't in any registered app config. " 2656,"def _track_variables(self, value): for val in tf.nest.flatten(value): if isinstance(val, tf.Variable): self._track_variable(val) elif tf_utils.is_extension_type(val): # Manually expand extension types to track resource variables. nested_vals = tf_utils.type_spec_from_value(val)._to_components( val ) self._track_variables(nested_vals) ",Tracks `Variable`s including `Variable`s in `CompositeTensor`s.,6,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _track_variables(self, value): for val in tf.nest.flatten(value): if isinstance(val, tf.Variable): self._track_variable(val) elif tf_utils.is_extension_type(val): # Manually expand extension types to track resource variables. nested_vals = tf_utils.type_spec_from_value(val)._to_components( val ) self._track_variables(nested_vals) ``` ###Assistant : Tracks `Variable`s including `Variable`s in `CompositeTensor`s. " 2657,"def _create_vhost_v2(self, node): addrs = set() for param in node.parameters: addr = obj.Addr.fromstring(param) if addr: addrs.add(addr) is_ssl = False # Exclusion to match the behavior in get_virtual_hosts_v2 sslengine = node.find_directives(""SSLEngine"", exclude=False) if sslengine: for directive in sslengine: if directive.parameters[0].lower() == ""on"": is_ssl = True break # ""SSLEngine on"" might be set outside of # Treat vhosts with port 443 as ssl vhosts for addr in addrs: if addr.get_port() == ""443"": is_ssl = True enabled = apache_util.included_in_paths(node.filepath, self.parsed_paths) macro = False # Check if the VirtualHost is contained in a mod_macro block if node.find_ancestors(""Macro""): macro = True vhost = obj.VirtualHost( node.filepath, None, addrs, is_ssl, enabled, modmacro=macro, node=node ) self._populate_vhost_names_v2(vhost) return vhost ","Used by get_virtual_hosts_v2 to create vhost objects using ParserNode interfaces. :param interfaces.BlockNode node: The BlockNode object of VirtualHost block :returns: newly created vhost :rtype: :class:`~certbot_apache.obj.VirtualHost` ",25,111,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _create_vhost_v2(self, node): addrs = set() for param in node.parameters: addr = obj.Addr.fromstring(param) if addr: addrs.add(addr) is_ssl = False # Exclusion to match the behavior in get_virtual_hosts_v2 sslengine = node.find_directives(""SSLEngine"", exclude=False) if sslengine: for directive in sslengine: if directive.parameters[0].lower() == ""on"": is_ssl = True break # ""SSLEngine on"" might be set outside of # Treat vhosts with port 443 as ssl vhosts for addr in addrs: if addr.get_port() == ""443"": is_ssl = True enabled = apache_util.included_in_paths(node.filepath, self.parsed_paths) macro = False # Check if the VirtualHost is contained in a mod_macro block if node.find_ancestors(""Macro""): macro = True vhost = obj.VirtualHost( node.filepath, None, addrs, is_ssl, enabled, modmacro=macro, node=node ) self._populate_vhost_names_v2(vhost) return vhost ``` ###Assistant : Used by get_virtual_hosts_v2 to create vhost objects using ParserNode interfaces. :param interfaces.BlockNode node: The BlockNode object of VirtualHost block :returns: newly created vhost :rtype: :class:`~certbot_apache.obj.VirtualHost` " 2658,"def insert_on(self, path, loc=None, replace=False): loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath = [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: if replace: break else: # don't modify path (even removing duplicates) if # found and not replace return elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory # UNLESS it's already been added to sys.path and replace=False if (not replace) and nloc in npath[p:]: return if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() if replace: path.insert(0, loc) else: path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p + 1) except ValueError: break else: del npath[np], path[np] # ha! p = np return ","Ensure self.location is on path If replace=False (default): - If location is already in path anywhere, do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent. - Else: add to the end of path. If replace=True: - If location is already on path anywhere (not eggs) or higher priority than its parent (eggs) do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent, removing any lower-priority entries. - Else: add it to the front of path. ",100,154,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def insert_on(self, path, loc=None, replace=False): loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath = [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: if replace: break else: # don't modify path (even removing duplicates) if # found and not replace return elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory # UNLESS it's already been added to sys.path and replace=False if (not replace) and nloc in npath[p:]: return if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() if replace: path.insert(0, loc) else: path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p + 1) except ValueError: break else: del npath[np], path[np] # ha! p = np return ``` ###Assistant : Ensure self.location is on path If replace=False (default): - If location is already in path anywhere, do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent. - Else: add to the end of path. If replace=True: - If location is already on path anywhere (not eggs) or higher priority than its parent (eggs) do nothing. - Else: - If it's an egg and its parent directory is on path, insert just ahead of the parent, removing any lower-priority entries. - Else: add it to the front of path. " 2659,"def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None cross_attns = ( tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions and self.config.add_cross_attention else None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=output.hidden_states, past_key_values=pkv, hidden_states=hs, attentions=attns, cross_attentions=cross_attns, ) @add_start_docstrings( , XGLM_START_DOCSTRING, )"," The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). ",20,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None cross_attns = ( tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions and self.config.add_cross_attention else None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=output.hidden_states, past_key_values=pkv, hidden_states=hs, attentions=attns, cross_attentions=cross_attns, ) @add_start_docstrings( , XGLM_START_DOCSTRING, ) ``` ###Assistant : The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). 
" 2660,"def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs): preprocessed_map_func = cls.preprocess_func(func) key_futures = RayWrapper.materialize( [ partition.apply(preprocessed_map_func, **kwargs) for partition in partitions ] ) gpu_managers = [partition.get_gpu_manager() for partition in partitions] return cls._create_partitions(key_futures, gpu_managers) "," Apply `func` to a list of remote partitions from `partitions`. Parameters ---------- func : callable The function to apply. partitions : np.ndarray NumPy array with partitions. **kwargs : dict Additional keywords arguments to be passed in `func`. Returns ------- np.ndarray A NumPy array of ``cuDFOnRayDataframePartition`` objects. Notes ----- This preprocesses the `func` first before applying it to the partitions. ",59,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs): preprocessed_map_func = cls.preprocess_func(func) key_futures = RayWrapper.materialize( [ partition.apply(preprocessed_map_func, **kwargs) for partition in partitions ] ) gpu_managers = [partition.get_gpu_manager() for partition in partitions] return cls._create_partitions(key_futures, gpu_managers) ``` ###Assistant : Apply `func` to a list of remote partitions from `partitions`. Parameters ---------- func : callable The function to apply. partitions : np.ndarray NumPy array with partitions. **kwargs : dict Additional keywords arguments to be passed in `func`. Returns ------- np.ndarray A NumPy array of ``cuDFOnRayDataframePartition`` objects. Notes ----- This preprocesses the `func` first before applying it to the partitions. " 2661,"def _execute_impl(self, *args, **kwargs) -> ObjectRef: return self._deployment_function_handle.remote( *self._bound_args, **self._bound_kwargs ) ","Executor of DeploymentNode getting called each time on dag.execute. The execute implementation is recursive, that is, the method nodes will receive whatever this method returns. We return a handle here so method node can directly call upon. ",37,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _execute_impl(self, *args, **kwargs) -> ObjectRef: return self._deployment_function_handle.remote( *self._bound_args, **self._bound_kwargs ) ``` ###Assistant : Executor of DeploymentNode getting called each time on dag.execute. The execute implementation is recursive, that is, the method nodes will receive whatever this method returns. We return a handle here so method node can directly call upon. " 2662,"def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs): if data_format is None: data_format = backend.image_data_format() img = array_to_img(x, data_format=data_format, scale=scale) if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'): warnings.warn('The JPG format does not support ' 'RGBA images, converting to RGB.') img = img.convert('RGB') img.save(path, format=file_format, **kwargs) @keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img')","Saves an image stored as a Numpy array to a path or file object. Args: path: Path or file object. x: Numpy array. data_format: Image data format, either ""channels_first"" or ""channels_last"". file_format: Optional file format override. If omitted, the format to use is determined from the filename extension. 
If a file object was used instead of a filename, this parameter should always be used. scale: Whether to rescale image values to be within `[0, 255]`. **kwargs: Additional keyword arguments passed to `PIL.Image.save()`. ",82,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs): if data_format is None: data_format = backend.image_data_format() img = array_to_img(x, data_format=data_format, scale=scale) if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'): warnings.warn('The JPG format does not support ' 'RGBA images, converting to RGB.') img = img.convert('RGB') img.save(path, format=file_format, **kwargs) @keras_export('keras.utils.load_img', 'keras.preprocessing.image.load_img') ``` ###Assistant : Saves an image stored as a Numpy array to a path or file object. Args: path: Path or file object. x: Numpy array. data_format: Image data format, either ""channels_first"" or ""channels_last"". file_format: Optional file format override. If omitted, the format to use is determined from the filename extension. If a file object was used instead of a filename, this parameter should always be used. scale: Whether to rescale image values to be within `[0, 255]`. **kwargs: Additional keyword arguments passed to `PIL.Image.save()`. " 2663,"def log_commenting_changes(self, changes, revision): for comment in changes[""new_comments""]: comment.log_create(page_revision=revision, user=self.request.user) for comment in changes[""edited_comments""]: comment.log_edit(page_revision=revision, user=self.request.user) for comment in changes[""resolved_comments""]: comment.log_resolve(page_revision=revision, user=self.request.user) for comment in changes[""deleted_comments""]: comment.log_delete(page_revision=revision, user=self.request.user) for comment, replies in changes[""new_replies""]: for reply in replies: reply.log_create(page_revision=revision, user=self.request.user) for comment, replies in changes[""edited_replies""]: for reply in replies: reply.log_edit(page_revision=revision, user=self.request.user) for comment, replies in changes[""deleted_replies""]: for reply in replies: reply.log_delete(page_revision=revision, user=self.request.user) "," Generates log entries for any changes made to comments or replies. ",11,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def log_commenting_changes(self, changes, revision): for comment in changes[""new_comments""]: comment.log_create(page_revision=revision, user=self.request.user) for comment in changes[""edited_comments""]: comment.log_edit(page_revision=revision, user=self.request.user) for comment in changes[""resolved_comments""]: comment.log_resolve(page_revision=revision, user=self.request.user) for comment in changes[""deleted_comments""]: comment.log_delete(page_revision=revision, user=self.request.user) for comment, replies in changes[""new_replies""]: for reply in replies: reply.log_create(page_revision=revision, user=self.request.user) for comment, replies in changes[""edited_replies""]: for reply in replies: reply.log_edit(page_revision=revision, user=self.request.user) for comment, replies in changes[""deleted_replies""]: for reply in replies: reply.log_delete(page_revision=revision, user=self.request.user) ``` ###Assistant : Generates log entries for any changes made to comments or replies. 
" 2664,"def __new__(cls, stylename, **kwargs): # The ""class"" should have the _style_list attribute, which is a mapping # of style names to style classes. _list = stylename.replace("" "", """").split("","") _name = _list[0].lower() try: _cls = cls._style_list[_name] except KeyError as err: raise ValueError(f""Unknown style: {stylename}"") from err try: _args_pair = [cs.split(""="") for cs in _list[1:]] _args = {k: float(v) for k, v in _args_pair} except ValueError as err: raise ValueError(f""Incorrect style argument: {stylename}"") from err return _cls(**{**_args, **kwargs}) ",Return the instance of the subclass with the given style name.,11,76,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __new__(cls, stylename, **kwargs): # The ""class"" should have the _style_list attribute, which is a mapping # of style names to style classes. _list = stylename.replace("" "", """").split("","") _name = _list[0].lower() try: _cls = cls._style_list[_name] except KeyError as err: raise ValueError(f""Unknown style: {stylename}"") from err try: _args_pair = [cs.split(""="") for cs in _list[1:]] _args = {k: float(v) for k, v in _args_pair} except ValueError as err: raise ValueError(f""Incorrect style argument: {stylename}"") from err return _cls(**{**_args, **kwargs}) ``` ###Assistant : Return the instance of the subclass with the given style name. " 2665,"def losses(self): collected_losses = [] for layer in self._flatten_layers(): # If any eager losses are present, we assume the model to be part of # an eager training loop (either a custom one or the one used when # `run_eagerly=True`) and so we always return just the eager losses. if layer._eager_losses: # Filter placeholder losses that may have been added by revived # layers. (see base_layer_utils for details). if ( layer._eager_losses[0] is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER ): collected_losses.extend(layer._eager_losses) else: collected_losses.extend(layer._losses) for regularizer in layer._callable_losses: loss_tensor = regularizer() if loss_tensor is not None: collected_losses.append(loss_tensor) return collected_losses ","List of losses added using the `add_loss()` API. Variable regularization tensors are created when this property is accessed, so it is eager safe: accessing `losses` under a `tf.GradientTape` will propagate gradients back to the corresponding variables. Examples: >>> class MyLayer(tf.keras.layers.Layer): ... def call(self, inputs): ... self.add_loss(tf.abs(tf.reduce_mean(inputs))) ... return inputs >>> l = MyLayer() >>> l(np.ones((10, 1))) >>> l.losses [1.0] >>> inputs = tf.keras.Input(shape=(10,)) >>> x = tf.keras.layers.Dense(10)(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Activity regularization. >>> len(model.losses) 0 >>> model.add_loss(tf.abs(tf.reduce_mean(x))) >>> len(model.losses) 1 >>> inputs = tf.keras.Input(shape=(10,)) >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones') >>> x = d(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Weight regularization. >>> model.add_loss(lambda: tf.reduce_mean(d.kernel)) >>> model.losses [] Returns: A list of tensors. ",128,93,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def losses(self): collected_losses = [] for layer in self._flatten_layers(): # If any eager losses are present, we assume the model to be part of # an eager training loop (either a custom one or the one used when # `run_eagerly=True`) and so we always return just the eager losses. if layer._eager_losses: # Filter placeholder losses that may have been added by revived # layers. (see base_layer_utils for details). if ( layer._eager_losses[0] is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER ): collected_losses.extend(layer._eager_losses) else: collected_losses.extend(layer._losses) for regularizer in layer._callable_losses: loss_tensor = regularizer() if loss_tensor is not None: collected_losses.append(loss_tensor) return collected_losses ``` ###Assistant : List of losses added using the `add_loss()` API. Variable regularization tensors are created when this property is accessed, so it is eager safe: accessing `losses` under a `tf.GradientTape` will propagate gradients back to the corresponding variables. Examples: >>> class MyLayer(tf.keras.layers.Layer): ... def call(self, inputs): ... self.add_loss(tf.abs(tf.reduce_mean(inputs))) ... return inputs >>> l = MyLayer() >>> l(np.ones((10, 1))) >>> l.losses [1.0] >>> inputs = tf.keras.Input(shape=(10,)) >>> x = tf.keras.layers.Dense(10)(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Activity regularization. >>> len(model.losses) 0 >>> model.add_loss(tf.abs(tf.reduce_mean(x))) >>> len(model.losses) 1 >>> inputs = tf.keras.Input(shape=(10,)) >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones') >>> x = d(inputs) >>> outputs = tf.keras.layers.Dense(1)(x) >>> model = tf.keras.Model(inputs, outputs) >>> # Weight regularization. >>> model.add_loss(lambda: tf.reduce_mean(d.kernel)) >>> model.losses [] Returns: A list of tensors. " 2666,"def fit(self, X, y, Xy=None): self._validate_params() X, y = self._validate_data(X, y, y_numeric=True, multi_output=True) _normalize = _deprecate_normalize( self.normalize, default=True, estimator_name=self.__class__.__name__ ) alpha = getattr(self, ""alpha"", 0.0) if hasattr(self, ""n_nonzero_coefs""): alpha = 0.0 # n_nonzero_coefs parametrization takes priority max_iter = self.n_nonzero_coefs else: max_iter = self.max_iter if self.jitter is not None: rng = check_random_state(self.random_state) noise = rng.uniform(high=self.jitter, size=len(y)) y = y + noise self._fit( X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path, normalize=_normalize, Xy=Xy, ) return self ","Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \ default=None Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. Returns ------- self : object Returns an instance of self. ",70,71,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def fit(self, X, y, Xy=None): self._validate_params() X, y = self._validate_data(X, y, y_numeric=True, multi_output=True) _normalize = _deprecate_normalize( self.normalize, default=True, estimator_name=self.__class__.__name__ ) alpha = getattr(self, ""alpha"", 0.0) if hasattr(self, ""n_nonzero_coefs""): alpha = 0.0 # n_nonzero_coefs parametrization takes priority max_iter = self.n_nonzero_coefs else: max_iter = self.max_iter if self.jitter is not None: rng = check_random_state(self.random_state) noise = rng.uniform(high=self.jitter, size=len(y)) y = y + noise self._fit( X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path, normalize=_normalize, Xy=Xy, ) return self ``` ###Assistant : Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \ default=None Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. Returns ------- self : object Returns an instance of self. " 2667,"def _decode_block_string(block_string): assert isinstance(block_string, str) ops = block_string.split('_') options = {} for op in ops: splits = re.split(r'(\d.*)', op) if len(splits) >= 2: key, value = splits[:2] options[key] = value # Check stride cond_1 = ('s' in options and len(options['s']) == 1) cond_2 = ((len(options['s']) == 2) and (options['s'][0] == options['s'][1])) assert (cond_1 or cond_2) return BlockArgs(kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip=('noskip' not in block_string), se_ratio=float(options['se']) if 'se' in options else None, stride=[int(options['s'][0])]) ", Gets a block through a string notation of arguments. ,9,73,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _decode_block_string(block_string): assert isinstance(block_string, str) ops = block_string.split('_') options = {} for op in ops: splits = re.split(r'(\d.*)', op) if len(splits) >= 2: key, value = splits[:2] options[key] = value # Check stride cond_1 = ('s' in options and len(options['s']) == 1) cond_2 = ((len(options['s']) == 2) and (options['s'][0] == options['s'][1])) assert (cond_1 or cond_2) return BlockArgs(kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip=('noskip' not in block_string), se_ratio=float(options['se']) if 'se' in options else None, stride=[int(options['s'][0])]) ``` ###Assistant : Gets a block through a string notation of arguments. " 2668,"def copy(a, order='K', subok=False): return array(a, order=order, subok=subok, copy=True) # Basic operations "," Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:`ndarray.copy` are very similar, but have different default values for their order= arguments.) 
subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (defaults to False). .. versionadded:: 1.19.0 Returns ------- arr : ndarray Array interpretation of `a`. See Also -------- ndarray.copy : Preferred method for creating an array copy Notes ----- This is equivalent to: >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) Note that, when we modify x, y changes, but not z: >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False Note that, np.copy clears previously set WRITEABLE=False flag. >>> a = np.array([1, 2, 3]) >>> a.flags[""WRITEABLE""] = False >>> b = np.copy(a) >>> b.flags[""WRITEABLE""] True >>> b[0] = 3 >>> b array([3, 2, 3]) Note that np.copy is a shallow copy and will not copy object elements within arrays. This is mainly important for arrays containing Python objects. The new array will contain the same object which may lead to surprises if that object can be modified (is mutable): >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) >>> b = np.copy(a) >>> b[2][0] = 10 >>> a array([1, 'm', list([10, 3, 4])], dtype=object) To ensure all elements within an ``object`` array are copied, use `copy.deepcopy`: >>> import copy >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) >>> c = copy.deepcopy(a) >>> c[2][0] = 10 >>> c array([1, 'm', list([10, 3, 4])], dtype=object) >>> a array([1, 'm', list([2, 3, 4])], dtype=object) ",340,12,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def copy(a, order='K', subok=False): return array(a, order=order, subok=subok, copy=True) # Basic operations ``` ###Assistant : Return an array copy of the given object. Parameters ---------- a : array_like Input data. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. (Note that this function and :meth:`ndarray.copy` are very similar, but have different default values for their order= arguments.) subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (defaults to False). .. versionadded:: 1.19.0 Returns ------- arr : ndarray Array interpretation of `a`. See Also -------- ndarray.copy : Preferred method for creating an array copy Notes ----- This is equivalent to: >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) >>> y = x >>> z = np.copy(x) Note that, when we modify x, y changes, but not z: >>> x[0] = 10 >>> x[0] == y[0] True >>> x[0] == z[0] False Note that, np.copy clears previously set WRITEABLE=False flag. >>> a = np.array([1, 2, 3]) >>> a.flags[""WRITEABLE""] = False >>> b = np.copy(a) >>> b.flags[""WRITEABLE""] True >>> b[0] = 3 >>> b array([3, 2, 3]) Note that np.copy is a shallow copy and will not copy object elements within arrays. This is mainly important for arrays containing Python objects. 
The new array will contain the same object which may lead to surprises if that object can be modified (is mutable): >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) >>> b = np.copy(a) >>> b[2][0] = 10 >>> a array([1, 'm', list([10, 3, 4])], dtype=object) To ensure all elements within an ``object`` array are copied, use `copy.deepcopy`: >>> import copy >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) >>> c = copy.deepcopy(a) >>> c[2][0] = 10 >>> c array([1, 'm', list([10, 3, 4])], dtype=object) >>> a array([1, 'm', list([2, 3, 4])], dtype=object) " 2669,"def test_context_placement_group(): driver_code = proc = run_string_as_driver_nonblocking(driver_code) "," import ray from ray.data.context import DatasetContext from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy from ray._private.test_utils import placement_group_assert_no_leak ray.init(num_cpus=1) context = DatasetContext.get_current() # This placement group will take up all cores of the local cluster. placement_group = ray.util.placement_group( name=""core_hog"", strategy=""SPREAD"", bundles=[ {""CPU"": 1}, ], ) ray.get(placement_group.ready()) context.scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group) pipe = ray.data.range(100, parallelism=2) \ .window(blocks_per_window=1) \ .map(lambda x: x + 1) assert pipe.take_all() == list(range(1, 101)) placement_group_assert_no_leak([placement_group]) ray.shutdown() ",64,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_context_placement_group(): driver_code = proc = run_string_as_driver_nonblocking(driver_code) ``` ###Assistant : import ray from ray.data.context import DatasetContext from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy from ray._private.test_utils import placement_group_assert_no_leak ray.init(num_cpus=1) context = DatasetContext.get_current() # This placement group will take up all cores of the local cluster. placement_group = ray.util.placement_group( name=""core_hog"", strategy=""SPREAD"", bundles=[ {""CPU"": 1}, ], ) ray.get(placement_group.ready()) context.scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group) pipe = ray.data.range(100, parallelism=2) \ .window(blocks_per_window=1) \ .map(lambda x: x + 1) assert pipe.take_all() == list(range(1, 101)) placement_group_assert_no_leak([placement_group]) ray.shutdown() " 2670,"def reorder_categories(self, new_categories, ordered=None): if set(self.dtype.categories) != set(new_categories): raise ValueError( ""items in new_categories are not the same as in old categories"" ) return self.set_categories(new_categories, ordered=ordered) "," Reorder categories as specified in new_categories. `new_categories` need to include all old categories and no new category items. Parameters ---------- new_categories : Index-like The categories in new order. ordered : bool, optional Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. Returns ------- cat : Categorical Categorical with reordered categories. Raises ------ ValueError If the new categories do not contain all old category items or any new ones See Also -------- rename_categories : Rename categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. 
",114,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reorder_categories(self, new_categories, ordered=None): if set(self.dtype.categories) != set(new_categories): raise ValueError( ""items in new_categories are not the same as in old categories"" ) return self.set_categories(new_categories, ordered=ordered) ``` ###Assistant : Reorder categories as specified in new_categories. `new_categories` need to include all old categories and no new category items. Parameters ---------- new_categories : Index-like The categories in new order. ordered : bool, optional Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. Returns ------- cat : Categorical Categorical with reordered categories. Raises ------ ValueError If the new categories do not contain all old category items or any new ones See Also -------- rename_categories : Rename categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. " 2671,"def test_get_member_list_no_permission_former_member_with_at_token(self): # create a room, invite the user and the user joins room_id = self.helper.create_room_as(""@alice:red"") self.helper.invite(room_id, ""@alice:red"", self.user_id) self.helper.join(room_id, self.user_id) # sync to get an at token channel = self.make_request(""GET"", ""/sync"") self.assertEqual(200, channel.code) sync_token = channel.json_body[""next_batch""] # check that the user can see the member list to start with channel = self.make_request( ""GET"", ""/rooms/%s/members?at=%s"" % (room_id, sync_token) ) self.assertEqual(200, channel.code, msg=channel.result[""body""]) # ban the user (Note: the user is actually allowed to see this event and # state so that they know they're banned!) self.helper.change_membership(room_id, ""@alice:red"", self.user_id, ""ban"") # invite a third user and let them join self.helper.invite(room_id, ""@alice:red"", ""@bob:red"") self.helper.join(room_id, ""@bob:red"") # now, with the original user, sync again to get a new at token channel = self.make_request(""GET"", ""/sync"") self.assertEqual(200, channel.code) sync_token = channel.json_body[""next_batch""] # check the user can no longer see the updated member list channel = self.make_request( ""GET"", ""/rooms/%s/members?at=%s"" % (room_id, sync_token) ) self.assertEqual(403, channel.code, msg=channel.result[""body""]) "," Tests that a former member of the room can not get the member list (in the case that they use an at token). ",23,150,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_get_member_list_no_permission_former_member_with_at_token(self): # create a room, invite the user and the user joins room_id = self.helper.create_room_as(""@alice:red"") self.helper.invite(room_id, ""@alice:red"", self.user_id) self.helper.join(room_id, self.user_id) # sync to get an at token channel = self.make_request(""GET"", ""/sync"") self.assertEqual(200, channel.code) sync_token = channel.json_body[""next_batch""] # check that the user can see the member list to start with channel = self.make_request( ""GET"", ""/rooms/%s/members?at=%s"" % (room_id, sync_token) ) self.assertEqual(200, channel.code, msg=channel.result[""body""]) # ban the user (Note: the user is actually allowed to see this event and # state so that they know they're banned!) self.helper.change_membership(room_id, ""@alice:red"", self.user_id, ""ban"") # invite a third user and let them join self.helper.invite(room_id, ""@alice:red"", ""@bob:red"") self.helper.join(room_id, ""@bob:red"") # now, with the original user, sync again to get a new at token channel = self.make_request(""GET"", ""/sync"") self.assertEqual(200, channel.code) sync_token = channel.json_body[""next_batch""] # check the user can no longer see the updated member list channel = self.make_request( ""GET"", ""/rooms/%s/members?at=%s"" % (room_id, sync_token) ) self.assertEqual(403, channel.code, msg=channel.result[""body""]) ``` ###Assistant : Tests that a former member of the room can not get the member list (in the case that they use an at token). " 2672,"def _keep_original_ws(s, tag_s): return ''.join( c if tag_c == "" "" and c.isspace() else tag_c for c, tag_c in zip(s, tag_s) ) ",Replace whitespace with the original whitespace characters in `s`,9,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _keep_original_ws(s, tag_s): return ''.join( c if tag_c == "" "" and c.isspace() else tag_c for c, tag_c in zip(s, tag_s) ) ``` ###Assistant : Replace whitespace with the original whitespace characters in `s` " 2673,"def unregister_cmap(name): cmap = _colormaps.get(name, None) _colormaps.unregister(name) return cmap "," Remove a colormap recognized by :func:`get_cmap`. You may not remove built-in colormaps. If the named colormap is not registered, returns with no error, raises if you try to de-register a default colormap. .. warning:: Colormap names are currently a shared namespace that may be used by multiple packages. Use `unregister_cmap` only if you know you have registered that name before. In particular, do not unregister just in case to clean the name before registering a new colormap. Parameters ---------- name : str The name of the colormap to be un-registered Returns ------- ColorMap or None If the colormap was registered, return it if not return `None` Raises ------ ValueError If you try to de-register a default built-in colormap. ",118,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unregister_cmap(name): cmap = _colormaps.get(name, None) _colormaps.unregister(name) return cmap ``` ###Assistant : Remove a colormap recognized by :func:`get_cmap`. You may not remove built-in colormaps. If the named colormap is not registered, returns with no error, raises if you try to de-register a default colormap. .. warning:: Colormap names are currently a shared namespace that may be used by multiple packages. 
Use `unregister_cmap` only if you know you have registered that name before. In particular, do not unregister just in case to clean the name before registering a new colormap. Parameters ---------- name : str The name of the colormap to be un-registered Returns ------- ColorMap or None If the colormap was registered, return it if not return `None` Raises ------ ValueError If you try to de-register a default built-in colormap. " 2674,"def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: # TODO(QL): raise an error for .tar.gz files as in _get_extraction_protocol return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f""Compression protocol '{compression}' not implemented."") ",read the magic number from a file-like object and return the compression protocol,13,53,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_extraction_protocol_with_magic_number(f) -> Optional[str]: magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH) f.seek(0) for i in range(MAGIC_NUMBER_MAX_LENGTH): compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: # TODO(QL): raise an error for .tar.gz files as in _get_extraction_protocol return compression compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i]) if compression is not None: raise NotImplementedError(f""Compression protocol '{compression}' not implemented."") ``` ###Assistant : read the magic number from a file-like object and return the compression protocol " 2675,"def _get_veths(net_data): if isinstance(net_data, dict): net_data = list(net_data.items()) nics = salt.utils.odict.OrderedDict() current_nic = salt.utils.odict.OrderedDict() no_names = True for item in net_data: if item and isinstance(item, dict): item = list(item.items())[0] # skip LXC configuration comment lines, and play only with tuples conf elif isinstance(item, str): # deal with reflection of commented lxc configs sitem = item.strip() if sitem.startswith(""#"") or not sitem: continue elif ""="" in item: item = tuple(a.strip() for a in item.split(""="", 1)) if item[0] == ""lxc.network.type"": current_nic = salt.utils.odict.OrderedDict() if item[0] == ""lxc.network.name"": no_names = False nics[item[1].strip()] = current_nic current_nic[item[0].strip()] = item[1].strip() # if not ethernet card name has been collected, assuming we collected # data for eth0 if no_names and current_nic: nics[DEFAULT_NIC] = current_nic return nics "," Parse the nic setup inside lxc conf tuples back to a dictionary indexed by network interface ",16,118,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _get_veths(net_data): if isinstance(net_data, dict): net_data = list(net_data.items()) nics = salt.utils.odict.OrderedDict() current_nic = salt.utils.odict.OrderedDict() no_names = True for item in net_data: if item and isinstance(item, dict): item = list(item.items())[0] # skip LXC configuration comment lines, and play only with tuples conf elif isinstance(item, str): # deal with reflection of commented lxc configs sitem = item.strip() if sitem.startswith(""#"") or not sitem: continue elif ""="" in item: item = tuple(a.strip() for a in item.split(""="", 1)) if item[0] == ""lxc.network.type"": current_nic = salt.utils.odict.OrderedDict() if item[0] == ""lxc.network.name"": no_names = False nics[item[1].strip()] = current_nic current_nic[item[0].strip()] = item[1].strip() # if not ethernet card name has been collected, assuming we collected # data for eth0 if no_names and current_nic: nics[DEFAULT_NIC] = current_nic return nics ``` ###Assistant : Parse the nic setup inside lxc conf tuples back to a dictionary indexed by network interface " 2676,"def start_stdout_logging() -> None: if '_stdout_' in _handlers: return handler = StreamHandler(sys.stdout) handler.setFormatter(_StdoutFormatter()) _handlers['_stdout_'] = handler _root_logger.addHandler(handler) "," Register the stdout handler. This function should be invoked on importing nni. It is safe to call it multiple times. ",20,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def start_stdout_logging() -> None: if '_stdout_' in _handlers: return handler = StreamHandler(sys.stdout) handler.setFormatter(_StdoutFormatter()) _handlers['_stdout_'] = handler _root_logger.addHandler(handler) ``` ###Assistant : Register the stdout handler. This function should be invoked on importing nni. It is safe to call it multiple times. " 2677,"def get_staffing_plan_detail(designation, company, offer_date): detail = frappe.db.sql( , (designation, company, offer_date), as_dict=1, ) return frappe._dict(detail[0]) if (detail and detail[0].parent) else None @frappe.whitelist()"," SELECT DISTINCT spd.parent, sp.from_date as from_date, sp.to_date as to_date, sp.name, sum(spd.vacancies) as vacancies, spd.designation FROM `tabStaffing Plan Detail` spd, `tabStaffing Plan` sp WHERE sp.docstatus=1 AND spd.designation=%s AND sp.company=%s AND spd.parent = sp.name AND %s between sp.from_date and sp.to_date ",38,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_staffing_plan_detail(designation, company, offer_date): detail = frappe.db.sql( , (designation, company, offer_date), as_dict=1, ) return frappe._dict(detail[0]) if (detail and detail[0].parent) else None @frappe.whitelist() ``` ###Assistant : SELECT DISTINCT spd.parent, sp.from_date as from_date, sp.to_date as to_date, sp.name, sum(spd.vacancies) as vacancies, spd.designation FROM `tabStaffing Plan Detail` spd, `tabStaffing Plan` sp WHERE sp.docstatus=1 AND spd.designation=%s AND sp.company=%s AND spd.parent = sp.name AND %s between sp.from_date and sp.to_date " 2678,"def test_display_name(self) -> None: evaluator = self._get_evaluator({""body"": ""foo bar baz""}) condition = { ""kind"": ""contains_display_name"", } # Blank names are skipped. self.assertFalse(evaluator.matches(condition, ""@user:test"", """")) # Check a display name that doesn't match. 
self.assertFalse(evaluator.matches(condition, ""@user:test"", ""not found"")) # Check a display name which matches. self.assertTrue(evaluator.matches(condition, ""@user:test"", ""foo"")) # A display name that matches, but not a full word does not result in a match. self.assertFalse(evaluator.matches(condition, ""@user:test"", ""ba"")) # A display name should not be interpreted as a regular expression. self.assertFalse(evaluator.matches(condition, ""@user:test"", ""ba[rz]"")) # A display name with spaces should work fine. self.assertTrue(evaluator.matches(condition, ""@user:test"", ""foo bar"")) ",Check for a matching display name in the body of the event.,12,94,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_display_name(self) -> None: evaluator = self._get_evaluator({""body"": ""foo bar baz""}) condition = { ""kind"": ""contains_display_name"", } # Blank names are skipped. self.assertFalse(evaluator.matches(condition, ""@user:test"", """")) # Check a display name that doesn't match. self.assertFalse(evaluator.matches(condition, ""@user:test"", ""not found"")) # Check a display name which matches. self.assertTrue(evaluator.matches(condition, ""@user:test"", ""foo"")) # A display name that matches, but not a full word does not result in a match. self.assertFalse(evaluator.matches(condition, ""@user:test"", ""ba"")) # A display name should not be interpreted as a regular expression. self.assertFalse(evaluator.matches(condition, ""@user:test"", ""ba[rz]"")) # A display name with spaces should work fine. self.assertTrue(evaluator.matches(condition, ""@user:test"", ""foo bar"")) ``` ###Assistant : Check for a matching display name in the body of the event. " 2679,"def test_readlink_not_a_link(file, source): with pytest.raises(SaltInvocationError) as exc: file.readlink(path=source) assert ""A valid link was not specified"" in exc.value.message "," Test readlink where the path is not a link Should throw a SaltInvocationError ",13,17,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_readlink_not_a_link(file, source): with pytest.raises(SaltInvocationError) as exc: file.readlink(path=source) assert ""A valid link was not specified"" in exc.value.message ``` ###Assistant : Test readlink where the path is not a link Should throw a SaltInvocationError " 2680,"def get_periodic_data(entry, filters): periodic_data = {} for d in entry: period = get_period(d.posting_date, filters) bal_qty = 0 # if period against item does not exist yet, instantiate it # insert existing balance dict against period, and add/subtract to it if periodic_data.get(d.item_code) and not periodic_data.get(d.item_code).get(period): previous_balance = periodic_data[d.item_code][""balance""].copy() periodic_data[d.item_code][period] = previous_balance if d.voucher_type == ""Stock Reconciliation"": if periodic_data.get(d.item_code) and periodic_data.get(d.item_code).get(""balance"").get( d.warehouse ): bal_qty = periodic_data[d.item_code][""balance""][d.warehouse] qty_diff = d.qty_after_transaction - bal_qty else: qty_diff = d.actual_qty if filters[""value_quantity""] == ""Quantity"": value = qty_diff else: value = d.stock_value_difference # period-warehouse wise balance periodic_data.setdefault(d.item_code, {}).setdefault(""balance"", {}).setdefault(d.warehouse, 0.0) periodic_data.setdefault(d.item_code, {}).setdefault(period, {}).setdefault(d.warehouse, 0.0) periodic_data[d.item_code][""balance""][d.warehouse] += value periodic_data[d.item_code][period][d.warehouse] = periodic_data[d.item_code][""balance""][ d.warehouse ] return periodic_data ","Structured as: Item 1 - Balance (updated and carried forward): - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jun 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jul 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value Item 2 - Balance (updated and carried forward): - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jun 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jul 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value ",118,106,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_periodic_data(entry, filters): periodic_data = {} for d in entry: period = get_period(d.posting_date, filters) bal_qty = 0 # if period against item does not exist yet, instantiate it # insert existing balance dict against period, and add/subtract to it if periodic_data.get(d.item_code) and not periodic_data.get(d.item_code).get(period): previous_balance = periodic_data[d.item_code][""balance""].copy() periodic_data[d.item_code][period] = previous_balance if d.voucher_type == ""Stock Reconciliation"": if periodic_data.get(d.item_code) and periodic_data.get(d.item_code).get(""balance"").get( d.warehouse ): bal_qty = periodic_data[d.item_code][""balance""][d.warehouse] qty_diff = d.qty_after_transaction - bal_qty else: qty_diff = d.actual_qty if filters[""value_quantity""] == ""Quantity"": value = qty_diff else: value = d.stock_value_difference # period-warehouse wise balance periodic_data.setdefault(d.item_code, {}).setdefault(""balance"", {}).setdefault(d.warehouse, 0.0) periodic_data.setdefault(d.item_code, {}).setdefault(period, {}).setdefault(d.warehouse, 0.0) periodic_data[d.item_code][""balance""][d.warehouse] += value periodic_data[d.item_code][period][d.warehouse] = periodic_data[d.item_code][""balance""][ d.warehouse ] return periodic_data ``` ###Assistant : Structured as: Item 1 - Balance (updated and carried forward): - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jun 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jul 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value Item 2 - Balance (updated and carried forward): - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jun 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value - Jul 2021 (sum of warehouse quantities used in report) - Warehouse A : bal_qty/value - Warehouse B : bal_qty/value " 2681,"def shuffle(*arrays, random_state=None, n_samples=None): return resample( *arrays, replace=False, n_samples=n_samples, random_state=random_state ) ","Shuffle arrays or sparse matrices in a consistent way. This is a convenience alias to ``resample(*arrays, replace=False)`` to do random permutations of the collections. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. random_state : int, RandomState instance or None, default=None Determines random number generation for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term:`Glossary `. n_samples : int, default=None Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. It should not be larger than the length of arrays. Returns ------- shuffled_arrays : sequence of indexable data-structures Sequence of shuffled copies of the collections. The original arrays are not impacted. See Also -------- resample : Resample arrays or sparse matrices in a consistent way. 
Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> import numpy as np >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import shuffle >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) >>> X array([[0., 0.], [2., 1.], [1., 0.]]) >>> X_sparse <3x2 sparse matrix of type '<... 'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[0., 0.], [2., 1.], [1., 0.]]) >>> y array([2, 1, 0]) >>> shuffle(y, n_samples=2, random_state=0) array([0, 1]) ",248,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def shuffle(*arrays, random_state=None, n_samples=None): return resample( *arrays, replace=False, n_samples=n_samples, random_state=random_state ) ``` ###Assistant : Shuffle arrays or sparse matrices in a consistent way. This is a convenience alias to ``resample(*arrays, replace=False)`` to do random permutations of the collections. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. random_state : int, RandomState instance or None, default=None Determines random number generation for shuffling the data. Pass an int for reproducible results across multiple function calls. See :term:`Glossary `. n_samples : int, default=None Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. It should not be larger than the length of arrays. Returns ------- shuffled_arrays : sequence of indexable data-structures Sequence of shuffled copies of the collections. The original arrays are not impacted. See Also -------- resample : Resample arrays or sparse matrices in a consistent way. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> import numpy as np >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import shuffle >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) >>> X array([[0., 0.], [2., 1.], [1., 0.]]) >>> X_sparse <3x2 sparse matrix of type '<... 
'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[0., 0.], [2., 1.], [1., 0.]]) >>> y array([2, 1, 0]) >>> shuffle(y, n_samples=2, random_state=0) array([0, 1]) " 2682,"def check_changes(self, args, results): # type: (SanityConfig, Results) -> None integration_targets = list(walk_integration_targets()) module_targets = list(walk_module_targets()) integration_targets_by_name = dict((target.name, target) for target in integration_targets) module_names_by_path = dict((target.path, target.module) for target in module_targets) disabled_targets = [] unstable_targets = [] unsupported_targets = [] for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]: for target in args.metadata.change_description.focused_command_targets[command]: if self.DISABLED in integration_targets_by_name[target].aliases: disabled_targets.append(target) elif self.UNSTABLE in integration_targets_by_name[target].aliases: unstable_targets.append(target) elif self.UNSUPPORTED in integration_targets_by_name[target].aliases: unsupported_targets.append(target) untested_modules = [] for path in args.metadata.change_description.no_integration_paths: module = module_names_by_path.get(path) if module: untested_modules.append(module) comments = [ self.format_comment(self.TEMPLATE_DISABLED, disabled_targets), self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets), self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets), self.format_comment(self.TEMPLATE_UNTESTED, untested_modules), ] comments = [comment for comment in comments if comment] labels = dict( needs_tests=bool(untested_modules), disabled_tests=bool(disabled_targets), unstable_tests=bool(unstable_targets), unsupported_tests=bool(unsupported_targets), ) results.comments += comments results.labels.update(labels) ",Check changes and store results in the provided result dictionary.,10,118,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def check_changes(self, args, results): # type: (SanityConfig, Results) -> None integration_targets = list(walk_integration_targets()) module_targets = list(walk_module_targets()) integration_targets_by_name = dict((target.name, target) for target in integration_targets) module_names_by_path = dict((target.path, target.module) for target in module_targets) disabled_targets = [] unstable_targets = [] unsupported_targets = [] for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]: for target in args.metadata.change_description.focused_command_targets[command]: if self.DISABLED in integration_targets_by_name[target].aliases: disabled_targets.append(target) elif self.UNSTABLE in integration_targets_by_name[target].aliases: unstable_targets.append(target) elif self.UNSUPPORTED in integration_targets_by_name[target].aliases: unsupported_targets.append(target) untested_modules = [] for path in args.metadata.change_description.no_integration_paths: module = module_names_by_path.get(path) if module: untested_modules.append(module) comments = [ self.format_comment(self.TEMPLATE_DISABLED, disabled_targets), self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets), self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets), self.format_comment(self.TEMPLATE_UNTESTED, untested_modules), ] comments = [comment for comment in comments if comment] labels = dict( needs_tests=bool(untested_modules), disabled_tests=bool(disabled_targets), unstable_tests=bool(unstable_targets), unsupported_tests=bool(unsupported_targets), ) results.comments += comments results.labels.update(labels) ``` ###Assistant : Check changes and store results in the provided result dictionary. " 2683,"def test_overlap_first(business_client, setup_before_upload, show_overlap_first): c = business_client config = dict( title='test_overlap_first', is_published=True, maximum_annotations=1, show_overlap_first=show_overlap_first, sampling=""Uniform sampling"", label_config= ) project = make_project(config, business_client.user) annotation_result = json.dumps([{ 'from_name': 'text_class', 'to_name': 'text', 'type': 'choices', 'value': {'choices': ['class_A']} }]) num_tasks = 1000 overlap_cohort_percentage = 1 # set up tasks overlap setup_after_upload = True if setup_before_upload: r = c.patch( f'/api/projects/{project.id}/', data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}), content_type='application/json' ) assert r.status_code == 200 setup_after_upload = False # create tasks tasks = [] for i in range(num_tasks): tasks.append({'data': {'text': f'this is {str(i)}'}}) r = business_client.post( f'/api/projects/{project.id}/tasks/bulk/', data=json.dumps(tasks), content_type='application/json') assert r.status_code == 201 if setup_after_upload: r = c.patch( f'/api/projects/{project.id}/', data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}), content_type='application/json' ) assert r.status_code == 200 expected_tasks_with_overlap = int(overlap_cohort_percentage / 100. * num_tasks) assert Task.objects.filter(Q(project_id=project.id) & Q(overlap__gt=1)).count() == expected_tasks_with_overlap "," ",13,122,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_overlap_first(business_client, setup_before_upload, show_overlap_first): c = business_client config = dict( title='test_overlap_first', is_published=True, maximum_annotations=1, show_overlap_first=show_overlap_first, sampling=""Uniform sampling"", label_config= ) project = make_project(config, business_client.user) annotation_result = json.dumps([{ 'from_name': 'text_class', 'to_name': 'text', 'type': 'choices', 'value': {'choices': ['class_A']} }]) num_tasks = 1000 overlap_cohort_percentage = 1 # set up tasks overlap setup_after_upload = True if setup_before_upload: r = c.patch( f'/api/projects/{project.id}/', data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}), content_type='application/json' ) assert r.status_code == 200 setup_after_upload = False # create tasks tasks = [] for i in range(num_tasks): tasks.append({'data': {'text': f'this is {str(i)}'}}) r = business_client.post( f'/api/projects/{project.id}/tasks/bulk/', data=json.dumps(tasks), content_type='application/json') assert r.status_code == 201 if setup_after_upload: r = c.patch( f'/api/projects/{project.id}/', data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}), content_type='application/json' ) assert r.status_code == 200 expected_tasks_with_overlap = int(overlap_cohort_percentage / 100. * num_tasks) assert Task.objects.filter(Q(project_id=project.id) & Q(overlap__gt=1)).count() == expected_tasks_with_overlap ``` ###Assistant : " 2684,"def svd_flip(u, v, u_based_decision=True): if u_based_decision: # columns of u, rows of v max_abs_cols = np.argmax(np.abs(u), axis=0) signs = np.sign(u[max_abs_cols, range(u.shape[1])]) u *= signs v *= signs[:, np.newaxis] else: # rows of v, columns of u max_abs_rows = np.argmax(np.abs(v), axis=1) signs = np.sign(v[range(v.shape[0]), max_abs_rows]) u *= signs v *= signs[:, np.newaxis] return u, v ","Sign correction to ensure deterministic output from SVD. Adjusts the columns of u and the rows of v such that the loadings in the columns in u that are largest in absolute value are always positive. Parameters ---------- u : ndarray Parameters u and v are the output of `linalg.svd` or :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. v : ndarray Parameters u and v are the output of `linalg.svd` or :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. The input v should really be called vt to be consistent with scipy's output. u_based_decision : bool, default=True If True, use the columns of u as the basis for sign flipping. Otherwise, use the rows of v. The choice of which variable to base the decision on is generally algorithm dependent. Returns ------- u_adjusted : ndarray Array u with adjusted columns and the same dimensions as u. v_adjusted : ndarray Array v with adjusted rows and the same dimensions as v. ",171,54,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def svd_flip(u, v, u_based_decision=True): if u_based_decision: # columns of u, rows of v max_abs_cols = np.argmax(np.abs(u), axis=0) signs = np.sign(u[max_abs_cols, range(u.shape[1])]) u *= signs v *= signs[:, np.newaxis] else: # rows of v, columns of u max_abs_rows = np.argmax(np.abs(v), axis=1) signs = np.sign(v[range(v.shape[0]), max_abs_rows]) u *= signs v *= signs[:, np.newaxis] return u, v ``` ###Assistant : Sign correction to ensure deterministic output from SVD. Adjusts the columns of u and the rows of v such that the loadings in the columns in u that are largest in absolute value are always positive. Parameters ---------- u : ndarray Parameters u and v are the output of `linalg.svd` or :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. v : ndarray Parameters u and v are the output of `linalg.svd` or :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner dimensions so one can compute `np.dot(u * s, v)`. The input v should really be called vt to be consistent with scipy's output. u_based_decision : bool, default=True If True, use the columns of u as the basis for sign flipping. Otherwise, use the rows of v. The choice of which variable to base the decision on is generally algorithm dependent. Returns ------- u_adjusted : ndarray Array u with adjusted columns and the same dimensions as u. v_adjusted : ndarray Array v with adjusted rows and the same dimensions as v. " 2685,"def extra_action_out_fn(self) -> Dict[str, TensorType]: extra_fetches = {} # Action-logp and action-prob. if self._sampled_action_logp is not None: extra_fetches[SampleBatch.ACTION_PROB] = self._sampled_action_prob extra_fetches[SampleBatch.ACTION_LOGP] = self._sampled_action_logp # Action-dist inputs. if self._dist_inputs is not None: extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = self._dist_inputs return extra_fetches ","Extra values to fetch and return from compute_actions(). By default we return action probability/log-likelihood info and action distribution inputs (if present). Returns: Dict[str, TensorType]: An extra fetch-dict to be passed to and returned from the compute_actions() call. ",37,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extra_action_out_fn(self) -> Dict[str, TensorType]: extra_fetches = {} # Action-logp and action-prob. if self._sampled_action_logp is not None: extra_fetches[SampleBatch.ACTION_PROB] = self._sampled_action_prob extra_fetches[SampleBatch.ACTION_LOGP] = self._sampled_action_logp # Action-dist inputs. if self._dist_inputs is not None: extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = self._dist_inputs return extra_fetches ``` ###Assistant : Extra values to fetch and return from compute_actions(). By default we return action probability/log-likelihood info and action distribution inputs (if present). Returns: Dict[str, TensorType]: An extra fetch-dict to be passed to and returned from the compute_actions() call. 
" 2686,"async def cleanup_finished_triggers(self): for trigger_id, details in list(self.triggers.items()): if details[""task""].done(): # Check to see if it exited for good reasons saved_exc = None try: result = details[""task""].result() except (asyncio.CancelledError, SystemExit, KeyboardInterrupt): # These are ""expected"" exceptions and we stop processing here # If we don't, then the system requesting a trigger be removed - # which turns into CancelledError - results in a failure. del self.triggers[trigger_id] continue except BaseException as e: # This is potentially bad, so log it. self.log.exception(""Trigger %s exited with error %s"", details[""name""], e) saved_exc = e else: # See if they foolishly returned a TriggerEvent if isinstance(result, TriggerEvent): self.log.error( ""Trigger %s returned a TriggerEvent rather than yielding it"", details[""name""] ) # See if this exited without sending an event, in which case # any task instances depending on it need to be failed if details[""events""] == 0: self.log.error( ""Trigger %s exited without sending an event. Dependent tasks will be failed."", details[""name""], ) self.failed_triggers.append((trigger_id, saved_exc)) del self.triggers[trigger_id] await asyncio.sleep(0) "," Go through all trigger tasks (coroutines) and clean up entries for ones that have exited, optionally warning users if the exit was not normal. ",24,162,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def cleanup_finished_triggers(self): for trigger_id, details in list(self.triggers.items()): if details[""task""].done(): # Check to see if it exited for good reasons saved_exc = None try: result = details[""task""].result() except (asyncio.CancelledError, SystemExit, KeyboardInterrupt): # These are ""expected"" exceptions and we stop processing here # If we don't, then the system requesting a trigger be removed - # which turns into CancelledError - results in a failure. del self.triggers[trigger_id] continue except BaseException as e: # This is potentially bad, so log it. self.log.exception(""Trigger %s exited with error %s"", details[""name""], e) saved_exc = e else: # See if they foolishly returned a TriggerEvent if isinstance(result, TriggerEvent): self.log.error( ""Trigger %s returned a TriggerEvent rather than yielding it"", details[""name""] ) # See if this exited without sending an event, in which case # any task instances depending on it need to be failed if details[""events""] == 0: self.log.error( ""Trigger %s exited without sending an event. Dependent tasks will be failed."", details[""name""], ) self.failed_triggers.append((trigger_id, saved_exc)) del self.triggers[trigger_id] await asyncio.sleep(0) ``` ###Assistant : Go through all trigger tasks (coroutines) and clean up entries for ones that have exited, optionally warning users if the exit was not normal. " 2687,"def get_global_travel_time(self): gtt = 0 for entity in self.tripinfo: gtt += self.get_duration(entity, default=0.0) for entity in self.personinfo: gtt += self.get_duration(entity, default=0.0) return gtt ########################################################################### # ROUTING "," Returns the global travel time computed from SUMO tripinfo data. The functions process_tripinfo_file() needs to be called in advance to initialize the data structures required. ",25,26,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_global_travel_time(self): gtt = 0 for entity in self.tripinfo: gtt += self.get_duration(entity, default=0.0) for entity in self.personinfo: gtt += self.get_duration(entity, default=0.0) return gtt ########################################################################### # ROUTING ``` ###Assistant : Returns the global travel time computed from SUMO tripinfo data. The functions process_tripinfo_file() needs to be called in advance to initialize the data structures required. " 2688,"def post(self, url, data=None, json=None, **kwargs): r return self.request(""POST"", url, data=data, json=json, **kwargs) ","Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response ",55,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def post(self, url, data=None, json=None, **kwargs): r return self.request(""POST"", url, data=data, json=json, **kwargs) ``` ###Assistant : Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response " 2689,"def __getitem__(self, idx): # type: (int) -> HPackHdrEntry assert idx >= 0 if idx > type(self)._static_entries_last_idx: idx -= type(self)._static_entries_last_idx + 1 if idx >= len(self._dynamic_table): raise KeyError( 'EINVAL: idx: out-of-bound read: {}; maximum index: {}'.format(idx, len(self._dynamic_table)) # noqa: E501 ) return self._dynamic_table[idx] return type(self)._static_entries[idx] ","Gets an element from the header tables (static or dynamic indifferently) :param int idx: the index number of the entry to retrieve. If the index value is superior to the last index of the static entry table, then the dynamic entry type is requested, following the procedure described in RFC 7541 par2.3.3 :return: HPackHdrEntry: the entry defined at this requested index. If the entry does not exist, KeyError is # noqa: E501 raised :raises: KeyError, AssertionError ",76,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __getitem__(self, idx): # type: (int) -> HPackHdrEntry assert idx >= 0 if idx > type(self)._static_entries_last_idx: idx -= type(self)._static_entries_last_idx + 1 if idx >= len(self._dynamic_table): raise KeyError( 'EINVAL: idx: out-of-bound read: {}; maximum index: {}'.format(idx, len(self._dynamic_table)) # noqa: E501 ) return self._dynamic_table[idx] return type(self)._static_entries[idx] ``` ###Assistant : Gets an element from the header tables (static or dynamic indifferently) :param int idx: the index number of the entry to retrieve. If the index value is superior to the last index of the static entry table, then the dynamic entry type is requested, following the procedure described in RFC 7541 par2.3.3 :return: HPackHdrEntry: the entry defined at this requested index. 
If the entry does not exist, KeyError is # noqa: E501 raised :raises: KeyError, AssertionError " 2690,"def test_pagination(self): parent = Parent.objects.create(name=""anything"") for i in range(1, 31): Child.objects.create(name=""name %s"" % i, parent=parent) Child.objects.create(name=""filtered %s"" % i, parent=parent) request = self.factory.get(""/child/"") request.user = self.superuser # Test default queryset m = ChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) self.assertEqual(cl.queryset.count(), 60) self.assertEqual(cl.paginator.count, 60) self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6]) # Test custom queryset m = FilteredChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) self.assertEqual(cl.queryset.count(), 30) self.assertEqual(cl.paginator.count, 30) self.assertEqual(list(cl.paginator.page_range), [1, 2, 3]) "," Regression tests for #12893: Pagination in admins changelist doesn't use queryset set by modeladmin. ",14,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pagination(self): parent = Parent.objects.create(name=""anything"") for i in range(1, 31): Child.objects.create(name=""name %s"" % i, parent=parent) Child.objects.create(name=""filtered %s"" % i, parent=parent) request = self.factory.get(""/child/"") request.user = self.superuser # Test default queryset m = ChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) self.assertEqual(cl.queryset.count(), 60) self.assertEqual(cl.paginator.count, 60) self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6]) # Test custom queryset m = FilteredChildAdmin(Child, custom_site) cl = m.get_changelist_instance(request) self.assertEqual(cl.queryset.count(), 30) self.assertEqual(cl.paginator.count, 30) self.assertEqual(list(cl.paginator.page_range), [1, 2, 3]) ``` ###Assistant : Regression tests for #12893: Pagination in admins changelist doesn't use queryset set by modeladmin. " 2691,"def transform(self, X): if self.solver == ""lsqr"": raise NotImplementedError( ""transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')."" ) check_is_fitted(self) X = self._validate_data(X, reset=False) if self.solver == ""svd"": X_new = np.dot(X - self.xbar_, self.scalings_) elif self.solver == ""eigen"": X_new = np.dot(X, self.scalings_) return X_new[:, : self._max_components] ","Project data to maximize class separation. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- X_new : ndarray of shape (n_samples, n_components) or \ (n_samples, min(rank, n_components)) Transformed data. In the case of the 'svd' solver, the shape is (n_samples, min(rank, n_components)). ",46,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transform(self, X): if self.solver == ""lsqr"": raise NotImplementedError( ""transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')."" ) check_is_fitted(self) X = self._validate_data(X, reset=False) if self.solver == ""svd"": X_new = np.dot(X - self.xbar_, self.scalings_) elif self.solver == ""eigen"": X_new = np.dot(X, self.scalings_) return X_new[:, : self._max_components] ``` ###Assistant : Project data to maximize class separation. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. 
Returns ------- X_new : ndarray of shape (n_samples, n_components) or \ (n_samples, min(rank, n_components)) Transformed data. In the case of the 'svd' solver, the shape is (n_samples, min(rank, n_components)). " 2692,"async def test_heater_cooler_hvac_mode_vs_hvac_action(hass, utcnow): helper = await setup_test_component(hass, create_heater_cooler_service) # Simulate that current temperature is above target temp # Heating might be on, but hvac_action currently 'off' await helper.async_update( ServicesTypes.HEATER_COOLER, { CharacteristicsTypes.TEMPERATURE_CURRENT: 22, CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21, CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.IDLE, CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT, CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED, }, ) state = await helper.poll_and_get_state() assert state.state == ""heat"" assert state.attributes[""hvac_action""] == ""idle"" # Simulate that current temperature is below target temp # Heating might be on and hvac_action currently 'heat' await helper.async_update( ServicesTypes.HEATER_COOLER, { CharacteristicsTypes.TEMPERATURE_CURRENT: 19, CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21, CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.HEATING, CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT, CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED, }, ) state = await helper.poll_and_get_state() assert state.state == ""heat"" assert state.attributes[""hvac_action""] == ""heating"" ",Check that we haven't conflated hvac_mode and hvac_action.,8,101,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_heater_cooler_hvac_mode_vs_hvac_action(hass, utcnow): helper = await setup_test_component(hass, create_heater_cooler_service) # Simulate that current temperature is above target temp # Heating might be on, but hvac_action currently 'off' await helper.async_update( ServicesTypes.HEATER_COOLER, { CharacteristicsTypes.TEMPERATURE_CURRENT: 22, CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21, CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.IDLE, CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT, CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED, }, ) state = await helper.poll_and_get_state() assert state.state == ""heat"" assert state.attributes[""hvac_action""] == ""idle"" # Simulate that current temperature is below target temp # Heating might be on and hvac_action currently 'heat' await helper.async_update( ServicesTypes.HEATER_COOLER, { CharacteristicsTypes.TEMPERATURE_CURRENT: 19, CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD: 21, CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE: CurrentHeaterCoolerStateValues.HEATING, CharacteristicsTypes.TARGET_HEATER_COOLER_STATE: TargetHeaterCoolerStateValues.HEAT, CharacteristicsTypes.SWING_MODE: SwingModeValues.DISABLED, }, ) state = await helper.poll_and_get_state() assert state.state == ""heat"" assert state.attributes[""hvac_action""] == ""heating"" ``` ###Assistant : Check that we haven't conflated hvac_mode and hvac_action. 
" 2693,"def not_in_timeout(cls, last_triggered, timeout): return ( last_triggered is None or timeout is None or (time.time() - last_triggered > timeout) ) ",Checks if current error lies not in timeout after last trigger (potential reset of connection).,15,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def not_in_timeout(cls, last_triggered, timeout): return ( last_triggered is None or timeout is None or (time.time() - last_triggered > timeout) ) ``` ###Assistant : Checks if current error lies not in timeout after last trigger (potential reset of connection). " 2694,"def get_rescored_finished(self, n_best=None): # if we never actually finished, force one if not self.finished: self.outputs[-1][0] = self.eos self.finished.append( _HypothesisTail( timestep=len(self.outputs) - 1, hypid=0, score=self.all_scores[-1][0], tokenid=self.outputs[-1][0], token_score=self.token_scores[0, -1] if self.token_scores is not None else None, token_rank=self.token_ranks[0, -1] if self.token_ranks is not None else None, ) ) rescored_finished = [] for finished_item in self.finished: current_length = finished_item.timestep + 1 # these weights are from Google NMT paper length_penalty = math.pow((1 + current_length) / 6, self.length_penalty) rescored_finished.append( _HypothesisTail( timestep=finished_item.timestep, hypid=finished_item.hypid, score=finished_item.score / length_penalty, tokenid=finished_item.tokenid, token_score=finished_item.token_score, token_rank=finished_item.token_rank, ) ) # Note: beam size is almost always pretty small, so sorting is cheap enough srted = sorted(rescored_finished, key=attrgetter('score'), reverse=True) if n_best is not None: srted = srted[:n_best] n_best_list = [] for hyp in srted: hyp_data = self._get_hyp_from_finished(hyp) token_ids = self._get_pretty_hypothesis(hyp_data) token_metadata = ( self._get_pretty_token_metadata(hyp_data) if self.verbose else None ) n_best_list.append((token_ids, hyp.score, token_metadata)) # check that there is at least one finished candidate # and assert that each of them contains only one EOS assert ( len(n_best_list) >= 1 ), f'TreeSearch returned {len(n_best_list)} candidates, must be >= 1' for (pred, score, _) in n_best_list: assert (pred == self.eos).sum() == 1, ( f'TreeSearch returned a finalized hypo with multiple end tokens ' f'with score {score.item():.2f}' ) return n_best_list "," Return finished hypotheses according to adjusted scores. Score adjustment is done according to the Google NMT paper, which penalizes long utterances. :param n_best: number of finalized hypotheses to return :return: list of (tokens, score, token_metadata) 3-tuples, in sorted order, where: - tokens is a tensor of token ids - score is the adjusted log probability of the entire utterance - token_metadata dictionary: token_logprobs -> a tensor of conditional log probabilities of tokens token_ranks -> a tensor of ranks of tokens in vocabulator, by probability, when sampled ",86,201,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_rescored_finished(self, n_best=None): # if we never actually finished, force one if not self.finished: self.outputs[-1][0] = self.eos self.finished.append( _HypothesisTail( timestep=len(self.outputs) - 1, hypid=0, score=self.all_scores[-1][0], tokenid=self.outputs[-1][0], token_score=self.token_scores[0, -1] if self.token_scores is not None else None, token_rank=self.token_ranks[0, -1] if self.token_ranks is not None else None, ) ) rescored_finished = [] for finished_item in self.finished: current_length = finished_item.timestep + 1 # these weights are from Google NMT paper length_penalty = math.pow((1 + current_length) / 6, self.length_penalty) rescored_finished.append( _HypothesisTail( timestep=finished_item.timestep, hypid=finished_item.hypid, score=finished_item.score / length_penalty, tokenid=finished_item.tokenid, token_score=finished_item.token_score, token_rank=finished_item.token_rank, ) ) # Note: beam size is almost always pretty small, so sorting is cheap enough srted = sorted(rescored_finished, key=attrgetter('score'), reverse=True) if n_best is not None: srted = srted[:n_best] n_best_list = [] for hyp in srted: hyp_data = self._get_hyp_from_finished(hyp) token_ids = self._get_pretty_hypothesis(hyp_data) token_metadata = ( self._get_pretty_token_metadata(hyp_data) if self.verbose else None ) n_best_list.append((token_ids, hyp.score, token_metadata)) # check that there is at least one finished candidate # and assert that each of them contains only one EOS assert ( len(n_best_list) >= 1 ), f'TreeSearch returned {len(n_best_list)} candidates, must be >= 1' for (pred, score, _) in n_best_list: assert (pred == self.eos).sum() == 1, ( f'TreeSearch returned a finalized hypo with multiple end tokens ' f'with score {score.item():.2f}' ) return n_best_list ``` ###Assistant : Return finished hypotheses according to adjusted scores. Score adjustment is done according to the Google NMT paper, which penalizes long utterances. :param n_best: number of finalized hypotheses to return :return: list of (tokens, score, token_metadata) 3-tuples, in sorted order, where: - tokens is a tensor of token ids - score is the adjusted log probability of the entire utterance - token_metadata dictionary: token_logprobs -> a tensor of conditional log probabilities of tokens token_ranks -> a tensor of ranks of tokens in vocabulator, by probability, when sampled " 2695,"def quantile(self, q=0.5, **kwargs): return self._downsample(""quantile"", q=q, **kwargs) "," Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Returns ------- DataFrame or Series Quantile of values within each group. See Also -------- Series.quantile Return a series, where the index is q and the values are the quantiles. DataFrame.quantile Return a DataFrame, where the columns are the columns of self, and the values are the quantiles. DataFrameGroupBy.quantile Return a DataFrame, where the coulmns are groupby columns, and the values are its quantiles. ",80,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def quantile(self, q=0.5, **kwargs): return self._downsample(""quantile"", q=q, **kwargs) ``` ###Assistant : Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Returns ------- DataFrame or Series Quantile of values within each group. 
See Also -------- Series.quantile Return a series, where the index is q and the values are the quantiles. DataFrame.quantile Return a DataFrame, where the columns are the columns of self, and the values are the quantiles. DataFrameGroupBy.quantile Return a DataFrame, where the coulmns are groupby columns, and the values are its quantiles. " 2696,"def current_columns(self): return copy.deepcopy(self.custcols) #deepcopy to prevent users from changing it "," Return the currently defined custom columns Return the currently defined custom columns including the ones that haven't yet been created. It is a dict of dicts defined as follows: custcols[lookup_name] = { 'label': lookup_name, 'name': column_heading, 'datatype': datatype, 'display': display, 'normalized': None, 'colnum': an integer used internally, 'is_multiple': is_multiple, } Columns that already exist will have additional attributes that this class doesn't use. See calibre.library.field_metadata.add_custom_field() for the complete list. ",69,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def current_columns(self): return copy.deepcopy(self.custcols) #deepcopy to prevent users from changing it ``` ###Assistant : Return the currently defined custom columns Return the currently defined custom columns including the ones that haven't yet been created. It is a dict of dicts defined as follows: custcols[lookup_name] = { 'label': lookup_name, 'name': column_heading, 'datatype': datatype, 'display': display, 'normalized': None, 'colnum': an integer used internally, 'is_multiple': is_multiple, } Columns that already exist will have additional attributes that this class doesn't use. See calibre.library.field_metadata.add_custom_field() for the complete list. " 2697,"def transform_vector(self, vector): return Vector( (vector.x + self.offset[0]) * self.scale[0], (vector.y + self.offset[1]) * self.scale[1], ) "," Transforms the given vector into the coordinate space of the final image. Use this to find out where a point on the source image would end up in the final image after cropping/resizing has been performed. Returns a new vector. ",40,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transform_vector(self, vector): return Vector( (vector.x + self.offset[0]) * self.scale[0], (vector.y + self.offset[1]) * self.scale[1], ) ``` ###Assistant : Transforms the given vector into the coordinate space of the final image. Use this to find out where a point on the source image would end up in the final image after cropping/resizing has been performed. Returns a new vector. " 2698,"def _load_from_file(module_path): from imp import PY_SOURCE, load_module imported = None if module_path: with open(module_path, 'r') as openfile: imported = load_module(""mod"", openfile, module_path, ('imported', 'r', PY_SOURCE)) return imported "," Load a python module from its absolute filesystem path ",9,27,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _load_from_file(module_path): from imp import PY_SOURCE, load_module imported = None if module_path: with open(module_path, 'r') as openfile: imported = load_module(""mod"", openfile, module_path, ('imported', 'r', PY_SOURCE)) return imported ``` ###Assistant : Load a python module from its absolute filesystem path " 2699,"def _alter_column_type_sql(self, table, old_field, new_field, new_type): if not hasattr(old_field, ""dim"") or not hasattr(new_field, ""dim""): return super()._alter_column_type_sql(table, old_field, new_field, new_type) if old_field.dim == 2 and new_field.dim == 3: sql_alter = self.sql_alter_column_to_3d elif old_field.dim == 3 and new_field.dim == 2: sql_alter = self.sql_alter_column_to_2d else: sql_alter = self.sql_alter_column_type return ( ( sql_alter % { ""column"": self.quote_name(new_field.column), ""type"": new_type, }, [], ), [], ) "," Special case when dimension changed. ",5,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _alter_column_type_sql(self, table, old_field, new_field, new_type): if not hasattr(old_field, ""dim"") or not hasattr(new_field, ""dim""): return super()._alter_column_type_sql(table, old_field, new_field, new_type) if old_field.dim == 2 and new_field.dim == 3: sql_alter = self.sql_alter_column_to_3d elif old_field.dim == 3 and new_field.dim == 2: sql_alter = self.sql_alter_column_to_2d else: sql_alter = self.sql_alter_column_type return ( ( sql_alter % { ""column"": self.quote_name(new_field.column), ""type"": new_type, }, [], ), [], ) ``` ###Assistant : Special case when dimension changed. " 2700,"def format_command(self) -> str: command = 'ansible-test %s' % self.command if self.test: command += ' --test %s' % self.test if self.python_version: command += ' --python %s' % self.python_version return command ",Return a string representing the CLI command associated with the test failure.,12,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def format_command(self) -> str: command = 'ansible-test %s' % self.command if self.test: command += ' --test %s' % self.test if self.python_version: command += ' --python %s' % self.python_version return command ``` ###Assistant : Return a string representing the CLI command associated with the test failure. " 2701,"def act(self): obs = self.observation reply = {'text': INVALID, 'id': self.getID(), 'episode_done': False} if obs is None or obs['text'] == DO_NOT_RETRIEVE: return Message(reply) # construct the search query labels = obs.get('labels', obs.get('eval_labels', None)) search_query = self.construct_search_query(labels) if ( self.opt['min_num_search_words'] > 0 and len(search_query[0].split()) <= self.opt['min_num_search_words'] ): return Message(reply) # retrieve self.search_engine.set_search_queries(search_query) retrieved, _ = self.search_engine.retrieve_and_score(self.dummy) all_docs = [d.get_tokenization_str() for d in retrieved[0]] # batched # Find the right doc best_f1, best_doc, best_doc_idx = self.get_best_doc(all_docs, labels) if best_doc: assert best_doc_idx is not None reply['knowledge'] = f'{TOKEN_KNOWLEDGE}{best_doc}{TOKEN_END_KNOWLEDGE}' reply['f1_overlap'] = best_f1 reply['text'] = labels[0] reply['retrieved_docs'] = all_docs reply['gold_doc'] = all_docs[best_doc_idx] reply['search_query'] = search_query[0] return Message(reply) "," Search for overlap with the observation label. 
Return the best fitting document. A document is valid if the f1 is above the threshold AND the f1 is less than 1.0 AND the target label is not in the document. ",39,102,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def act(self): obs = self.observation reply = {'text': INVALID, 'id': self.getID(), 'episode_done': False} if obs is None or obs['text'] == DO_NOT_RETRIEVE: return Message(reply) # construct the search query labels = obs.get('labels', obs.get('eval_labels', None)) search_query = self.construct_search_query(labels) if ( self.opt['min_num_search_words'] > 0 and len(search_query[0].split()) <= self.opt['min_num_search_words'] ): return Message(reply) # retrieve self.search_engine.set_search_queries(search_query) retrieved, _ = self.search_engine.retrieve_and_score(self.dummy) all_docs = [d.get_tokenization_str() for d in retrieved[0]] # batched # Find the right doc best_f1, best_doc, best_doc_idx = self.get_best_doc(all_docs, labels) if best_doc: assert best_doc_idx is not None reply['knowledge'] = f'{TOKEN_KNOWLEDGE}{best_doc}{TOKEN_END_KNOWLEDGE}' reply['f1_overlap'] = best_f1 reply['text'] = labels[0] reply['retrieved_docs'] = all_docs reply['gold_doc'] = all_docs[best_doc_idx] reply['search_query'] = search_query[0] return Message(reply) ``` ###Assistant : Search for overlap with the observation label. Return the best fitting document. A document is valid if the f1 is above the threshold AND the f1 is less than 1.0 AND the target label is not in the document. " 2702,"def print_help(self): help_text = f console.print(text=help_text, menu=""Forex - Quantitative Analysis"") ","Print help[cmds] pick pick target column for analysis[/cmds] [param]Pair: [/param]{self.ticker} [param]Target Column: [/param]{self.target} [cmds] [info]Statistics:[/info] summary brief summary statistics of loaded pair. normality normality statistics and tests unitroot unit root test for stationarity (ADF, KPSS) [info]Plots:[/info] line line plot of selected target hist histogram with density plot cdf cumulative distribution function bw box and whisker plot acf (partial) auto-correlation function differentials of prices qqplot residuals against standard normal curve [info]Rolling Metrics:[/info] rolling rolling mean and std deviation of prices spread rolling variance and std deviation of prices quantile rolling median and quantile of prices skew rolling skewness of distribution of prices kurtosis rolling kurtosis of distribution of prices [info]Risk:[/info] var display value at risk es display expected shortfall [info]Other:[/info] raw print raw data decompose decomposition in cyclic-trend, season, and residuals of prices cusum detects abrupt changes using cumulative sum algorithm of prices[/cmds] ",142,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def print_help(self): help_text = f console.print(text=help_text, menu=""Forex - Quantitative Analysis"") ``` ###Assistant : Print help[cmds] pick pick target column for analysis[/cmds] [param]Pair: [/param]{self.ticker} [param]Target Column: [/param]{self.target} [cmds] [info]Statistics:[/info] summary brief summary statistics of loaded pair. 
normality normality statistics and tests unitroot unit root test for stationarity (ADF, KPSS) [info]Plots:[/info] line line plot of selected target hist histogram with density plot cdf cumulative distribution function bw box and whisker plot acf (partial) auto-correlation function differentials of prices qqplot residuals against standard normal curve [info]Rolling Metrics:[/info] rolling rolling mean and std deviation of prices spread rolling variance and std deviation of prices quantile rolling median and quantile of prices skew rolling skewness of distribution of prices kurtosis rolling kurtosis of distribution of prices [info]Risk:[/info] var display value at risk es display expected shortfall [info]Other:[/info] raw print raw data decompose decomposition in cyclic-trend, season, and residuals of prices cusum detects abrupt changes using cumulative sum algorithm of prices[/cmds] " 2703,"def chain(self, klass=None): obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, ""_setup_query""): obj._setup_query() return obj "," Return a copy of the current Query that's ready for another operation. The klass argument changes the type of the Query, e.g. UpdateQuery. ",23,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def chain(self, klass=None): obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, ""_setup_query""): obj._setup_query() return obj ``` ###Assistant : Return a copy of the current Query that's ready for another operation. The klass argument changes the type of the Query, e.g. UpdateQuery. " 2704,"def run_test_gbm_non_number_inputs(tmpdir, backend_config): input_features = [binary_feature(), category_feature(encoder={""reduce_output"": ""sum""})] output_feature = binary_feature() output_features = [output_feature] csv_filename = os.path.join(tmpdir, ""training.csv"") dataset_filename = generate_data(input_features, output_features, csv_filename, num_examples=100) config = { MODEL_TYPE: ""gbm"", ""input_features"": input_features, ""output_features"": output_features, TRAINER: {""num_boost_round"": 2}, } model = LudwigModel(config, backend=backend_config) _, _, output_directory = model.train( dataset=dataset_filename, output_directory=tmpdir, skip_save_processed_input=True, skip_save_progress=True, skip_save_unprocessed_output=True, skip_save_log=True, ) model.load(os.path.join(tmpdir, ""api_experiment_run"", ""model"")) preds, _ = model.predict(dataset=dataset_filename, output_directory=output_directory) prob_col = preds[output_feature[""name""] + ""_probabilities""] if backend_config[""type""] == ""ray"": prob_col = prob_col.compute() assert len(prob_col.iloc[0]) == 2 assert prob_col.apply(sum).mean() == pytest.approx(1.0) ",Test that the GBM model can train and predict with non-number inputs.,12,81,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def run_test_gbm_non_number_inputs(tmpdir, backend_config): input_features = [binary_feature(), category_feature(encoder={""reduce_output"": ""sum""})] output_feature = binary_feature() output_features = [output_feature] csv_filename = os.path.join(tmpdir, ""training.csv"") dataset_filename = generate_data(input_features, output_features, csv_filename, num_examples=100) config = { MODEL_TYPE: ""gbm"", ""input_features"": input_features, ""output_features"": output_features, TRAINER: {""num_boost_round"": 2}, } model = LudwigModel(config, backend=backend_config) _, _, output_directory = model.train( dataset=dataset_filename, output_directory=tmpdir, skip_save_processed_input=True, skip_save_progress=True, skip_save_unprocessed_output=True, skip_save_log=True, ) model.load(os.path.join(tmpdir, ""api_experiment_run"", ""model"")) preds, _ = model.predict(dataset=dataset_filename, output_directory=output_directory) prob_col = preds[output_feature[""name""] + ""_probabilities""] if backend_config[""type""] == ""ray"": prob_col = prob_col.compute() assert len(prob_col.iloc[0]) == 2 assert prob_col.apply(sum).mean() == pytest.approx(1.0) ``` ###Assistant : Test that the GBM model can train and predict with non-number inputs. " 2705,"def i2len(self, pkt, val): fld_len = self.fld.i2len(pkt, val) return fld_len + self.padlen(fld_len, pkt) ","get the length of the field, including the padding length",10,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def i2len(self, pkt, val): fld_len = self.fld.i2len(pkt, val) return fld_len + self.padlen(fld_len, pkt) ``` ###Assistant : get the length of the field, including the padding length " 2706,"def get_kerning(self, next): advance = self._metrics.advance - self.width kern = 0. if isinstance(next, Char): kern = self.fontset.get_kern( self.font, self.font_class, self.c, self.fontsize, next.font, next.font_class, next.c, next.fontsize, self.dpi) return advance + kern "," Return the amount of kerning between this and the given character. This method is called when characters are strung together into `Hlist` to create `Kern` nodes. ",26,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_kerning(self, next): advance = self._metrics.advance - self.width kern = 0. if isinstance(next, Char): kern = self.fontset.get_kern( self.font, self.font_class, self.c, self.fontsize, next.font, next.font_class, next.c, next.fontsize, self.dpi) return advance + kern ``` ###Assistant : Return the amount of kerning between this and the given character. This method is called when characters are strung together into `Hlist` to create `Kern` nodes. " 2707,"def _get_num_samples_or_steps(data, steps_per_epoch): flat_inputs = tf.nest.flatten(data) if hasattr(flat_inputs[0], ""shape""): return int(flat_inputs[0].shape[0]), False return steps_per_epoch, True ","Returns number of samples or steps, and whether to use steps count mode.",13,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_num_samples_or_steps(data, steps_per_epoch): flat_inputs = tf.nest.flatten(data) if hasattr(flat_inputs[0], ""shape""): return int(flat_inputs[0].shape[0]), False return steps_per_epoch, True ``` ###Assistant : Returns number of samples or steps, and whether to use steps count mode. 
" 2708,"def test_retrieve_product_attributes_input_type(staff_api_client, product, channel_USD): query = variables = {""channel"": channel_USD.slug} found_products = get_graphql_content( staff_api_client.post_graphql(query, variables) )[""data""][""products""][""edges""] assert len(found_products) == 1 for gql_attr in found_products[0][""node""][""attributes""]: assert len(gql_attr[""values""]) == 1 assert gql_attr[""values""][0][""inputType""] == ""DROPDOWN"" ATTRIBUTES_RESORT_QUERY = "," query ($channel: String){ products(first: 10, channel: $channel) { edges { node { attributes { values { inputType } } } } } } mutation ProductTypeReorderAttributes( $productTypeId: ID! $moves: [ReorderInput!]! $type: ProductAttributeType! ) { productTypeReorderAttributes( productTypeId: $productTypeId moves: $moves type: $type ) { productType { id variantAttributes { id slug } productAttributes { id } } errors { field message code attributes } } } ",64,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_retrieve_product_attributes_input_type(staff_api_client, product, channel_USD): query = variables = {""channel"": channel_USD.slug} found_products = get_graphql_content( staff_api_client.post_graphql(query, variables) )[""data""][""products""][""edges""] assert len(found_products) == 1 for gql_attr in found_products[0][""node""][""attributes""]: assert len(gql_attr[""values""]) == 1 assert gql_attr[""values""][0][""inputType""] == ""DROPDOWN"" ATTRIBUTES_RESORT_QUERY = ``` ###Assistant : query ($channel: String){ products(first: 10, channel: $channel) { edges { node { attributes { values { inputType } } } } } } mutation ProductTypeReorderAttributes( $productTypeId: ID! $moves: [ReorderInput!]! $type: ProductAttributeType! ) { productTypeReorderAttributes( productTypeId: $productTypeId moves: $moves type: $type ) { productType { id variantAttributes { id slug } productAttributes { id } } errors { field message code attributes } } } " 2709,"def apply(self, project_state, schema_editor, collect_sql=False): for operation in self.operations: # If this operation cannot be represented as SQL, place a comment # there instead if collect_sql: schema_editor.collected_sql.append(""--"") if not operation.reduces_to_sql: schema_editor.collected_sql.append( ""-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:"" ) schema_editor.collected_sql.append(""-- %s"" % operation.describe()) schema_editor.collected_sql.append(""--"") if not operation.reduces_to_sql: continue # Save the state before the operation has run old_state = project_state.clone() operation.state_forwards(self.app_label, project_state) # Run the operation atomic_operation = operation.atomic or ( self.atomic and operation.atomic is not False ) if not schema_editor.atomic_migration and atomic_operation: # Force a transaction on a non-transactional-DDL backend or an # atomic operation inside a non-atomic migration. with atomic(schema_editor.connection.alias): operation.database_forwards( self.app_label, schema_editor, old_state, project_state ) else: # Normal behaviour operation.database_forwards( self.app_label, schema_editor, old_state, project_state ) return project_state "," Take a project_state representing all migrations prior to this one and a schema_editor for a live database and apply the migration in a forwards order. Return the resulting project state for efficient reuse by following Migrations. 
",36,124,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply(self, project_state, schema_editor, collect_sql=False): for operation in self.operations: # If this operation cannot be represented as SQL, place a comment # there instead if collect_sql: schema_editor.collected_sql.append(""--"") if not operation.reduces_to_sql: schema_editor.collected_sql.append( ""-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:"" ) schema_editor.collected_sql.append(""-- %s"" % operation.describe()) schema_editor.collected_sql.append(""--"") if not operation.reduces_to_sql: continue # Save the state before the operation has run old_state = project_state.clone() operation.state_forwards(self.app_label, project_state) # Run the operation atomic_operation = operation.atomic or ( self.atomic and operation.atomic is not False ) if not schema_editor.atomic_migration and atomic_operation: # Force a transaction on a non-transactional-DDL backend or an # atomic operation inside a non-atomic migration. with atomic(schema_editor.connection.alias): operation.database_forwards( self.app_label, schema_editor, old_state, project_state ) else: # Normal behaviour operation.database_forwards( self.app_label, schema_editor, old_state, project_state ) return project_state ``` ###Assistant : Take a project_state representing all migrations prior to this one and a schema_editor for a live database and apply the migration in a forwards order. Return the resulting project state for efficient reuse by following Migrations. " 2710,"def safe_to_scale(self) -> bool: # Get the list of nodes. node_set = set(self.node_data_dict.keys()) worker_groups = self._raycluster[""spec""].get(""workerGroupSpecs"", []) # Accumulates the indices of worker groups with non-empty workersToDelete non_empty_worker_group_indices = [] for group_index, worker_group in enumerate(worker_groups): workersToDelete = worker_group.get(""scaleStrategy"", {}).get( ""workersToDelete"", [] ) if workersToDelete: non_empty_worker_group_indices.append(group_index) for worker in workersToDelete: if worker in node_set: # The operator hasn't removed this worker yet. Abort # the autoscaler update. logger.warning(f""Waiting for operator to remove worker {worker}."") return False # All required workersToDelete have been removed. # Clean up the workersToDelete field. patch_payload = [] for group_index in non_empty_worker_group_indices: patch = worker_delete_patch(group_index, workers_to_delete=[]) patch_payload.append(patch) if patch_payload: logger.info(""Cleaning up workers to delete."") logger.info(f""Submitting patch {patch_payload}."") self._submit_raycluster_patch(patch_payload) # It's safe to proceed with the autoscaler update. return True ","Returns False iff non_terminated_nodes contains any pods in the RayCluster's workersToDelete lists. Explanation: If there are any workersToDelete which are non-terminated, we should wait for the operator to do its job and delete those pods. Therefore, we back off the autoscaler update. If, on the other hand, all of the workersToDelete have already been cleaned up, then we patch away the workersToDelete lists and return True. In the future, we may consider having the operator clean up workersToDelete on it own: https://github.com/ray-project/kuberay/issues/733 Note (Dmitri): It is stylistically bad that this function has a side effect. ",95,122,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def safe_to_scale(self) -> bool: # Get the list of nodes. node_set = set(self.node_data_dict.keys()) worker_groups = self._raycluster[""spec""].get(""workerGroupSpecs"", []) # Accumulates the indices of worker groups with non-empty workersToDelete non_empty_worker_group_indices = [] for group_index, worker_group in enumerate(worker_groups): workersToDelete = worker_group.get(""scaleStrategy"", {}).get( ""workersToDelete"", [] ) if workersToDelete: non_empty_worker_group_indices.append(group_index) for worker in workersToDelete: if worker in node_set: # The operator hasn't removed this worker yet. Abort # the autoscaler update. logger.warning(f""Waiting for operator to remove worker {worker}."") return False # All required workersToDelete have been removed. # Clean up the workersToDelete field. patch_payload = [] for group_index in non_empty_worker_group_indices: patch = worker_delete_patch(group_index, workers_to_delete=[]) patch_payload.append(patch) if patch_payload: logger.info(""Cleaning up workers to delete."") logger.info(f""Submitting patch {patch_payload}."") self._submit_raycluster_patch(patch_payload) # It's safe to proceed with the autoscaler update. return True ``` ###Assistant : Returns False iff non_terminated_nodes contains any pods in the RayCluster's workersToDelete lists. Explanation: If there are any workersToDelete which are non-terminated, we should wait for the operator to do its job and delete those pods. Therefore, we back off the autoscaler update. If, on the other hand, all of the workersToDelete have already been cleaned up, then we patch away the workersToDelete lists and return True. In the future, we may consider having the operator clean up workersToDelete on its own: https://github.com/ray-project/kuberay/issues/733 Note (Dmitri): It is stylistically bad that this function has a side effect. " 2711,"def get_distance(self, f, value, lookup_type): if not value: return [] value = value[0] if isinstance(value, Distance): if f.geodetic(self.connection): dist_param = value.m else: dist_param = getattr( value, Distance.unit_attname(f.units_name(self.connection)) ) else: dist_param = value # dwithin lookups on Oracle require a special string parameter # that starts with ""distance="". if lookup_type == ""dwithin"": dist_param = ""distance=%s"" % dist_param return [dist_param] "," Return the distance parameters given the value and the lookup type. On Oracle, geometry columns with a geodetic coordinate system behave implicitly like a geography column, and thus meters will be used as the distance parameter on them. ",38,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_distance(self, f, value, lookup_type): if not value: return [] value = value[0] if isinstance(value, Distance): if f.geodetic(self.connection): dist_param = value.m else: dist_param = getattr( value, Distance.unit_attname(f.units_name(self.connection)) ) else: dist_param = value # dwithin lookups on Oracle require a special string parameter # that starts with ""distance="". if lookup_type == ""dwithin"": dist_param = ""distance=%s"" % dist_param return [dist_param] ``` ###Assistant : Return the distance parameters given the value and the lookup type. On Oracle, geometry columns with a geodetic coordinate system behave implicitly like a geography column, and thus meters will be used as the distance parameter on them. 
" 2712,"def test_standard_get_document_model(self): del settings.WAGTAILDOCS_DOCUMENT_MODEL from wagtail.documents.models import Document self.assertIs(get_document_model(), Document) ",Test get_document_model with no WAGTAILDOCS_DOCUMENT_MODEL,5,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_standard_get_document_model(self): del settings.WAGTAILDOCS_DOCUMENT_MODEL from wagtail.documents.models import Document self.assertIs(get_document_model(), Document) ``` ###Assistant : Test get_document_model with no WAGTAILDOCS_DOCUMENT_MODEL " 2713,"def _safe_assign(X, values, *, row_indexer=None, column_indexer=None): row_indexer = slice(None, None, None) if row_indexer is None else row_indexer column_indexer = ( slice(None, None, None) if column_indexer is None else column_indexer ) if hasattr(X, ""iloc""): # pandas dataframe X.iloc[row_indexer, column_indexer] = values else: # numpy array or sparse matrix X[row_indexer, column_indexer] = values ","Safe assignment to a numpy array, sparse matrix, or pandas dataframe. Parameters ---------- X : {ndarray, sparse-matrix, dataframe} Array to be modified. It is expected to be 2-dimensional. values : ndarray The values to be assigned to `X`. row_indexer : array-like, dtype={int, bool}, default=None A 1-dimensional array to select the rows of interest. If `None`, all rows are selected. column_indexer : array-like, dtype={int, bool}, default=None A 1-dimensional array to select the columns of interest. If `None`, all columns are selected. ",80,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _safe_assign(X, values, *, row_indexer=None, column_indexer=None): row_indexer = slice(None, None, None) if row_indexer is None else row_indexer column_indexer = ( slice(None, None, None) if column_indexer is None else column_indexer ) if hasattr(X, ""iloc""): # pandas dataframe X.iloc[row_indexer, column_indexer] = values else: # numpy array or sparse matrix X[row_indexer, column_indexer] = values ``` ###Assistant : Safe assignment to a numpy array, sparse matrix, or pandas dataframe. Parameters ---------- X : {ndarray, sparse-matrix, dataframe} Array to be modified. It is expected to be 2-dimensional. values : ndarray The values to be assigned to `X`. row_indexer : array-like, dtype={int, bool}, default=None A 1-dimensional array to select the rows of interest. If `None`, all rows are selected. column_indexer : array-like, dtype={int, bool}, default=None A 1-dimensional array to select the columns of interest. If `None`, all columns are selected. " 2714,"def copy_sign(self, a, b): a = _convert_other(a, raiseit=True) return a.copy_sign(b) ","Copies the second operand's sign to the first one. In detail, it returns a copy of the first operand with the sign equal to the sign of the second operand. >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33')) Decimal('1.50') >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33')) Decimal('1.50') >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33')) Decimal('-1.50') >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33')) Decimal('-1.50') >>> ExtendedContext.copy_sign(1, -2) Decimal('-1') >>> ExtendedContext.copy_sign(Decimal(1), -2) Decimal('-1') >>> ExtendedContext.copy_sign(1, Decimal(-2)) Decimal('-1') ",60,10,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def copy_sign(self, a, b): a = _convert_other(a, raiseit=True) return a.copy_sign(b) ``` ###Assistant : Copies the second operand's sign to the first one. In detail, it returns a copy of the first operand with the sign equal to the sign of the second operand. >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33')) Decimal('1.50') >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33')) Decimal('1.50') >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33')) Decimal('-1.50') >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33')) Decimal('-1.50') >>> ExtendedContext.copy_sign(1, -2) Decimal('-1') >>> ExtendedContext.copy_sign(Decimal(1), -2) Decimal('-1') >>> ExtendedContext.copy_sign(1, Decimal(-2)) Decimal('-1') " 2715,"def get_validated_ordering(self): orderable_fields = self.orderable_fields or () ordering = {} if self.is_export: # Revert to CSV order_by submit_time ascending for backwards compatibility default_ordering = self.ordering_csv or () else: default_ordering = self.ordering or () if isinstance(default_ordering, str): default_ordering = (default_ordering,) ordering_strs = self.request.GET.getlist('order_by') or list(default_ordering) for order in ordering_strs: try: _, prefix, field_name = order.rpartition('-') if field_name in orderable_fields: ordering[field_name] = ( prefix, 'descending' if prefix == '-' else 'ascending' ) except (IndexError, ValueError): continue # invalid ordering specified, skip it return ordering ", Return a dict of field names with ordering labels if ordering is valid ,13,82,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_validated_ordering(self): orderable_fields = self.orderable_fields or () ordering = {} if self.is_export: # Revert to CSV order_by submit_time ascending for backwards compatibility default_ordering = self.ordering_csv or () else: default_ordering = self.ordering or () if isinstance(default_ordering, str): default_ordering = (default_ordering,) ordering_strs = self.request.GET.getlist('order_by') or list(default_ordering) for order in ordering_strs: try: _, prefix, field_name = order.rpartition('-') if field_name in orderable_fields: ordering[field_name] = ( prefix, 'descending' if prefix == '-' else 'ascending' ) except (IndexError, ValueError): continue # invalid ordering specified, skip it return ordering ``` ###Assistant : Return a dict of field names with ordering labels if ordering is valid " 2716,"def test_chordal_cycle_graph(p): G = nx.chordal_cycle_graph(p) assert len(G) == p # TODO The second largest eigenvalue should be smaller than a constant, # independent of the number of nodes in the graph: # # eigs = sorted(sp.linalg.eigvalsh(nx.adjacency_matrix(G).toarray())) # assert_less(eigs[-2], ...) # @pytest.mark.parametrize(""p"", (3, 5, 7, 11, 13)) # Primes",Test for the :func:`networkx.chordal_cycle_graph` function.,5,48,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_chordal_cycle_graph(p): G = nx.chordal_cycle_graph(p) assert len(G) == p # TODO The second largest eigenvalue should be smaller than a constant, # independent of the number of nodes in the graph: # # eigs = sorted(sp.linalg.eigvalsh(nx.adjacency_matrix(G).toarray())) # assert_less(eigs[-2], ...) 
# @pytest.mark.parametrize(""p"", (3, 5, 7, 11, 13)) # Primes ``` ###Assistant : Test for the :func:`networkx.chordal_cycle_graph` function. " 2717,"def convert_dataset_split_sizes(left_size,right_size,total_size): left_size_type = type(left_size) right_size_type = type(right_size) if left_size is not None and left_size_type not in [int,float]: raise ValueError(f'Invalid `left_size` type Got {left_size_type}' 'It should be one of float,int or None') if right_size is not None and right_size_type not in [int,float]: raise ValueError(f'Invalid `right_size` type Got {right_size_type}' 'It should be one of float,int or None') if (left_size_type == int and (left_size <= 0 or left_size>= total_size) or left_size_type == float and (left_size <= 0 or left_size>= 1) ): raise ValueError('`left_size` should be either a positive integer' f'and smaller than {total_size} or a float ' 'within the range `[0, 1]`') if (right_size_type == int and (right_size <= 0 or right_size>= total_size) or right_size_type == float and (right_size <= 0 or right_size>= 1)): raise ValueError('`right_size` should be either a positive integer ' f'and smaller than {total_size} or' 'a float within the range `[0, 1]`') if right_size_type == left_size_type == float and right_size + left_size > 1: raise ValueError('sum of `left_size` and `right_size`' ' should be within `[0,1]`' f'Got {right_size + left_size} ,' 'reduce the `left_size` or `right_size`') if left_size_type == float: left_size = math.ceil(left_size*total_size) else: left_size = float(left_size) if right_size_type == float: right_size = math.ceil(right_size*total_size) else: right_size = float(right_size) if left_size is None: left_size = total_size - right_size elif right_size is None: right_size = total_size - left_size if left_size + right_size > total_size: raise ValueError('The sum of `left_size` and `right_size`' f' should be smaller than the samples {total_size} ' ' reduce `left_size` or `right_size` ' ) if left_size == 0: raise ValueError(f'with dataset of length={total_size}' '`left_size`={left_size} and `right_size`={right_size} ' 'resulting left dataset split will be empty, ' 'adjust any of the aforementioned parameters') left_size,right_size = int(left_size) ,int(right_size) return left_size,right_size ","Helper function to convert left_size/right_size relative to dataset's size ",9,278,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def convert_dataset_split_sizes(left_size,right_size,total_size): left_size_type = type(left_size) right_size_type = type(right_size) if left_size is not None and left_size_type not in [int,float]: raise ValueError(f'Invalid `left_size` type Got {left_size_type}' 'It should be one of float,int or None') if right_size is not None and right_size_type not in [int,float]: raise ValueError(f'Invalid `right_size` type Got {right_size_type}' 'It should be one of float,int or None') if (left_size_type == int and (left_size <= 0 or left_size>= total_size) or left_size_type == float and (left_size <= 0 or left_size>= 1) ): raise ValueError('`left_size` should be either a positive integer' f'and smaller than {total_size} or a float ' 'within the range `[0, 1]`') if (right_size_type == int and (right_size <= 0 or right_size>= total_size) or right_size_type == float and (right_size <= 0 or right_size>= 1)): raise ValueError('`right_size` should be either a positive integer ' f'and smaller than {total_size} or' 'a float within the range `[0, 1]`') if right_size_type == left_size_type == float and right_size + left_size > 1: raise ValueError('sum of `left_size` and `right_size`' ' should be within `[0,1]`' f'Got {right_size + left_size} ,' 'reduce the `left_size` or `right_size`') if left_size_type == float: left_size = math.ceil(left_size*total_size) else: left_size = float(left_size) if right_size_type == float: right_size = math.ceil(right_size*total_size) else: right_size = float(right_size) if left_size is None: left_size = total_size - right_size elif right_size is None: right_size = total_size - left_size if left_size + right_size > total_size: raise ValueError('The sum of `left_size` and `right_size`' f' should be smaller than the samples {total_size} ' ' reduce `left_size` or `right_size` ' ) if left_size == 0: raise ValueError(f'with dataset of length={total_size}' '`left_size`={left_size} and `right_size`={right_size} ' 'resulting left dataset split will be empty, ' 'adjust any of the aforementioned parameters') left_size,right_size = int(left_size) ,int(right_size) return left_size,right_size ``` ###Assistant : Helper function to convert left_size/right_size relative to dataset's size " 2718,"def active_count(self): return self.order_by().exclude(inventory_sources__source='controller').values(name_lower=Lower('name')).distinct().count() ","Return count of active, unique hosts for licensing. Construction of query involves: - remove any ordering specified in model's Meta - Exclude hosts sourced from another Tower - Restrict the query to only return the name column - Only consider results that are unique - Return the count of this query ",51,4,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def active_count(self): return self.order_by().exclude(inventory_sources__source='controller').values(name_lower=Lower('name')).distinct().count() ``` ###Assistant : Return count of active, unique hosts for licensing. 
Construction of query involves: - remove any ordering specified in model's Meta - Exclude hosts sourced from another Tower - Restrict the query to only return the name column - Only consider results that are unique - Return the count of this query " 2719,"async def test_set_avatar_incorrect_mime_type(self) -> None: handler = self.hs.get_sso_handler() # any random user works since image check is supposed to fail user_id = ""@sso-user:test"" self.assertFalse( self.get_success(handler.set_avatar(user_id, ""http://my.server/me.png"")) ) ",Tests that saving an avatar fails when its mime type is not allowed,13,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_set_avatar_incorrect_mime_type(self) -> None: handler = self.hs.get_sso_handler() # any random user works since image check is supposed to fail user_id = ""@sso-user:test"" self.assertFalse( self.get_success(handler.set_avatar(user_id, ""http://my.server/me.png"")) ) ``` ###Assistant : Tests that saving an avatar fails when its mime type is not allowed " 2720,"def layer_uses_training_bool(layer): if layer._expects_training_arg: # pylint: disable=protected-access return True visited = {layer} to_visit = list_all_layers(layer) while to_visit: layer = to_visit.pop() if layer in visited: continue if getattr(layer, ""_expects_training_arg"", True): return True visited.add(layer) to_visit.extend(list_all_layers(layer)) return False ",Returns whether this layer or any of its children uses the training arg.,13,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def layer_uses_training_bool(layer): if layer._expects_training_arg: # pylint: disable=protected-access return True visited = {layer} to_visit = list_all_layers(layer) while to_visit: layer = to_visit.pop() if layer in visited: continue if getattr(layer, ""_expects_training_arg"", True): return True visited.add(layer) to_visit.extend(list_all_layers(layer)) return False ``` ###Assistant : Returns whether this layer or any of its children uses the training arg. " 2721,"async def test_edgeql_for_in_computable_09(self): # This is basically test_edgeql_for_in_computable_01 but with # a WITH binding in front of the whole shape await self.assert_query_result( r"," WITH U := ( SELECT User { select_deck := ( FOR letter IN {'I', 'B'} UNION ( SELECT User.deck { name, # just define an ad-hoc link prop",28,23,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python async def test_edgeql_for_in_computable_09(self): # This is basically test_edgeql_for_in_computable_01 but with # a WITH binding in front of the whole shape await self.assert_query_result( r ``` ###Assistant : WITH U := ( SELECT User { select_deck := ( FOR letter IN {'I', 'B'} UNION ( SELECT User.deck { name, # just define an ad-hoc link prop " 2722,"def get_actual_details(name, filters): budget_against = frappe.scrub(filters.get(""budget_against"")) cond = """" if filters.get(""budget_against"") == ""Cost Center"": cc_lft, cc_rgt = frappe.db.get_value(""Cost Center"", name, [""lft"", ""rgt""]) cond = .format( lft=cc_lft, rgt=cc_rgt ) ac_details = frappe.db.sql( .format( tab=filters.budget_against, budget_against=budget_against, cond=cond ), (filters.from_fiscal_year, filters.to_fiscal_year, name), as_dict=1, ) cc_actual_details = {} for d in ac_details: cc_actual_details.setdefault(d.account, []).append(d) return cc_actual_details "," and lft >= ""{lft}"" and rgt <= ""{rgt}"" select gl.account, gl.debit, gl.credit, gl.fiscal_year, MONTHNAME(gl.posting_date) as month_name, b.{budget_against} as budget_against from `tabGL Entry` gl, `tabBudget Account` ba, `tabBudget` b where b.name = ba.parent and b.docstatus = 1 and ba.account=gl.account and b.{budget_against} = gl.{budget_against} and gl.fiscal_year between %s and %s and b.{budget_against} = %s and exists( select name from `tab{tab}` where name = gl.{budget_against} {cond} ) group by gl.name order by gl.fiscal_year ",70,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_actual_details(name, filters): budget_against = frappe.scrub(filters.get(""budget_against"")) cond = """" if filters.get(""budget_against"") == ""Cost Center"": cc_lft, cc_rgt = frappe.db.get_value(""Cost Center"", name, [""lft"", ""rgt""]) cond = .format( lft=cc_lft, rgt=cc_rgt ) ac_details = frappe.db.sql( .format( tab=filters.budget_against, budget_against=budget_against, cond=cond ), (filters.from_fiscal_year, filters.to_fiscal_year, name), as_dict=1, ) cc_actual_details = {} for d in ac_details: cc_actual_details.setdefault(d.account, []).append(d) return cc_actual_details ``` ###Assistant : and lft >= ""{lft}"" and rgt <= ""{rgt}"" select gl.account, gl.debit, gl.credit, gl.fiscal_year, MONTHNAME(gl.posting_date) as month_name, b.{budget_against} as budget_against from `tabGL Entry` gl, `tabBudget Account` ba, `tabBudget` b where b.name = ba.parent and b.docstatus = 1 and ba.account=gl.account and b.{budget_against} = gl.{budget_against} and gl.fiscal_year between %s and %s and b.{budget_against} = %s and exists( select name from `tab{tab}` where name = gl.{budget_against} {cond} ) group by gl.name order by gl.fiscal_year " 2723,"def print_index(toc): dash = ""-""*(100 - 7) space = "" ""*47 print(f""{space}INDEX"") print(f""\n\nName : {dash} PageNo.\n\n\n"") for topic in toc: eq_dash = ""-""*(100 - len(topic[1])) print(f""{topic[1]} {eq_dash} {topic[2]}"") ","Prints out the index in proper format with title name and page number Args: toc (nested list): toc[1] - Topic name toc[2] - Page number ",25,28,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def print_index(toc): dash = ""-""*(100 - 7) space = "" ""*47 print(f""{space}INDEX"") print(f""\n\nName : {dash} PageNo.\n\n\n"") for topic in toc: eq_dash = ""-""*(100 - len(topic[1])) print(f""{topic[1]} {eq_dash} {topic[2]}"") ``` ###Assistant : Prints out the index in proper format with title name and page number Args: toc (nested list): toc[1] - Topic name toc[2] - Page number " 2724,"def pie(self, X, win=None, env=None, opts=None): X = np.squeeze(X) assert X.ndim == 1, ""X should be one-dimensional"" assert np.all(np.greater_equal(X, 0)), ""X cannot contain negative values"" opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) data = [ { ""values"": X.tolist(), ""labels"": opts.get(""legend""), ""type"": ""pie"", } ] return self._send( { ""data"": data, ""win"": win, ""eid"": env, ""layout"": _opts2layout(opts), ""opts"": opts, } ) "," This function draws a pie chart based on the `N` tensor `X`. The following `opts` are supported: - `opts.legend`: `list` containing legend names ",23,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pie(self, X, win=None, env=None, opts=None): X = np.squeeze(X) assert X.ndim == 1, ""X should be one-dimensional"" assert np.all(np.greater_equal(X, 0)), ""X cannot contain negative values"" opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) data = [ { ""values"": X.tolist(), ""labels"": opts.get(""legend""), ""type"": ""pie"", } ] return self._send( { ""data"": data, ""win"": win, ""eid"": env, ""layout"": _opts2layout(opts), ""opts"": opts, } ) ``` ###Assistant : This function draws a pie chart based on the `N` tensor `X`. The following `opts` are supported: - `opts.legend`: `list` containing legend names " 2725,"def dry_run(self) -> None: pod = self.build_pod_request_obj() print(yaml.dump(prune_dict(pod.to_dict(), mode='strict'))) "," Prints out the pod definition that would be created by this operator. Does not include labels specific to the task instance (since there isn't one in a dry_run) and excludes all empty elements. ",33,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dry_run(self) -> None: pod = self.build_pod_request_obj() print(yaml.dump(prune_dict(pod.to_dict(), mode='strict'))) ``` ###Assistant : Prints out the pod definition that would be created by this operator. Does not include labels specific to the task instance (since there isn't one in a dry_run) and excludes all empty elements. " 2726,"def ExponentialPower(name, mu, alpha, beta): r return rv(name, ExponentialPowerDistribution, (mu, alpha, beta)) #------------------------------------------------------------------------------- # F distribution --------------------------------------------------------------- "," Create a Continuous Random Variable with Exponential Power distribution. This distribution is known also as Generalized Normal distribution version 1. Explanation =========== The density of the Exponential Power distribution is given by .. math:: f(x) := \frac{\beta}{2\alpha\Gamma(\frac{1}{\beta})} e^{{-(\frac{|x - \mu|}{\alpha})^{\beta}}} with :math:`x \in [ - \infty, \infty ]`. Parameters ========== mu : Real number A location. alpha : Real number,`\alpha > 0` A scale. beta : Real number, `\beta > 0` A shape. 
Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import ExponentialPower, density, cdf >>> from sympy import Symbol, pprint >>> z = Symbol(""z"") >>> mu = Symbol(""mu"") >>> alpha = Symbol(""alpha"", positive=True) >>> beta = Symbol(""beta"", positive=True) >>> X = ExponentialPower(""x"", mu, alpha, beta) >>> pprint(density(X)(z), use_unicode=False) beta /|mu - z|\ -|--------| \ alpha / beta*e --------------------- / 1 \ 2*alpha*Gamma|----| \beta/ >>> cdf(X)(z) 1/2 + lowergamma(1/beta, (Abs(mu - z)/alpha)**beta)*sign(-mu + z)/(2*gamma(1/beta)) References ========== .. [1] https://reference.wolfram.com/language/ref/ExponentialPowerDistribution.html .. [2] https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1 ",152,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ExponentialPower(name, mu, alpha, beta): r return rv(name, ExponentialPowerDistribution, (mu, alpha, beta)) #------------------------------------------------------------------------------- # F distribution --------------------------------------------------------------- ``` ###Assistant : Create a Continuous Random Variable with Exponential Power distribution. This distribution is known also as Generalized Normal distribution version 1. Explanation =========== The density of the Exponential Power distribution is given by .. math:: f(x) := \frac{\beta}{2\alpha\Gamma(\frac{1}{\beta})} e^{{-(\frac{|x - \mu|}{\alpha})^{\beta}}} with :math:`x \in [ - \infty, \infty ]`. Parameters ========== mu : Real number A location. alpha : Real number,`\alpha > 0` A scale. beta : Real number, `\beta > 0` A shape. Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import ExponentialPower, density, cdf >>> from sympy import Symbol, pprint >>> z = Symbol(""z"") >>> mu = Symbol(""mu"") >>> alpha = Symbol(""alpha"", positive=True) >>> beta = Symbol(""beta"", positive=True) >>> X = ExponentialPower(""x"", mu, alpha, beta) >>> pprint(density(X)(z), use_unicode=False) beta /|mu - z|\ -|--------| \ alpha / beta*e --------------------- / 1 \ 2*alpha*Gamma|----| \beta/ >>> cdf(X)(z) 1/2 + lowergamma(1/beta, (Abs(mu - z)/alpha)**beta)*sign(-mu + z)/(2*gamma(1/beta)) References ========== .. [1] https://reference.wolfram.com/language/ref/ExponentialPowerDistribution.html .. [2] https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1 " 2727,"def get_palette(num_cls): n = num_cls palette = [0] * (n * 3) for j in range(0, n): lab = j palette[j * 3 + 0] = 0 palette[j * 3 + 1] = 0 palette[j * 3 + 2] = 0 i = 0 while lab: palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) i += 1 lab >>= 3 return palette "," Returns the color map for visualizing the segmentation mask. Args: num_cls: Number of classes Returns: The color map ",18,99,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_palette(num_cls): n = num_cls palette = [0] * (n * 3) for j in range(0, n): lab = j palette[j * 3 + 0] = 0 palette[j * 3 + 1] = 0 palette[j * 3 + 2] = 0 i = 0 while lab: palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) i += 1 lab >>= 3 return palette ``` ###Assistant : Returns the color map for visualizing the segmentation mask. 
Args: num_cls: Number of classes Returns: The color map " 2728,"def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs): # Batch blobs and diffs. all_outs = {out: [] for out in set(self.outputs + (blobs or []))} all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))} forward_batches = self._batch({in_: kwargs[in_] for in_ in self.inputs if in_ in kwargs}) backward_batches = self._batch({out: kwargs[out] for out in self.outputs if out in kwargs}) # Collect outputs from batches (and heed lack of forward/backward batches). for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}): batch_blobs = self.forward(blobs=blobs, **fb) batch_diffs = self.backward(diffs=diffs, **bb) for out, out_blobs in six.iteritems(batch_blobs): all_outs[out].extend(out_blobs.copy()) for diff, out_diffs in six.iteritems(batch_diffs): all_diffs[diff].extend(out_diffs.copy()) # Package in ndarray. for out, diff in zip(all_outs, all_diffs): all_outs[out] = np.asarray(all_outs[out]) all_diffs[diff] = np.asarray(all_diffs[diff]) # Discard padding at the end and package in ndarray. pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs))) if pad: for out, diff in zip(all_outs, all_diffs): all_outs[out] = all_outs[out][:-pad] all_diffs[diff] = all_diffs[diff][:-pad] return all_outs, all_diffs "," Run net forward + backward in batches. Parameters ---------- blobs: list of blobs to extract as in forward() diffs: list of diffs to extract as in backward() kwargs: Keys are input (for forward) and output (for backward) blob names and values are ndarrays. Refer to forward() and backward(). Prefilled variants are called for lack of input or output blobs. Returns ------- all_blobs: {blob name: blob ndarray} dict. all_diffs: {blob name: diff ndarray} dict. ",73,144,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs): # Batch blobs and diffs. all_outs = {out: [] for out in set(self.outputs + (blobs or []))} all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))} forward_batches = self._batch({in_: kwargs[in_] for in_ in self.inputs if in_ in kwargs}) backward_batches = self._batch({out: kwargs[out] for out in self.outputs if out in kwargs}) # Collect outputs from batches (and heed lack of forward/backward batches). for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}): batch_blobs = self.forward(blobs=blobs, **fb) batch_diffs = self.backward(diffs=diffs, **bb) for out, out_blobs in six.iteritems(batch_blobs): all_outs[out].extend(out_blobs.copy()) for diff, out_diffs in six.iteritems(batch_diffs): all_diffs[diff].extend(out_diffs.copy()) # Package in ndarray. for out, diff in zip(all_outs, all_diffs): all_outs[out] = np.asarray(all_outs[out]) all_diffs[diff] = np.asarray(all_diffs[diff]) # Discard padding at the end and package in ndarray. pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs))) if pad: for out, diff in zip(all_outs, all_diffs): all_outs[out] = all_outs[out][:-pad] all_diffs[diff] = all_diffs[diff][:-pad] return all_outs, all_diffs ``` ###Assistant : Run net forward + backward in batches. Parameters ---------- blobs: list of blobs to extract as in forward() diffs: list of diffs to extract as in backward() kwargs: Keys are input (for forward) and output (for backward) blob names and values are ndarrays. Refer to forward() and backward(). 
Prefilled variants are called for lack of input or output blobs. Returns ------- all_blobs: {blob name: blob ndarray} dict. all_diffs: {blob name: diff ndarray} dict. " 2729,"def test_user_does_not_exist(self) -> None: url = ""/_synapse/admin/v2/users/@unknown_person:test/devices"" channel = self.make_request( ""GET"", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body[""errcode""]) "," Tests that a lookup for a user that does not exist returns a 404 ",14,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_user_does_not_exist(self) -> None: url = ""/_synapse/admin/v2/users/@unknown_person:test/devices"" channel = self.make_request( ""GET"", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body[""errcode""]) ``` ###Assistant : Tests that a lookup for a user that does not exist returns a 404 " 2730,"def send_ping(self) -> None: now = self.clock.time_msec() if self.time_we_closed: if now - self.time_we_closed > PING_TIMEOUT_MS: logger.info( ""[%s] Failed to close connection gracefully, aborting"", self.id() ) assert self.transport is not None self.transport.abortConnection() else: if now - self.last_sent_command >= PING_TIME: self.send_command(PingCommand(str(now))) if ( self.received_ping and now - self.last_received_command > PING_TIMEOUT_MS ): logger.info( ""[%s] Connection hasn't received command in %r ms. Closing."", self.id(), now - self.last_received_command, ) self.send_error(""ping timeout"") ","Periodically sends a ping and checks if we should close the connection due to the other side timing out. ",19,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def send_ping(self) -> None: now = self.clock.time_msec() if self.time_we_closed: if now - self.time_we_closed > PING_TIMEOUT_MS: logger.info( ""[%s] Failed to close connection gracefully, aborting"", self.id() ) assert self.transport is not None self.transport.abortConnection() else: if now - self.last_sent_command >= PING_TIME: self.send_command(PingCommand(str(now))) if ( self.received_ping and now - self.last_received_command > PING_TIMEOUT_MS ): logger.info( ""[%s] Connection hasn't received command in %r ms. Closing."", self.id(), now - self.last_received_command, ) self.send_error(""ping timeout"") ``` ###Assistant : Periodically sends a ping and checks if we should close the connection due to the other side timing out. " 2731,"def workflow_logging_context(job_id) -> None: node = ray.worker._global_node original_out_file, original_err_file = node.get_log_file_handles( get_worker_log_file_name(""WORKER"") ) out_file, err_file = node.get_log_file_handles( get_worker_log_file_name(""WORKER"", job_id) ) try: configure_log_file(out_file, err_file) yield finally: configure_log_file(original_out_file, original_err_file) ","Initialize the workflow logging context. Workflow executions are running as remote functions from WorkflowManagementActor. Without logging redirection, workflow inner execution logs will be pushed to the driver that initially created WorkflowManagementActor rather than the driver that actually submits the current workflow execution. 
We use this context manager to re-configure the log files to send the logs to the correct driver, and to restore the log files once the execution is done. Args: job_id: The ID of the job that submits the workflow execution. ",83,27,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def workflow_logging_context(job_id) -> None: node = ray.worker._global_node original_out_file, original_err_file = node.get_log_file_handles( get_worker_log_file_name(""WORKER"") ) out_file, err_file = node.get_log_file_handles( get_worker_log_file_name(""WORKER"", job_id) ) try: configure_log_file(out_file, err_file) yield finally: configure_log_file(original_out_file, original_err_file) ``` ###Assistant : Initialize the workflow logging context. Workflow executions are running as remote functions from WorkflowManagementActor. Without logging redirection, workflow inner execution logs will be pushed to the driver that initially created WorkflowManagementActor rather than the driver that actually submits the current workflow execution. We use this context manager to re-configure the log files to send the logs to the correct driver, and to restore the log files once the execution is done. Args: job_id: The ID of the job that submits the workflow execution. " 2732,"def console_entry_point(): if ""--profile"" in sys.argv: with cProfile.Profile() as profile: entry_point() stats = pstats.Stats(profile) stats.sort_stats(pstats.SortKey.TIME) # Use snakeviz to visualize the profile stats.dump_stats(""spotdl.profile"") else: entry_point() "," Wrapper around `entry_point` so we can profile the code ",9,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def console_entry_point(): if ""--profile"" in sys.argv: with cProfile.Profile() as profile: entry_point() stats = pstats.Stats(profile) stats.sort_stats(pstats.SortKey.TIME) # Use snakeviz to visualize the profile stats.dump_stats(""spotdl.profile"") else: entry_point() ``` ###Assistant : Wrapper around `entry_point` so we can profile the code " 2733,"def callbacks(self, callbacks_class) -> ""TrainerConfig"": self.callbacks_class = callbacks_class return self ","Sets the callbacks configuration. Args: callbacks_class: Callbacks class, whose methods will be run during various phases of training and environment sample collection. See the `DefaultCallbacks` class and `examples/custom_metrics_and_callbacks.py` for more usage information. Returns: This updated TrainerConfig object. ",37,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def callbacks(self, callbacks_class) -> ""TrainerConfig"": self.callbacks_class = callbacks_class return self ``` ###Assistant : Sets the callbacks configuration. Args: callbacks_class: Callbacks class, whose methods will be run during various phases of training and environment sample collection. See the `DefaultCallbacks` class and `examples/custom_metrics_and_callbacks.py` for more usage information. Returns: This updated TrainerConfig object. 
" 2734,"def test_get_dynamic_sampling_after_migrating_to_new_plan_default_biases(self): self.project.update_option(""sentry:dynamic_sampling"", self.dynamic_sampling_data) with Feature( { self.universal_ds_flag: True, self.old_ds_flag: True, self.new_ds_flag: True, } ): response = self.get_success_response( self.organization.slug, self.project.slug, method=""get"" ) assert response.data[""dynamicSampling""] is None assert response.data[""dynamicSamplingBiases""] == DEFAULT_BIASES "," Tests the case when an organization was in EA/LA and has setup previously Dynamic Sampling rules, and now they have migrated to an AM2 plan, but haven't manipulated the bias toggles yet so they get the default biases. This also ensures that they no longer receive the deprecated dynamic sampling rules. ",51,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_dynamic_sampling_after_migrating_to_new_plan_default_biases(self): self.project.update_option(""sentry:dynamic_sampling"", self.dynamic_sampling_data) with Feature( { self.universal_ds_flag: True, self.old_ds_flag: True, self.new_ds_flag: True, } ): response = self.get_success_response( self.organization.slug, self.project.slug, method=""get"" ) assert response.data[""dynamicSampling""] is None assert response.data[""dynamicSamplingBiases""] == DEFAULT_BIASES ``` ###Assistant : Tests the case when an organization was in EA/LA and has setup previously Dynamic Sampling rules, and now they have migrated to an AM2 plan, but haven't manipulated the bias toggles yet so they get the default biases. This also ensures that they no longer receive the deprecated dynamic sampling rules. " 2735,"def strtobool(val): # type: (str) -> int val = val.lower() if val in (""y"", ""yes"", ""t"", ""true"", ""on"", ""1""): return 1 elif val in (""n"", ""no"", ""f"", ""false"", ""off"", ""0""): return 0 else: raise ValueError(f""invalid truth value {val!r}"") ","Convert a string representation of truth to true (1) or false (0). True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 'val' is anything else. ",39,38,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def strtobool(val): # type: (str) -> int val = val.lower() if val in (""y"", ""yes"", ""t"", ""true"", ""on"", ""1""): return 1 elif val in (""n"", ""no"", ""f"", ""false"", ""off"", ""0""): return 0 else: raise ValueError(f""invalid truth value {val!r}"") ``` ###Assistant : Convert a string representation of truth to true (1) or false (0). True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 'val' is anything else. " 2736,"def convert_xunits(self, x): ax = getattr(self, 'axes', None) if ax is None or ax.xaxis is None: return x return ax.xaxis.convert_units(x) "," Convert *x* using the unit type of the xaxis. If the artist is not contained in an Axes or if the xaxis does not have units, *x* itself is returned. ",30,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert_xunits(self, x): ax = getattr(self, 'axes', None) if ax is None or ax.xaxis is None: return x return ax.xaxis.convert_units(x) ``` ###Assistant : Convert *x* using the unit type of the xaxis. 
If the artist is not contained in an Axes or if the xaxis does not have units, *x* itself is returned. " 2737,"def value_as_datetime(self) -> tp.Tuple[datetime, datetime] | None: if self.value is None: return None v1, v2 = self.value if isinstance(v1, numbers.Number): d1 = datetime.utcfromtimestamp(v1 / 1000) else: d1 = v1 if isinstance(v2, numbers.Number): d2 = datetime.utcfromtimestamp(v2 / 1000) else: d2 = v2 return d1, d2 value = NonNullable(Tuple(Datetime, Datetime), help=) value_throttled = Readonly(NonNullable(Tuple(Datetime, Datetime)), help=) start = NonNullable(Datetime, help=) end = NonNullable(Datetime, help=) step = Int(default=3_600_000, help=) format = Override(default=""%d %b %Y %H:%M:%S"") #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- "," Convenience property to retrieve the value tuple as a tuple of datetime objects. Initial or selected range. Initial or selected value, throttled to report only on mouseup. The minimum allowable value. The maximum allowable value. The step between consecutive values, in units of milliseconds. Default is one hour. ",48,81,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def value_as_datetime(self) -> tp.Tuple[datetime, datetime] | None: if self.value is None: return None v1, v2 = self.value if isinstance(v1, numbers.Number): d1 = datetime.utcfromtimestamp(v1 / 1000) else: d1 = v1 if isinstance(v2, numbers.Number): d2 = datetime.utcfromtimestamp(v2 / 1000) else: d2 = v2 return d1, d2 value = NonNullable(Tuple(Datetime, Datetime), help=) value_throttled = Readonly(NonNullable(Tuple(Datetime, Datetime)), help=) start = NonNullable(Datetime, help=) end = NonNullable(Datetime, help=) step = Int(default=3_600_000, help=) format = Override(default=""%d %b %Y %H:%M:%S"") #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- ``` ###Assistant : Convenience property to retrieve the value tuple as a tuple of datetime objects. Initial or selected range. Initial or selected value, throttled to report only on mouseup. The minimum allowable value. The maximum allowable value. The step between consecutive values, in units of milliseconds. Default is one hour. 
" 2738,"def test_first_event_with_minified_stack_trace_received(self, record_analytics): now = timezone.now() project = self.create_project(first_event=now) project_created.send(project=project, user=self.user, sender=type(project)) url = ""http://localhost:3000"" data = load_data(""javascript"") data[""tags""] = [(""url"", url)] data[""exception""] = { ""values"": [ { **data[""exception""][""values""][0], ""raw_stacktrace"": { ""frames"": [ { ""function"": ""o"", ""filename"": ""/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js"", ""abs_path"": ""https://s1.sentry-cdn.com/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js"", ""lineno"": 2, ""colno"": 37098, ""pre_context"": [ ""/*! For license information please see vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd. {snip}"" ], ""context_line"": ""{snip} .apply(this,arguments);const i=o.map((e=>c(e,t)));return e.apply(this,i)}catch(e){throw l(),(0,i.$e)((n=>{n.addEventProcessor((e=>(t.mechani {snip}"", ""post_context"": [ ""//# sourceMappingURL=../sourcemaps/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.fe32 {snip}"" ], ""in_app"": False, }, ], }, } ] } self.store_event( project_id=project.id, data=data, ) record_analytics.assert_called_with( ""first_event_with_minified_stack_trace_for_project.sent"", user_id=self.user.id, organization_id=project.organization_id, project_id=project.id, platform=data[""platform""], url=url, ) "," Test that an analytics event is recorded when a first event with minified stack trace is received ",17,88,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_first_event_with_minified_stack_trace_received(self, record_analytics): now = timezone.now() project = self.create_project(first_event=now) project_created.send(project=project, user=self.user, sender=type(project)) url = ""http://localhost:3000"" data = load_data(""javascript"") data[""tags""] = [(""url"", url)] data[""exception""] = { ""values"": [ { **data[""exception""][""values""][0], ""raw_stacktrace"": { ""frames"": [ { ""function"": ""o"", ""filename"": ""/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js"", ""abs_path"": ""https://s1.sentry-cdn.com/_static/dist/sentry/chunks/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.255071ceadabfb67483c.js"", ""lineno"": 2, ""colno"": 37098, ""pre_context"": [ ""/*! For license information please see vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd. 
{snip}"" ], ""context_line"": ""{snip} .apply(this,arguments);const i=o.map((e=>c(e,t)));return e.apply(this,i)}catch(e){throw l(),(0,i.$e)((n=>{n.addEventProcessor((e=>(t.mechani {snip}"", ""post_context"": [ ""//# sourceMappingURL=../sourcemaps/vendors-node_modules_emotion_is-prop-valid_node_modules_emotion_memoize_dist_memoize_browser_-4fe4bd.fe32 {snip}"" ], ""in_app"": False, }, ], }, } ] } self.store_event( project_id=project.id, data=data, ) record_analytics.assert_called_with( ""first_event_with_minified_stack_trace_for_project.sent"", user_id=self.user.id, organization_id=project.organization_id, project_id=project.id, platform=data[""platform""], url=url, ) ``` ###Assistant : Test that an analytics event is recorded when a first event with minified stack trace is received " 2739,"def arange(start, /, stop=None, step=1, *, dtype=None, meta=None, **kwargs): raise NotImplementedError ","Create an ascending or descending array Returns evenly spaced values within the half-open interval ``[start, stop)`` as a one-dimensional array. ",20,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def arange(start, /, stop=None, step=1, *, dtype=None, meta=None, **kwargs): raise NotImplementedError ``` ###Assistant : Create an ascending or descending array Returns evenly spaced values within the half-open interval ``[start, stop)`` as a one-dimensional array. " 2740,"def best_checkpoints(self): checkpoints = sorted(self._top_persisted_checkpoints, key=lambda c: c.priority) return [wrapped.tracked_checkpoint for wrapped in checkpoints] ","Returns best PERSISTENT checkpoints, sorted by score.",7,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def best_checkpoints(self): checkpoints = sorted(self._top_persisted_checkpoints, key=lambda c: c.priority) return [wrapped.tracked_checkpoint for wrapped in checkpoints] ``` ###Assistant : Returns best PERSISTENT checkpoints, sorted by score. " 2741,"def get_config_directory() -> Path: if os.getenv('NNI_CONFIG_DIR') is not None: config_dir = Path(os.getenv('NNI_CONFIG_DIR')) # type: ignore elif sys.prefix != sys.base_prefix or Path(sys.prefix, 'conda-meta').is_dir(): config_dir = Path(sys.prefix, 'nni') elif sys.platform == 'win32': config_dir = Path(os.environ['APPDATA'], 'nni') else: config_dir = Path.home() / '.config/nni' config_dir.mkdir(parents=True, exist_ok=True) return config_dir "," Get NNI config directory. Create it if not exist. ",9,44,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_config_directory() -> Path: if os.getenv('NNI_CONFIG_DIR') is not None: config_dir = Path(os.getenv('NNI_CONFIG_DIR')) # type: ignore elif sys.prefix != sys.base_prefix or Path(sys.prefix, 'conda-meta').is_dir(): config_dir = Path(sys.prefix, 'nni') elif sys.platform == 'win32': config_dir = Path(os.environ['APPDATA'], 'nni') else: config_dir = Path.home() / '.config/nni' config_dir.mkdir(parents=True, exist_ok=True) return config_dir ``` ###Assistant : Get NNI config directory. Create it if not exist. 
" 2742,"def test_background_update_min_batch_set_in_config(self): # a very long-running individual update duration_ms = 50 self.get_success( self.store.db_pool.simple_insert( ""background_updates"", values={""update_name"": ""test_update"", ""progress_json"": '{""my_key"": 1}'}, ) ) # Run the update with the long-running update item"," Test that the minimum batch size set in the config is used ",12,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_background_update_min_batch_set_in_config(self): # a very long-running individual update duration_ms = 50 self.get_success( self.store.db_pool.simple_insert( ""background_updates"", values={""update_name"": ""test_update"", ""progress_json"": '{""my_key"": 1}'}, ) ) # Run the update with the long-running update item ``` ###Assistant : Test that the minimum batch size set in the config is used " 2743,"def swap_memory(): mem = cext.virtual_mem() total_phys = mem[0] free_phys = mem[1] total_system = mem[2] free_system = mem[3] # Despite the name PageFile refers to total system memory here # thus physical memory values need to be subtracted to get swap values total = total_system - total_phys free = min(total, free_system - free_phys) used = total - free percent = usage_percent(used, total, round_=1) return _common.sswap(total, used, free, percent, 0, 0) # ===================================================================== # --- disk # ===================================================================== disk_io_counters = cext.disk_io_counters ","Swap system memory as a (total, used, free, sin, sout) tuple.",11,79,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def swap_memory(): mem = cext.virtual_mem() total_phys = mem[0] free_phys = mem[1] total_system = mem[2] free_system = mem[3] # Despite the name PageFile refers to total system memory here # thus physical memory values need to be subtracted to get swap values total = total_system - total_phys free = min(total, free_system - free_phys) used = total - free percent = usage_percent(used, total, round_=1) return _common.sswap(total, used, free, percent, 0, 0) # ===================================================================== # --- disk # ===================================================================== disk_io_counters = cext.disk_io_counters ``` ###Assistant : Swap system memory as a (total, used, free, sin, sout) tuple. " 2744,"def get_supplier_invoice_details(): inv_details = {} for d in frappe.db.sql( , as_dict=1, ): inv_details[d.name] = d.bill_no return inv_details "," select name, bill_no from `tabPurchase Invoice` where docstatus = 1 and bill_no is not null and bill_no != '' ",19,17,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_supplier_invoice_details(): inv_details = {} for d in frappe.db.sql( , as_dict=1, ): inv_details[d.name] = d.bill_no return inv_details ``` ###Assistant : select name, bill_no from `tabPurchase Invoice` where docstatus = 1 and bill_no is not null and bill_no != '' " 2745,"def dce_rpc_endianess(pkt): if pkt.endianness == 0: # big endian return "">"" elif pkt.endianness == 1: # little endian return ""<"" else: return ""!"" ",Determine the right endianness sign for a given DCE/RPC packet,10,23,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def dce_rpc_endianess(pkt): if pkt.endianness == 0: # big endian return "">"" elif pkt.endianness == 1: # little endian return ""<"" else: return ""!"" ``` ###Assistant : Determine the right endianness sign for a given DCE/RPC packet " 2746,"def deserialize_object(model, fields, pk=None): content_type = ContentType.objects.get_for_model(model) if 'custom_fields' in fields: fields['custom_field_data'] = fields.pop('custom_fields') data = { 'model': '.'.join(content_type.natural_key()), 'pk': pk, 'fields': fields, } instance = list(serializers.deserialize('python', [data]))[0] return instance "," Instantiate an object from the given model and field data. Functions as the complement to serialize_object(). ",16,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deserialize_object(model, fields, pk=None): content_type = ContentType.objects.get_for_model(model) if 'custom_fields' in fields: fields['custom_field_data'] = fields.pop('custom_fields') data = { 'model': '.'.join(content_type.natural_key()), 'pk': pk, 'fields': fields, } instance = list(serializers.deserialize('python', [data]))[0] return instance ``` ###Assistant : Instantiate an object from the given model and field data. Functions as the complement to serialize_object(). " 2747,"async def test_component_not_installed_if_requirement_fails(hass): hass.config.skip_pip = False mock_integration(hass, MockModule(""comp"", requirements=[""package==0.0.1""])) with patch(""homeassistant.util.package.install_package"", return_value=False): assert not await setup.async_setup_component(hass, ""comp"", {}) assert ""comp"" not in hass.config.components ",Component setup should fail if requirement can't install.,8,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_component_not_installed_if_requirement_fails(hass): hass.config.skip_pip = False mock_integration(hass, MockModule(""comp"", requirements=[""package==0.0.1""])) with patch(""homeassistant.util.package.install_package"", return_value=False): assert not await setup.async_setup_component(hass, ""comp"", {}) assert ""comp"" not in hass.config.components ``` ###Assistant : Component setup should fail if requirement can't install. 
" 2748,"def use_numba_cb(key) -> None: from pandas.core.util import numba_ numba_.set_use_numba(cf.get_option(key)) with cf.config_prefix(""compute""): cf.register_option( ""use_bottleneck"", True, use_bottleneck_doc, validator=is_bool, cb=use_bottleneck_cb, ) cf.register_option( ""use_numexpr"", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb ) cf.register_option( ""use_numba"", False, use_numba_doc, validator=is_bool, cb=use_numba_cb ) # # options from the ""display"" namespace pc_precision_doc = pc_colspace_doc = pc_max_rows_doc = pc_min_rows_doc = pc_max_cols_doc = pc_max_categories_doc = pc_max_info_cols_doc = pc_nb_repr_h_doc = pc_pprint_nest_depth = pc_multi_sparse_doc = float_format_doc = max_colwidth_doc = colheader_justify_doc = pc_expand_repr_doc = pc_show_dimensions_doc = pc_east_asian_width_doc = pc_ambiguous_as_wide_doc = pc_latex_repr_doc = pc_table_schema_doc = pc_html_border_doc = pc_html_use_mathjax_doc = pc_max_dir_items = pc_width_doc = pc_chop_threshold_doc = pc_max_seq_items = pc_max_info_rows_doc = pc_large_repr_doc = pc_memory_usage_doc = pc_latex_escape = pc_latex_longtable = pc_latex_multicolumn = pc_latex_multicolumn_format = pc_latex_multirow = "," : int Floating point output precision in terms of number of places after the decimal, for regular formatting as well as scientific notation. Similar to ``precision`` in :meth:`numpy.set_printoptions`. : int Default space for DataFrame columns. : int If max_rows is exceeded, switch to truncate view. Depending on `large_repr`, objects are either centrally truncated or printed as a summary view. 'None' value means unlimited. In case python/IPython is running in a terminal and `large_repr` equals 'truncate' this can be set to 0 and pandas will auto-detect the height of the terminal and print a truncated object which fits the screen height. The IPython notebook, IPython qtconsole, or IDLE do not run in a terminal and hence it is not possible to do correct auto-detection. : int The numbers of rows to show in a truncated view (when `max_rows` is exceeded). Ignored when `max_rows` is set to None or 0. When set to None, follows the value of `max_rows`. : int If max_cols is exceeded, switch to truncate view. Depending on `large_repr`, objects are either centrally truncated or printed as a summary view. 'None' value means unlimited. In case python/IPython is running in a terminal and `large_repr` equals 'truncate' this can be set to 0 and pandas will auto-detect the width of the terminal and print a truncated object which fits the screen width. The IPython notebook, IPython qtconsole, or IDLE do not run in a terminal and hence it is not possible to do correct auto-detection. : int This sets the maximum number of categories pandas should output when printing out a `Categorical` or a Series of dtype ""category"". : int max_info_columns is used in DataFrame.info method to decide if per column information will be printed. : boolean When True, IPython notebook will use html representation for pandas objects (if it is available). : int Controls the number of nested levels to process when pretty-printing : boolean ""sparsify"" MultiIndex display (don't display repeated elements in outer levels within groups) : callable The callable should accept a floating point number and return a string with the desired format of the number. This is used in some places like SeriesFormatter. See formats.format.EngFormatter for an example. : int or None The maximum width in characters of a column in the repr of a pandas data structure. 
When the column overflows, a ""..."" placeholder is embedded in the output. A 'None' value means unlimited. : 'left'/'right' Controls the justification of column headers. used by DataFrameFormatter. : boolean Whether to print out the full DataFrame repr for wide DataFrames across multiple lines, `max_columns` is still respected, but the output will wrap-around across multiple ""pages"" if its width exceeds `display.width`. : boolean or 'truncate' Whether to print out dimensions at the end of DataFrame repr. If 'truncate' is specified, only print out the dimensions if the frame is truncated (e.g. not display all rows and/or columns) : boolean Whether to use the Unicode East Asian Width to calculate the display text width. Enabling this may affect to the performance (default: False) : boolean Whether to handle Unicode characters belong to Ambiguous as Wide (width=2) (default: False) : boolean Whether to produce a latex DataFrame representation for jupyter environments that support it. (default: False) : boolean Whether to publish a Table Schema representation for frontends that support it. (default: False) : int A ``border=value`` attribute is inserted in the ```` tag for the DataFrame HTML repr. \ : boolean When True, Jupyter notebook will process table contents using MathJax, rendering mathematical expressions enclosed by the dollar symbol. (default: True) \ : int The number of items that will be added to `dir(...)`. 'None' value means unlimited. Because dir is cached, changing this option will not immediately affect already existing dataframes until a column is deleted or added. This is for instance used to suggest columns from a dataframe to tab completion. : int Width of the display in characters. In case python/IPython is running in a terminal this can be set to None and pandas will correctly auto-detect the width. Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a terminal and hence it is not possible to correctly detect the width. : float or None if set to a float value, all float values smaller then the given threshold will be displayed as exactly 0 by repr and friends. : int or None When pretty-printing a long sequence, no more then `max_seq_items` will be printed. If items are omitted, they will be denoted by the addition of ""..."" to the resulting string. If set to None, the number of items to be printed is unlimited. : int or None df.info() will usually show null-counts for each column. For large frames this can be quite slow. max_info_rows and max_info_cols limit this null check only to frames with smaller dimensions than specified. : 'truncate'/'info' For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can show a truncated table (the default from 0.13), or switch to the view from df.info() (the behaviour in earlier versions of pandas). : bool, string or None This specifies if the memory usage of a DataFrame should be displayed when df.info() is called. Valid values True,False,'deep' : bool This specifies if the to_latex method of a Dataframe uses escapes special characters. Valid values: False,True :bool This specifies if the to_latex method of a Dataframe uses the longtable format. Valid values: False,True : bool This specifies if the to_latex method of a Dataframe uses multicolumns to pretty-print MultiIndex columns. Valid values: False,True : string This specifies the format for multicolumn headers. Can be surrounded with '|'. 
Valid values: 'l', 'c', 'r', 'p{}' : bool This specifies if the to_latex method of a Dataframe uses multirows to pretty-print MultiIndex rows. Valid values: False,True ",960,105,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def use_numba_cb(key) -> None: from pandas.core.util import numba_ numba_.set_use_numba(cf.get_option(key)) with cf.config_prefix(""compute""): cf.register_option( ""use_bottleneck"", True, use_bottleneck_doc, validator=is_bool, cb=use_bottleneck_cb, ) cf.register_option( ""use_numexpr"", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb ) cf.register_option( ""use_numba"", False, use_numba_doc, validator=is_bool, cb=use_numba_cb ) # # options from the ""display"" namespace pc_precision_doc = pc_colspace_doc = pc_max_rows_doc = pc_min_rows_doc = pc_max_cols_doc = pc_max_categories_doc = pc_max_info_cols_doc = pc_nb_repr_h_doc = pc_pprint_nest_depth = pc_multi_sparse_doc = float_format_doc = max_colwidth_doc = colheader_justify_doc = pc_expand_repr_doc = pc_show_dimensions_doc = pc_east_asian_width_doc = pc_ambiguous_as_wide_doc = pc_latex_repr_doc = pc_table_schema_doc = pc_html_border_doc = pc_html_use_mathjax_doc = pc_max_dir_items = pc_width_doc = pc_chop_threshold_doc = pc_max_seq_items = pc_max_info_rows_doc = pc_large_repr_doc = pc_memory_usage_doc = pc_latex_escape = pc_latex_longtable = pc_latex_multicolumn = pc_latex_multicolumn_format = pc_latex_multirow = ``` ###Assistant : : int Floating point output precision in terms of number of places after the decimal, for regular formatting as well as scientific notation. Similar to ``precision`` in :meth:`numpy.set_printoptions`. : int Default space for DataFrame columns. : int If max_rows is exceeded, switch to truncate view. Depending on `large_repr`, objects are either centrally truncated or printed as a summary view. 'None' value means unlimited. In case python/IPython is running in a terminal and `large_repr` equals 'truncate' this can be set to 0 and pandas will auto-detect the height of the terminal and print a truncated object which fits the screen height. The IPython notebook, IPython qtconsole, or IDLE do not run in a terminal and hence it is not possible to do correct auto-detection. : int The numbers of rows to show in a truncated view (when `max_rows` is exceeded). Ignored when `max_rows` is set to None or 0. When set to None, follows the value of `max_rows`. : int If max_cols is exceeded, switch to truncate view. Depending on `large_repr`, objects are either centrally truncated or printed as a summary view. 'None' value means unlimited. In case python/IPython is running in a terminal and `large_repr` equals 'truncate' this can be set to 0 and pandas will auto-detect the width of the terminal and print a truncated object which fits the screen width. The IPython notebook, IPython qtconsole, or IDLE do not run in a terminal and hence it is not possible to do correct auto-detection. : int This sets the maximum number of categories pandas should output when printing out a `Categorical` or a Series of dtype ""category"". : int max_info_columns is used in DataFrame.info method to decide if per column information will be printed. : boolean When True, IPython notebook will use html representation for pandas objects (if it is available). 
: int Controls the number of nested levels to process when pretty-printing : boolean ""sparsify"" MultiIndex display (don't display repeated elements in outer levels within groups) : callable The callable should accept a floating point number and return a string with the desired format of the number. This is used in some places like SeriesFormatter. See formats.format.EngFormatter for an example. : int or None The maximum width in characters of a column in the repr of a pandas data structure. When the column overflows, a ""..."" placeholder is embedded in the output. A 'None' value means unlimited. : 'left'/'right' Controls the justification of column headers. used by DataFrameFormatter. : boolean Whether to print out the full DataFrame repr for wide DataFrames across multiple lines, `max_columns` is still respected, but the output will wrap-around across multiple ""pages"" if its width exceeds `display.width`. : boolean or 'truncate' Whether to print out dimensions at the end of DataFrame repr. If 'truncate' is specified, only print out the dimensions if the frame is truncated (e.g. not display all rows and/or columns) : boolean Whether to use the Unicode East Asian Width to calculate the display text width. Enabling this may affect to the performance (default: False) : boolean Whether to handle Unicode characters belong to Ambiguous as Wide (width=2) (default: False) : boolean Whether to produce a latex DataFrame representation for jupyter environments that support it. (default: False) : boolean Whether to publish a Table Schema representation for frontends that support it. (default: False) : int A ``border=value`` attribute is inserted in the ``
    `` tag for the DataFrame HTML repr. \ : boolean When True, Jupyter notebook will process table contents using MathJax, rendering mathematical expressions enclosed by the dollar symbol. (default: True) \ : int The number of items that will be added to `dir(...)`. 'None' value means unlimited. Because dir is cached, changing this option will not immediately affect already existing dataframes until a column is deleted or added. This is for instance used to suggest columns from a dataframe to tab completion. : int Width of the display in characters. In case python/IPython is running in a terminal this can be set to None and pandas will correctly auto-detect the width. Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a terminal and hence it is not possible to correctly detect the width. : float or None if set to a float value, all float values smaller then the given threshold will be displayed as exactly 0 by repr and friends. : int or None When pretty-printing a long sequence, no more then `max_seq_items` will be printed. If items are omitted, they will be denoted by the addition of ""..."" to the resulting string. If set to None, the number of items to be printed is unlimited. : int or None df.info() will usually show null-counts for each column. For large frames this can be quite slow. max_info_rows and max_info_cols limit this null check only to frames with smaller dimensions than specified. : 'truncate'/'info' For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can show a truncated table (the default from 0.13), or switch to the view from df.info() (the behaviour in earlier versions of pandas). : bool, string or None This specifies if the memory usage of a DataFrame should be displayed when df.info() is called. Valid values True,False,'deep' : bool This specifies if the to_latex method of a Dataframe uses escapes special characters. Valid values: False,True :bool This specifies if the to_latex method of a Dataframe uses the longtable format. Valid values: False,True : bool This specifies if the to_latex method of a Dataframe uses multicolumns to pretty-print MultiIndex columns. Valid values: False,True : string This specifies the format for multicolumn headers. Can be surrounded with '|'. Valid values: 'l', 'c', 'r', 'p{}' : bool This specifies if the to_latex method of a Dataframe uses multirows to pretty-print MultiIndex rows. Valid values: False,True " 2749,"def test_twitter_tag(self) -> None: html = b tree = decode_body(html, ""http://example.com/test.html"") og = parse_html_to_open_graph(tree) self.assertEqual( og, { ""og:title"": None, ""og:description"": ""Description"", ""og:site_name"": ""@matrixdotorg"", }, ) # But they shouldn't override Open Graph values. html = b tree = decode_body(html, ""http://example.com/test.html"") og = parse_html_to_open_graph(tree) self.assertEqual( og, { ""og:title"": None, ""og:description"": ""Real Description"", ""og:site_name"": ""matrix.org"", }, ) ","Twitter card tags should be used if nothing else is available. ",40,55,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_twitter_tag(self) -> None: html = b tree = decode_body(html, ""http://example.com/test.html"") og = parse_html_to_open_graph(tree) self.assertEqual( og, { ""og:title"": None, ""og:description"": ""Description"", ""og:site_name"": ""@matrixdotorg"", }, ) # But they shouldn't override Open Graph values. 
html = b tree = decode_body(html, ""http://example.com/test.html"") og = parse_html_to_open_graph(tree) self.assertEqual( og, { ""og:title"": None, ""og:description"": ""Real Description"", ""og:site_name"": ""matrix.org"", }, ) ``` ###Assistant : Twitter card tags should be used if nothing else is available. " 2750,"def get_tax_template(posting_date, args): args = frappe._dict(args) conditions = [] if posting_date: conditions.append( f ) else: conditions.append(""(from_date is null) and (to_date is null)"") conditions.append( ""ifnull(tax_category, '') = {0}"".format(frappe.db.escape(cstr(args.get(""tax_category"")))) ) if ""tax_category"" in args.keys(): del args[""tax_category""] for key, value in args.items(): if key == ""use_for_shopping_cart"": conditions.append(""use_for_shopping_cart = {0}"".format(1 if value else 0)) elif key == ""customer_group"": if not value: value = get_root_of(""Customer Group"") customer_group_condition = get_customer_group_condition(value) conditions.append(""ifnull({0}, '') in ('', {1})"".format(key, customer_group_condition)) else: conditions.append(""ifnull({0}, '') in ('', {1})"".format(key, frappe.db.escape(cstr(value)))) tax_rule = frappe.db.sql( .format( "" and "".join(conditions) ), as_dict=True, ) if not tax_rule: return None for rule in tax_rule: rule.no_of_keys_matched = 0 for key in args: if rule.get(key): rule.no_of_keys_matched += 1 def cmp(a, b): # refernce: https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons return int(a > b) - int(a < b) rule = sorted( tax_rule, key=functools.cmp_to_key( lambda b, a: cmp(a.no_of_keys_matched, b.no_of_keys_matched) or cmp(a.priority, b.priority) ), )[0] tax_template = rule.sales_tax_template or rule.purchase_tax_template doctype = ""{0} Taxes and Charges Template"".format(rule.tax_type) if frappe.db.get_value(doctype, tax_template, ""disabled"") == 1: return None return tax_template ","Get matching tax rule(from_date is null or from_date <= '{posting_date}') and (to_date is null or to_date >= '{posting_date}')select * from `tabTax Rule` where {0}",24,159,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_tax_template(posting_date, args): args = frappe._dict(args) conditions = [] if posting_date: conditions.append( f ) else: conditions.append(""(from_date is null) and (to_date is null)"") conditions.append( ""ifnull(tax_category, '') = {0}"".format(frappe.db.escape(cstr(args.get(""tax_category"")))) ) if ""tax_category"" in args.keys(): del args[""tax_category""] for key, value in args.items(): if key == ""use_for_shopping_cart"": conditions.append(""use_for_shopping_cart = {0}"".format(1 if value else 0)) elif key == ""customer_group"": if not value: value = get_root_of(""Customer Group"") customer_group_condition = get_customer_group_condition(value) conditions.append(""ifnull({0}, '') in ('', {1})"".format(key, customer_group_condition)) else: conditions.append(""ifnull({0}, '') in ('', {1})"".format(key, frappe.db.escape(cstr(value)))) tax_rule = frappe.db.sql( .format( "" and "".join(conditions) ), as_dict=True, ) if not tax_rule: return None for rule in tax_rule: rule.no_of_keys_matched = 0 for key in args: if rule.get(key): rule.no_of_keys_matched += 1 def cmp(a, b): # refernce: https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons return int(a > b) - int(a < b) rule = sorted( tax_rule, key=functools.cmp_to_key( lambda b, a: cmp(a.no_of_keys_matched, b.no_of_keys_matched) or cmp(a.priority, b.priority) ), )[0] tax_template = rule.sales_tax_template or rule.purchase_tax_template doctype = ""{0} Taxes and Charges Template"".format(rule.tax_type) if frappe.db.get_value(doctype, tax_template, ""disabled"") == 1: return None return tax_template ``` ###Assistant : Get matching tax rule(from_date is null or from_date <= '{posting_date}') and (to_date is null or to_date >= '{posting_date}')select * from `tabTax Rule` where {0} " 2751,"def wheel_graph(n, create_using=None): _, nodes = n G = empty_graph(nodes, create_using) if G.is_directed(): raise NetworkXError(""Directed Graph not supported"") if len(nodes) > 1: hub, *rim = nodes G.add_edges_from((hub, node) for node in rim) if len(rim) > 1: G.add_edges_from(pairwise(rim, cyclic=True)) return G ","Return the wheel graph The wheel graph consists of a hub node connected to a cycle of (n-1) nodes. Parameters ---------- n : int or iterable If an integer, node labels are 0 to n with center 0. If an iterable of nodes, the center is the first. create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Node labels are the integers 0 to n - 1. ",76,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wheel_graph(n, create_using=None): _, nodes = n G = empty_graph(nodes, create_using) if G.is_directed(): raise NetworkXError(""Directed Graph not supported"") if len(nodes) > 1: hub, *rim = nodes G.add_edges_from((hub, node) for node in rim) if len(rim) > 1: G.add_edges_from(pairwise(rim, cyclic=True)) return G ``` ###Assistant : Return the wheel graph The wheel graph consists of a hub node connected to a cycle of (n-1) nodes. Parameters ---------- n : int or iterable If an integer, node labels are 0 to n with center 0. If an iterable of nodes, the center is the first. create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. Node labels are the integers 0 to n - 1. 
" 2752,"def get_changes(): with open(HISTORY) as f: lines = f.readlines() block = [] # eliminate the part preceding the first block for i, line in enumerate(lines): line = lines.pop(0) if line.startswith('===='): break lines.pop(0) for i, line in enumerate(lines): line = lines.pop(0) line = line.rstrip() if re.match(r""^- \d+_"", line): line = re.sub(r""^- (\d+)_"", r""- #\1"", line) if line.startswith('===='): break block.append(line) # eliminate bottom empty lines block.pop(-1) while not block[-1]: block.pop(-1) return ""\n"".join(block) ","Get the most recent changes for this release by parsing HISTORY.rst file. ",12,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_changes(): with open(HISTORY) as f: lines = f.readlines() block = [] # eliminate the part preceding the first block for i, line in enumerate(lines): line = lines.pop(0) if line.startswith('===='): break lines.pop(0) for i, line in enumerate(lines): line = lines.pop(0) line = line.rstrip() if re.match(r""^- \d+_"", line): line = re.sub(r""^- (\d+)_"", r""- #\1"", line) if line.startswith('===='): break block.append(line) # eliminate bottom empty lines block.pop(-1) while not block[-1]: block.pop(-1) return ""\n"".join(block) ``` ###Assistant : Get the most recent changes for this release by parsing HISTORY.rst file. " 2753,"def _check_valid_data(self) -> bool: logger.debug(""Validating data. %s"", {key: len(val) for key, val in self._display_data.stats.items()}) if any(len(val) == 0 # pylint:disable=len-as-condition for val in self._display_data.stats.values()): return False return True "," Check that the selections holds valid data to display NB: len-as-condition is used as data could be a list or a numpy array Returns ------- bool ``True` if there is data to be displayed, otherwise ``False`` ",36,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_valid_data(self) -> bool: logger.debug(""Validating data. %s"", {key: len(val) for key, val in self._display_data.stats.items()}) if any(len(val) == 0 # pylint:disable=len-as-condition for val in self._display_data.stats.values()): return False return True ``` ###Assistant : Check that the selections holds valid data to display NB: len-as-condition is used as data could be a list or a numpy array Returns ------- bool ``True` if there is data to be displayed, otherwise ``False`` " 2754,"def _async_check_unavailable_groups_with_random_macs(self) -> None: now = MONOTONIC_TIME() gone_unavailable = [ group_id for group_id in self._group_ids_random_macs if group_id not in self._unavailable_group_ids and (service_info := self._last_seen_by_group_id.get(group_id)) and ( # We will not be callbacks for iBeacons with random macs # that rotate infrequently since their advertisement data is # does not change as the bluetooth.async_register_callback API # suppresses callbacks for duplicate advertisements to avoid # exposing integrations to the firehose of bluetooth advertisements. # # To solve this we need to ask for the latest service info for # the address we last saw to get the latest timestamp. # # If there is no last service info for the address we know that # the device is no longer advertising. 
not ( latest_service_info := bluetooth.async_last_service_info( self.hass, service_info.address, connectable=False ) ) or now - latest_service_info.time > UNAVAILABLE_TIMEOUT ) ] for group_id in gone_unavailable: self._unavailable_group_ids.add(group_id) async_dispatcher_send(self.hass, signal_unavailable(group_id)) ",Check for random mac groups that have not been seen in a while and mark them as unavailable.,18,144,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _async_check_unavailable_groups_with_random_macs(self) -> None: now = MONOTONIC_TIME() gone_unavailable = [ group_id for group_id in self._group_ids_random_macs if group_id not in self._unavailable_group_ids and (service_info := self._last_seen_by_group_id.get(group_id)) and ( # We will not be callbacks for iBeacons with random macs # that rotate infrequently since their advertisement data is # does not change as the bluetooth.async_register_callback API # suppresses callbacks for duplicate advertisements to avoid # exposing integrations to the firehose of bluetooth advertisements. # # To solve this we need to ask for the latest service info for # the address we last saw to get the latest timestamp. # # If there is no last service info for the address we know that # the device is no longer advertising. not ( latest_service_info := bluetooth.async_last_service_info( self.hass, service_info.address, connectable=False ) ) or now - latest_service_info.time > UNAVAILABLE_TIMEOUT ) ] for group_id in gone_unavailable: self._unavailable_group_ids.add(group_id) async_dispatcher_send(self.hass, signal_unavailable(group_id)) ``` ###Assistant : Check for random mac groups that have not been seen in a while and mark them as unavailable. " 2755,"def restore_optimizers_and_schedulers(self) -> None: if not self._loaded_checkpoint: return if self.trainer.strategy.lightning_restore_optimizer: # validation if ""optimizer_states"" not in self._loaded_checkpoint: raise KeyError( ""Trying to restore optimizer state but checkpoint contains only the model."" "" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`."" ) self.restore_optimizers() if ""lr_schedulers"" not in self._loaded_checkpoint: raise KeyError( ""Trying to restore learning rate scheduler state but checkpoint contains only the model."" "" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`."" ) self.restore_lr_schedulers() ",Restores the optimizers and learning rate scheduler states from the pre-loaded checkpoint.,12,76,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def restore_optimizers_and_schedulers(self) -> None: if not self._loaded_checkpoint: return if self.trainer.strategy.lightning_restore_optimizer: # validation if ""optimizer_states"" not in self._loaded_checkpoint: raise KeyError( ""Trying to restore optimizer state but checkpoint contains only the model."" "" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`."" ) self.restore_optimizers() if ""lr_schedulers"" not in self._loaded_checkpoint: raise KeyError( ""Trying to restore learning rate scheduler state but checkpoint contains only the model."" "" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`."" ) self.restore_lr_schedulers() ``` ###Assistant : Restores the optimizers and learning rate scheduler states from the pre-loaded checkpoint. 
" 2756,"def urldefragauth(url): scheme, netloc, path, params, query, fragment = urlparse(url) # see func:`prepend_scheme_if_needed` if not netloc: netloc, path = path, netloc netloc = netloc.rsplit(""@"", 1)[-1] return urlunparse((scheme, netloc, path, params, query, """")) "," Given a url remove the fragment and the authentication part. :rtype: str ",12,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def urldefragauth(url): scheme, netloc, path, params, query, fragment = urlparse(url) # see func:`prepend_scheme_if_needed` if not netloc: netloc, path = path, netloc netloc = netloc.rsplit(""@"", 1)[-1] return urlunparse((scheme, netloc, path, params, query, """")) ``` ###Assistant : Given a url remove the fragment and the authentication part. :rtype: str " 2757,"def test_approval_not_required(self) -> None: self.get_success(self.store.register_user(self.user_id, self.pwhash)) user = self.get_success(self.store.get_user_by_id(self.user_id)) assert user is not None self.assertTrue(user[""approved""]) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertTrue(approved) ","Tests that if we don't require approval for new accounts, newly created accounts are automatically marked as approved. ",18,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_approval_not_required(self) -> None: self.get_success(self.store.register_user(self.user_id, self.pwhash)) user = self.get_success(self.store.get_user_by_id(self.user_id)) assert user is not None self.assertTrue(user[""approved""]) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertTrue(approved) ``` ###Assistant : Tests that if we don't require approval for new accounts, newly created accounts are automatically marked as approved. " 2758,"def get_all(self, name, failobj=None): values = [] name = name.lower() for k, v in self._headers: if k.lower() == name: values.append(self.policy.header_fetch_parse(k, v)) if not values: return failobj return values ","Return a list of all the values for the named field. These will be sorted in the order they appeared in the original message, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. If no such fields exist, failobj is returned (defaults to None). ",51,28,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_all(self, name, failobj=None): values = [] name = name.lower() for k, v in self._headers: if k.lower() == name: values.append(self.policy.header_fetch_parse(k, v)) if not values: return failobj return values ``` ###Assistant : Return a list of all the values for the named field. These will be sorted in the order they appeared in the original message, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. If no such fields exist, failobj is returned (defaults to None). " 2759,"def test_explorer_private_child(self): response = self.client.get( reverse(""wagtailadmin_explore"", args=(self.private_child_page.id,)) ) # Check the response self.assertEqual(response.status_code, 200) # Check the privacy indicator is public self.assertTemplateUsed(response, ""wagtailadmin/pages/_privacy_switch.html"") self.assertContains(response, '
    ') self.assertNotContains(response, '
    ') "," This tests that the privacy indicator on the private child pages explore view is set to ""PRIVATE"" ",17,31,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_explorer_private_child(self): response = self.client.get( reverse(""wagtailadmin_explore"", args=(self.private_child_page.id,)) ) # Check the response self.assertEqual(response.status_code, 200) # Check the privacy indicator is public self.assertTemplateUsed(response, ""wagtailadmin/pages/_privacy_switch.html"") self.assertContains(response, '
    ') self.assertNotContains(response, '
    ') ``` ###Assistant : This tests that the privacy indicator on the private child pages explore view is set to ""PRIVATE"" " 2760,"def find_backend(line): if _re_test_backend.search(line) is None: return None backends = [b[0] for b in _re_backend.findall(line)] backends.sort() return ""_and_"".join(backends) ",Find one (or multiple) backend in a code line of the init.,12,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_backend(line): if _re_test_backend.search(line) is None: return None backends = [b[0] for b in _re_backend.findall(line)] backends.sort() return ""_and_"".join(backends) ``` ###Assistant : Find one (or multiple) backend in a code line of the init. " 2761,"def subgraph_view(G, filter_node=no_filter, filter_edge=no_filter): newG = nx.freeze(G.__class__()) newG._NODE_OK = filter_node newG._EDGE_OK = filter_edge # create view by assigning attributes from G newG._graph = G newG.graph = G.graph newG._node = FilterAtlas(G._node, filter_node) if G.is_multigraph(): Adj = FilterMultiAdjacency ","View of `G` applying a filter on nodes and edges. `subgraph_view` provides a read-only view of the input graph that excludes nodes and edges based on the outcome of two filter functions `filter_node` and `filter_edge`. The `filter_node` function takes one argument --- the node --- and returns `True` if the node should be included in the subgraph, and `False` if it should not be included. The `filter_edge` function takes two (or three arguments if `G` is a multi-graph) --- the nodes describing an edge, plus the edge-key if parallel edges are possible --- and returns `True` if the edge should be included in the subgraph, and `False` if it should not be included. Both node and edge filter functions are called on graph elements as they are queried, meaning there is no up-front cost to creating the view. Parameters ---------- G : networkx.Graph A directed/undirected graph/multigraph filter_node : callable, optional A function taking a node as input, which returns `True` if the node should appear in the view. filter_edge : callable, optional A function taking as input the two nodes describing an edge (plus the edge-key if `G` is a multi-graph), which returns `True` if the edge should appear in the view. Returns ------- graph : networkx.Graph A read-only graph view of the input graph. Examples -------- >>> G = nx.path_graph(6) Filter functions operate on the node, and return `True` if the node should appear in the view: >>> def filter_node(n1): ... return n1 != 5 ... >>> view = nx.subgraph_view(G, filter_node=filter_node) >>> view.nodes() NodeView((0, 1, 2, 3, 4)) We can use a closure pattern to filter graph elements based on additional data --- for example, filtering on edge data attached to the graph: >>> G[3][4][""cross_me""] = False >>> def filter_edge(n1, n2): ... return G[n1][n2].get(""cross_me"", True) ... >>> view = nx.subgraph_view(G, filter_edge=filter_edge) >>> view.edges() EdgeView([(0, 1), (1, 2), (2, 3), (4, 5)]) >>> view = nx.subgraph_view(G, filter_node=filter_node, filter_edge=filter_edge,) >>> view.nodes() NodeView((0, 1, 2, 3, 4)) >>> view.edges() EdgeView([(0, 1), (1, 2), (2, 3)]) ",333,36,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def subgraph_view(G, filter_node=no_filter, filter_edge=no_filter): newG = nx.freeze(G.__class__()) newG._NODE_OK = filter_node newG._EDGE_OK = filter_edge # create view by assigning attributes from G newG._graph = G newG.graph = G.graph newG._node = FilterAtlas(G._node, filter_node) if G.is_multigraph(): Adj = FilterMultiAdjacency ``` ###Assistant : View of `G` applying a filter on nodes and edges. `subgraph_view` provides a read-only view of the input graph that excludes nodes and edges based on the outcome of two filter functions `filter_node` and `filter_edge`. The `filter_node` function takes one argument --- the node --- and returns `True` if the node should be included in the subgraph, and `False` if it should not be included. The `filter_edge` function takes two (or three arguments if `G` is a multi-graph) --- the nodes describing an edge, plus the edge-key if parallel edges are possible --- and returns `True` if the edge should be included in the subgraph, and `False` if it should not be included. Both node and edge filter functions are called on graph elements as they are queried, meaning there is no up-front cost to creating the view. Parameters ---------- G : networkx.Graph A directed/undirected graph/multigraph filter_node : callable, optional A function taking a node as input, which returns `True` if the node should appear in the view. filter_edge : callable, optional A function taking as input the two nodes describing an edge (plus the edge-key if `G` is a multi-graph), which returns `True` if the edge should appear in the view. Returns ------- graph : networkx.Graph A read-only graph view of the input graph. Examples -------- >>> G = nx.path_graph(6) Filter functions operate on the node, and return `True` if the node should appear in the view: >>> def filter_node(n1): ... return n1 != 5 ... >>> view = nx.subgraph_view(G, filter_node=filter_node) >>> view.nodes() NodeView((0, 1, 2, 3, 4)) We can use a closure pattern to filter graph elements based on additional data --- for example, filtering on edge data attached to the graph: >>> G[3][4][""cross_me""] = False >>> def filter_edge(n1, n2): ... return G[n1][n2].get(""cross_me"", True) ... >>> view = nx.subgraph_view(G, filter_edge=filter_edge) >>> view.edges() EdgeView([(0, 1), (1, 2), (2, 3), (4, 5)]) >>> view = nx.subgraph_view(G, filter_node=filter_node, filter_edge=filter_edge,) >>> view.nodes() NodeView((0, 1, 2, 3, 4)) >>> view.edges() EdgeView([(0, 1), (1, 2), (2, 3)]) " 2762,"def _deprecate_ci(errorbar, ci): if ci != ""deprecated"": if ci is None: errorbar = None elif ci == ""sd"": errorbar = ""sd"" else: errorbar = (""ci"", ci) msg = ( ""\n\nThe `ci` parameter is deprecated. "" f""Use `errorbar={repr(errorbar)}` for the same effect.\n"" ) warnings.warn(msg, FutureWarning, stacklevel=3) return errorbar "," Warn on usage of ci= and convert to appropriate errorbar= arg. ci was deprecated when errorbar was added in 0.12. It should not be removed completely for some time, but it can be moved out of function definitions (and extracted from kwargs) after one cycle. ",45,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _deprecate_ci(errorbar, ci): if ci != ""deprecated"": if ci is None: errorbar = None elif ci == ""sd"": errorbar = ""sd"" else: errorbar = (""ci"", ci) msg = ( ""\n\nThe `ci` parameter is deprecated. 
"" f""Use `errorbar={repr(errorbar)}` for the same effect.\n"" ) warnings.warn(msg, FutureWarning, stacklevel=3) return errorbar ``` ###Assistant : Warn on usage of ci= and convert to appropriate errorbar= arg. ci was deprecated when errorbar was added in 0.12. It should not be removed completely for some time, but it can be moved out of function definitions (and extracted from kwargs) after one cycle. " 2763,"def __sub__(self, other): if self._delegate_binop(other): return NotImplemented return np.subtract(self, other) "," Subtract other from self, and return a new masked array. ",10,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __sub__(self, other): if self._delegate_binop(other): return NotImplemented return np.subtract(self, other) ``` ###Assistant : Subtract other from self, and return a new masked array. " 2764,"def mock_smile_adam_2() -> Generator[None, MagicMock, None]: chosen_env = ""m_adam_heating"" with patch( ""homeassistant.components.plugwise.gateway.Smile"", autospec=True ) as smile_mock: smile = smile_mock.return_value smile.gateway_id = ""da224107914542988a88561b4452b0f6"" smile.heater_id = ""056ee145a816487eaa69243c3280f8bf"" smile.smile_version = ""3.6.4"" smile.smile_type = ""thermostat"" smile.smile_hostname = ""smile98765"" smile.smile_name = ""Adam"" smile.connect.return_value = True smile.notifications = _read_json(chosen_env, ""notifications"") smile.async_update.return_value = _read_json(chosen_env, ""all_data"") yield smile @pytest.fixture",Create a 2nd Mock Adam environment for testing exceptions.,9,51,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mock_smile_adam_2() -> Generator[None, MagicMock, None]: chosen_env = ""m_adam_heating"" with patch( ""homeassistant.components.plugwise.gateway.Smile"", autospec=True ) as smile_mock: smile = smile_mock.return_value smile.gateway_id = ""da224107914542988a88561b4452b0f6"" smile.heater_id = ""056ee145a816487eaa69243c3280f8bf"" smile.smile_version = ""3.6.4"" smile.smile_type = ""thermostat"" smile.smile_hostname = ""smile98765"" smile.smile_name = ""Adam"" smile.connect.return_value = True smile.notifications = _read_json(chosen_env, ""notifications"") smile.async_update.return_value = _read_json(chosen_env, ""all_data"") yield smile @pytest.fixture ``` ###Assistant : Create a 2nd Mock Adam environment for testing exceptions. " 2765,"def plot_wireframe(self, X, Y, Z, **kwargs): had_data = self.has_data() if Z.ndim != 2: raise ValueError(""Argument Z must be 2-dimensional."") # FIXME: Support masked arrays X, Y, Z = np.broadcast_arrays(X, Y, Z) rows, cols = Z.shape has_stride = 'rstride' in kwargs or 'cstride' in kwargs has_count = 'rcount' in kwargs or 'ccount' in kwargs if has_stride and has_count: raise ValueError(""Cannot specify both stride and count arguments"") rstride = kwargs.pop('rstride', 1) cstride = kwargs.pop('cstride', 1) rcount = kwargs.pop('rcount', 50) ccount = kwargs.pop('ccount', 50) if rcParams['_internal.classic_mode']: # Strides have priority over counts in classic mode. # So, only compute strides from counts # if counts were explicitly given if has_count: rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0 cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0 else: # If the strides are provided then it has priority. # Otherwise, compute the strides from the counts. 
if not has_stride: rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0 cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0 # We want two sets of lines, one running along the ""rows"" of # Z and another set of lines running along the ""columns"" of Z. # This transpose will make it easy to obtain the columns. tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z) if rstride: rii = list(range(0, rows, rstride)) # Add the last index only if needed if rows > 0 and rii[-1] != (rows - 1): rii += [rows-1] else: rii = [] if cstride: cii = list(range(0, cols, cstride)) # Add the last index only if needed if cols > 0 and cii[-1] != (cols - 1): cii += [cols-1] else: cii = [] if rstride == 0 and cstride == 0: raise ValueError(""Either rstride or cstride must be non zero"") # If the inputs were empty, then just # reset everything. if Z.size == 0: rii = [] cii = [] xlines = [X[i] for i in rii] ylines = [Y[i] for i in rii] zlines = [Z[i] for i in rii] txlines = [tX[i] for i in cii] tylines = [tY[i] for i in cii] tzlines = [tZ[i] for i in cii] lines = ([list(zip(xl, yl, zl)) for xl, yl, zl in zip(xlines, ylines, zlines)] + [list(zip(xl, yl, zl)) for xl, yl, zl in zip(txlines, tylines, tzlines)]) linec = art3d.Line3DCollection(lines, **kwargs) self.add_collection(linec) self.auto_scale_xyz(X, Y, Z, had_data) return linec "," Plot a 3D wireframe. .. note:: The *rcount* and *ccount* kwargs, which both default to 50, determine the maximum number of samples used in each direction. If the input data is larger, it will be downsampled (by slicing) to these numbers of points. Parameters ---------- X, Y, Z : 2D arrays Data values. rcount, ccount : int Maximum number of samples used in each direction. If the input data is larger, it will be downsampled (by slicing) to these numbers of points. Setting a count to zero causes the data to be not sampled in the corresponding direction, producing a 3D line plot rather than a wireframe plot. Defaults to 50. rstride, cstride : int Downsampling stride in each direction. These arguments are mutually exclusive with *rcount* and *ccount*. If only one of *rstride* or *cstride* is set, the other defaults to 1. Setting a stride to zero causes the data to be not sampled in the corresponding direction, producing a 3D line plot rather than a wireframe plot. 'classic' mode uses a default of ``rstride = cstride = 1`` instead of the new default of ``rcount = ccount = 50``. **kwargs Other arguments are forwarded to `.Line3DCollection`. ",198,393,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def plot_wireframe(self, X, Y, Z, **kwargs): had_data = self.has_data() if Z.ndim != 2: raise ValueError(""Argument Z must be 2-dimensional."") # FIXME: Support masked arrays X, Y, Z = np.broadcast_arrays(X, Y, Z) rows, cols = Z.shape has_stride = 'rstride' in kwargs or 'cstride' in kwargs has_count = 'rcount' in kwargs or 'ccount' in kwargs if has_stride and has_count: raise ValueError(""Cannot specify both stride and count arguments"") rstride = kwargs.pop('rstride', 1) cstride = kwargs.pop('cstride', 1) rcount = kwargs.pop('rcount', 50) ccount = kwargs.pop('ccount', 50) if rcParams['_internal.classic_mode']: # Strides have priority over counts in classic mode. 
# So, only compute strides from counts # if counts were explicitly given if has_count: rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0 cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0 else: # If the strides are provided then it has priority. # Otherwise, compute the strides from the counts. if not has_stride: rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0 cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0 # We want two sets of lines, one running along the ""rows"" of # Z and another set of lines running along the ""columns"" of Z. # This transpose will make it easy to obtain the columns. tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z) if rstride: rii = list(range(0, rows, rstride)) # Add the last index only if needed if rows > 0 and rii[-1] != (rows - 1): rii += [rows-1] else: rii = [] if cstride: cii = list(range(0, cols, cstride)) # Add the last index only if needed if cols > 0 and cii[-1] != (cols - 1): cii += [cols-1] else: cii = [] if rstride == 0 and cstride == 0: raise ValueError(""Either rstride or cstride must be non zero"") # If the inputs were empty, then just # reset everything. if Z.size == 0: rii = [] cii = [] xlines = [X[i] for i in rii] ylines = [Y[i] for i in rii] zlines = [Z[i] for i in rii] txlines = [tX[i] for i in cii] tylines = [tY[i] for i in cii] tzlines = [tZ[i] for i in cii] lines = ([list(zip(xl, yl, zl)) for xl, yl, zl in zip(xlines, ylines, zlines)] + [list(zip(xl, yl, zl)) for xl, yl, zl in zip(txlines, tylines, tzlines)]) linec = art3d.Line3DCollection(lines, **kwargs) self.add_collection(linec) self.auto_scale_xyz(X, Y, Z, had_data) return linec ``` ###Assistant : Plot a 3D wireframe. .. note:: The *rcount* and *ccount* kwargs, which both default to 50, determine the maximum number of samples used in each direction. If the input data is larger, it will be downsampled (by slicing) to these numbers of points. Parameters ---------- X, Y, Z : 2D arrays Data values. rcount, ccount : int Maximum number of samples used in each direction. If the input data is larger, it will be downsampled (by slicing) to these numbers of points. Setting a count to zero causes the data to be not sampled in the corresponding direction, producing a 3D line plot rather than a wireframe plot. Defaults to 50. rstride, cstride : int Downsampling stride in each direction. These arguments are mutually exclusive with *rcount* and *ccount*. If only one of *rstride* or *cstride* is set, the other defaults to 1. Setting a stride to zero causes the data to be not sampled in the corresponding direction, producing a 3D line plot rather than a wireframe plot. 'classic' mode uses a default of ``rstride = cstride = 1`` instead of the new default of ``rcount = ccount = 50``. **kwargs Other arguments are forwarded to `.Line3DCollection`. " 2766,"def check_response(self, response, callback, name=None): if not (response is None or asyncio.iscoroutine(response)): return if not name: if isinstance(callback, types.FunctionType): # FBV name = ""The view %s.%s"" % (callback.__module__, callback.__name__) else: # CBV name = ""The view %s.%s.__call__"" % ( callback.__module__, callback.__class__.__name__, ) if response is None: raise ValueError( ""%s didn't return an HttpResponse object. It returned None "" ""instead."" % name ) elif asyncio.iscoroutine(response): raise ValueError( ""%s didn't return an HttpResponse object. It returned an "" ""unawaited coroutine instead. 
You may need to add an 'await' "" ""into your view."" % name ) # Other utility methods. "," Raise an error if the view returned None or an uncalled coroutine. ",12,97,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_response(self, response, callback, name=None): if not (response is None or asyncio.iscoroutine(response)): return if not name: if isinstance(callback, types.FunctionType): # FBV name = ""The view %s.%s"" % (callback.__module__, callback.__name__) else: # CBV name = ""The view %s.%s.__call__"" % ( callback.__module__, callback.__class__.__name__, ) if response is None: raise ValueError( ""%s didn't return an HttpResponse object. It returned None "" ""instead."" % name ) elif asyncio.iscoroutine(response): raise ValueError( ""%s didn't return an HttpResponse object. It returned an "" ""unawaited coroutine instead. You may need to add an 'await' "" ""into your view."" % name ) # Other utility methods. ``` ###Assistant : Raise an error if the view returned None or an uncalled coroutine. " 2767,"def _meta_from_array(x, columns=None, index=None, meta=None): if x.ndim > 2: raise ValueError( ""from_array does not input more than 2D array, got"" "" array with shape %r"" % (x.shape,) ) if index is not None: if not isinstance(index, Index): raise ValueError(""'index' must be an instance of dask.dataframe.Index"") index = index._meta if meta is None: meta = meta_lib_from_array(x).DataFrame() if getattr(x.dtype, ""names"", None) is not None: # record array has named columns if columns is None: columns = list(x.dtype.names) elif np.isscalar(columns): raise ValueError(""For a struct dtype, columns must be a list."") elif not all(i in x.dtype.names for i in columns): extra = sorted(set(columns).difference(x.dtype.names)) raise ValueError(f""dtype {x.dtype} doesn't have fields {extra}"") fields = x.dtype.fields dtypes = [fields[n][0] if n in fields else ""f8"" for n in columns] elif x.ndim == 1: if np.isscalar(columns) or columns is None: return meta._constructor_sliced( [], name=columns, dtype=x.dtype, index=index ) elif len(columns) == 1: return meta._constructor( np.array([], dtype=x.dtype), columns=columns, index=index ) raise ValueError( ""For a 1d array, columns must be a scalar or single element list"" ) else: if np.isnan(x.shape[1]): raise ValueError(""Shape along axis 1 must be known"") if columns is None: columns = list(range(x.shape[1])) if x.ndim == 2 else [0] elif len(columns) != x.shape[1]: raise ValueError( ""Number of column names must match width of the array. "" f""Got {len(columns)} names for {x.shape[1]} columns"" ) dtypes = [x.dtype] * len(columns) data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)} return meta._constructor(data, columns=columns, index=index) ",Create empty DataFrame or Series which has correct dtype,9,234,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _meta_from_array(x, columns=None, index=None, meta=None): if x.ndim > 2: raise ValueError( ""from_array does not input more than 2D array, got"" "" array with shape %r"" % (x.shape,) ) if index is not None: if not isinstance(index, Index): raise ValueError(""'index' must be an instance of dask.dataframe.Index"") index = index._meta if meta is None: meta = meta_lib_from_array(x).DataFrame() if getattr(x.dtype, ""names"", None) is not None: # record array has named columns if columns is None: columns = list(x.dtype.names) elif np.isscalar(columns): raise ValueError(""For a struct dtype, columns must be a list."") elif not all(i in x.dtype.names for i in columns): extra = sorted(set(columns).difference(x.dtype.names)) raise ValueError(f""dtype {x.dtype} doesn't have fields {extra}"") fields = x.dtype.fields dtypes = [fields[n][0] if n in fields else ""f8"" for n in columns] elif x.ndim == 1: if np.isscalar(columns) or columns is None: return meta._constructor_sliced( [], name=columns, dtype=x.dtype, index=index ) elif len(columns) == 1: return meta._constructor( np.array([], dtype=x.dtype), columns=columns, index=index ) raise ValueError( ""For a 1d array, columns must be a scalar or single element list"" ) else: if np.isnan(x.shape[1]): raise ValueError(""Shape along axis 1 must be known"") if columns is None: columns = list(range(x.shape[1])) if x.ndim == 2 else [0] elif len(columns) != x.shape[1]: raise ValueError( ""Number of column names must match width of the array. "" f""Got {len(columns)} names for {x.shape[1]} columns"" ) dtypes = [x.dtype] * len(columns) data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)} return meta._constructor(data, columns=columns, index=index) ``` ###Assistant : Create empty DataFrame or Series which has correct dtype " 2768,"def rows(self): for row in self.row_data: yield [ column[""block""].bind(value) for column, value in zip(self.columns, row[""values""]) ] "," Iterate over the rows of the table, with each row returned as a list of BoundBlocks ",16,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rows(self): for row in self.row_data: yield [ column[""block""].bind(value) for column, value in zip(self.columns, row[""values""]) ] ``` ###Assistant : Iterate over the rows of the table, with each row returned as a list of BoundBlocks " 2769,"def Uniform(name, left, right): r return rv(name, UniformDistribution, (left, right)) #------------------------------------------------------------------------------- # UniformSum distribution ------------------------------------------------------ "," Create a continuous random variable with a uniform distribution. Explanation =========== The density of the uniform distribution is given by .. math:: f(x) := \begin{cases} \frac{1}{b - a} & \text{for } x \in [a,b] \\ 0 & \text{otherwise} \end{cases} with :math:`x \in [a,b]`. 
Parameters ========== a : Real number, :math:`-\infty < a`, the left boundary b : Real number, :math:`a < b < \infty`, the right boundary Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Uniform, density, cdf, E, variance >>> from sympy import Symbol, simplify >>> a = Symbol(""a"", negative=True) >>> b = Symbol(""b"", positive=True) >>> z = Symbol(""z"") >>> X = Uniform(""x"", a, b) >>> density(X)(z) Piecewise((1/(-a + b), (b >= z) & (a <= z)), (0, True)) >>> cdf(X)(z) Piecewise((0, a > z), ((-a + z)/(-a + b), b >= z), (1, True)) >>> E(X) a/2 + b/2 >>> simplify(variance(X)) a**2/12 - a*b/6 + b**2/12 References ========== .. [1] https://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 .. [2] http://mathworld.wolfram.com/UniformDistribution.html ",157,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def Uniform(name, left, right): r return rv(name, UniformDistribution, (left, right)) #------------------------------------------------------------------------------- # UniformSum distribution ------------------------------------------------------ ``` ###Assistant : Create a continuous random variable with a uniform distribution. Explanation =========== The density of the uniform distribution is given by .. math:: f(x) := \begin{cases} \frac{1}{b - a} & \text{for } x \in [a,b] \\ 0 & \text{otherwise} \end{cases} with :math:`x \in [a,b]`. Parameters ========== a : Real number, :math:`-\infty < a`, the left boundary b : Real number, :math:`a < b < \infty`, the right boundary Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Uniform, density, cdf, E, variance >>> from sympy import Symbol, simplify >>> a = Symbol(""a"", negative=True) >>> b = Symbol(""b"", positive=True) >>> z = Symbol(""z"") >>> X = Uniform(""x"", a, b) >>> density(X)(z) Piecewise((1/(-a + b), (b >= z) & (a <= z)), (0, True)) >>> cdf(X)(z) Piecewise((0, a > z), ((-a + z)/(-a + b), b >= z), (1, True)) >>> E(X) a/2 + b/2 >>> simplify(variance(X)) a**2/12 - a*b/6 + b**2/12 References ========== .. [1] https://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 .. [2] http://mathworld.wolfram.com/UniformDistribution.html " 2770,"def get_ttext(value): m = _non_token_end_matcher(value) if not m: raise errors.HeaderParseError( ""expected ttext but found '{}'"".format(value)) ttext = m.group() value = value[len(ttext):] ttext = ValueTerminal(ttext, 'ttext') _validate_xtext(ttext) return ttext, value ","ttext = We allow any non-TOKEN_ENDS in ttext, but add defects to the token's defects list if we find non-ttext characters. We also register defects for *any* non-printables even though the RFC doesn't exclude all of them, because we follow the spirit of RFC 5322. ",47,29,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_ttext(value): m = _non_token_end_matcher(value) if not m: raise errors.HeaderParseError( ""expected ttext but found '{}'"".format(value)) ttext = m.group() value = value[len(ttext):] ttext = ValueTerminal(ttext, 'ttext') _validate_xtext(ttext) return ttext, value ``` ###Assistant : ttext = We allow any non-TOKEN_ENDS in ttext, but add defects to the token's defects list if we find non-ttext characters. We also register defects for *any* non-printables even though the RFC doesn't exclude all of them, because we follow the spirit of RFC 5322. 
" 2771,"def kwargs(self, exclude=(), apply=None): kwargs = {k: getattr(self, k) for k in self._fields if k not in exclude} if apply is not None: return {k: apply(v) for k, v in kwargs.items()} else: return kwargs "," Get instance's attributes as dict of keyword arguments. Parameters ========== exclude : collection of str Collection of keywords to exclude. apply : callable, optional Function to apply to all values. ",30,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def kwargs(self, exclude=(), apply=None): kwargs = {k: getattr(self, k) for k in self._fields if k not in exclude} if apply is not None: return {k: apply(v) for k, v in kwargs.items()} else: return kwargs ``` ###Assistant : Get instance's attributes as dict of keyword arguments. Parameters ========== exclude : collection of str Collection of keywords to exclude. apply : callable, optional Function to apply to all values. " 2772,"def _execute_impl(self, *args, **kwargs) -> RayServeHandle: return self._deployment_handle ","Does not call into anything or produce a new value, as the time this function gets called, all child nodes are already resolved to ObjectRefs. ",25,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _execute_impl(self, *args, **kwargs) -> RayServeHandle: return self._deployment_handle ``` ###Assistant : Does not call into anything or produce a new value, as the time this function gets called, all child nodes are already resolved to ObjectRefs. " 2773,"def test_settings_use_default_site(self): context = {} # This should use the default site template = '{{ settings(""tests.testsetting"", use_default_site=True).title}}' self.assertEqual( self.render(template, context, request_context=False), self.default_site_settings.title, ) "," Check that the {{ settings(use_default_site=True) }} option works with no site in the context ",14,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_settings_use_default_site(self): context = {} # This should use the default site template = '{{ settings(""tests.testsetting"", use_default_site=True).title}}' self.assertEqual( self.render(template, context, request_context=False), self.default_site_settings.title, ) ``` ###Assistant : Check that the {{ settings(use_default_site=True) }} option works with no site in the context " 2774,"def test_stream_square_brackets_and_language(): infos = d = FFmpegInfosParser(infos, ""clip.mp4"").parse() assert d assert len(d[""inputs""][0][""streams""]) == 2 assert d[""inputs""][0][""streams""][0][""language""] == ""eng"" assert d[""inputs""][0][""streams""][1][""language""] is None "," Input #0, mpeg, from 'clip.mp4': Duration: 00:02:15.00, start: 52874.498178, bitrate: 266 kb/s Stream #0:0[0x1e0](eng): Video: ..., 25 tbr, 90k tbn, 50 tbc Stream #0:1[0x1c0](und): Audio: mp2, 0 channels, s16p At least one output file must be specified",37,22,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_stream_square_brackets_and_language(): infos = d = FFmpegInfosParser(infos, ""clip.mp4"").parse() assert d assert len(d[""inputs""][0][""streams""]) == 2 assert d[""inputs""][0][""streams""][0][""language""] == ""eng"" assert d[""inputs""][0][""streams""][1][""language""] is None ``` ###Assistant : Input #0, mpeg, from 'clip.mp4': Duration: 00:02:15.00, start: 52874.498178, bitrate: 266 kb/s Stream #0:0[0x1e0](eng): Video: ..., 25 tbr, 90k tbn, 50 tbc Stream #0:1[0x1c0](und): Audio: mp2, 0 channels, s16p At least one output file must be specified " 2775,"async def wait(self) -> None: if self._is_set: return if not self._loop: self._loop = get_running_loop() self._event = asyncio.Event() await self._event.wait() "," Wait until the flag has been set. If the flag has already been set when this method is called, it returns immediately. ",22,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def wait(self) -> None: if self._is_set: return if not self._loop: self._loop = get_running_loop() self._event = asyncio.Event() await self._event.wait() ``` ###Assistant : Wait until the flag has been set. If the flag has already been set when this method is called, it returns immediately. " 2776,"async def get_and_submit_flow_runs(self) -> List[FlowRun]: if not self.started: raise RuntimeError(""Agent is not started. Use `async with OrionAgent()...`"") self.logger.debug(""Checking for flow runs..."") before = pendulum.now(""utc"").add( seconds=self.prefetch_seconds or PREFECT_AGENT_PREFETCH_SECONDS.value() ) # Use the work queue id or load one from the name work_queue_id = self.work_queue_id or await self.work_queue_id_from_name() if not work_queue_id: return [] try: submittable_runs = await self.client.get_runs_in_work_queue( id=work_queue_id, limit=10, scheduled_before=before ) except httpx.HTTPStatusError as exc: if exc.response.status_code == status.HTTP_404_NOT_FOUND: raise ValueError( f""No work queue found with id '{work_queue_id}'"" ) from None else: raise # Check for a paused work queue for display purposes if not submittable_runs: work_queue = await self.client.read_work_queue(work_queue_id) if work_queue.is_paused: self.logger.info( f""Work queue {work_queue.name!r} ({work_queue.id}) is paused."" ) for flow_run in submittable_runs: self.logger.info(f""Submitting flow run '{flow_run.id}'"") # don't resubmit a run if flow_run.id in self.submitting_flow_run_ids: continue self.submitting_flow_run_ids.add(flow_run.id) self.task_group.start_soon( self.submit_run, flow_run, ) return submittable_runs "," The principle method on agents. Queries for scheduled flow runs and submits them for execution in parallel. ",17,134,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def get_and_submit_flow_runs(self) -> List[FlowRun]: if not self.started: raise RuntimeError(""Agent is not started. 
Use `async with OrionAgent()...`"") self.logger.debug(""Checking for flow runs..."") before = pendulum.now(""utc"").add( seconds=self.prefetch_seconds or PREFECT_AGENT_PREFETCH_SECONDS.value() ) # Use the work queue id or load one from the name work_queue_id = self.work_queue_id or await self.work_queue_id_from_name() if not work_queue_id: return [] try: submittable_runs = await self.client.get_runs_in_work_queue( id=work_queue_id, limit=10, scheduled_before=before ) except httpx.HTTPStatusError as exc: if exc.response.status_code == status.HTTP_404_NOT_FOUND: raise ValueError( f""No work queue found with id '{work_queue_id}'"" ) from None else: raise # Check for a paused work queue for display purposes if not submittable_runs: work_queue = await self.client.read_work_queue(work_queue_id) if work_queue.is_paused: self.logger.info( f""Work queue {work_queue.name!r} ({work_queue.id}) is paused."" ) for flow_run in submittable_runs: self.logger.info(f""Submitting flow run '{flow_run.id}'"") # don't resubmit a run if flow_run.id in self.submitting_flow_run_ids: continue self.submitting_flow_run_ids.add(flow_run.id) self.task_group.start_soon( self.submit_run, flow_run, ) return submittable_runs ``` ###Assistant : The principle method on agents. Queries for scheduled flow runs and submits them for execution in parallel. " 2777,"def conv_output_length(input_length, filter_size, padding, stride, dilation=1): if input_length is None: return None assert padding in {""same"", ""valid"", ""full"", ""causal""} dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1) if padding in [""same"", ""causal""]: output_length = input_length elif padding == ""valid"": output_length = input_length - dilated_filter_size + 1 elif padding == ""full"": output_length = input_length + dilated_filter_size - 1 return (output_length + stride - 1) // stride ","Determines output length of a convolution given input length. Args: input_length: integer. filter_size: integer. padding: one of ""same"", ""valid"", ""full"", ""causal"" stride: integer. dilation: dilation rate, integer. Returns: The output length (integer). ",32,68,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def conv_output_length(input_length, filter_size, padding, stride, dilation=1): if input_length is None: return None assert padding in {""same"", ""valid"", ""full"", ""causal""} dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1) if padding in [""same"", ""causal""]: output_length = input_length elif padding == ""valid"": output_length = input_length - dilated_filter_size + 1 elif padding == ""full"": output_length = input_length + dilated_filter_size - 1 return (output_length + stride - 1) // stride ``` ###Assistant : Determines output length of a convolution given input length. Args: input_length: integer. filter_size: integer. padding: one of ""same"", ""valid"", ""full"", ""causal"" stride: integer. dilation: dilation rate, integer. Returns: The output length (integer). " 2778,"def get_split_nodes(self): rearport = path_node_to_object(self._nodes[-1]) return FrontPort.objects.filter(rear_port=rearport) "," Return all available next segments in a split cable path. ",10,7,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_split_nodes(self): rearport = path_node_to_object(self._nodes[-1]) return FrontPort.objects.filter(rear_port=rearport) ``` ###Assistant : Return all available next segments in a split cable path. " 2779,"def get_template_names(self): try: names = super().get_template_names() except ImproperlyConfigured: # If template_name isn't specified, it's not a problem -- # we just start with an empty list. names = [] # If the list is a queryset, we'll invent a template name based on the # app and model name. This name gets put at the end of the template # name list so that user-supplied names override the automatically- # generated ones. if hasattr(self.object_list, ""model""): opts = self.object_list.model._meta names.append( ""%s/%s%s.html"" % (opts.app_label, opts.model_name, self.template_name_suffix) ) elif not names: raise ImproperlyConfigured( ""%(cls)s requires either a 'template_name' attribute "" ""or a get_queryset() method that returns a QuerySet."" % { ""cls"": self.__class__.__name__, } ) return names "," Return a list of template names to be used for the request. Must return a list. May not be called if render_to_response is overridden. ",24,113,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_template_names(self): try: names = super().get_template_names() except ImproperlyConfigured: # If template_name isn't specified, it's not a problem -- # we just start with an empty list. names = [] # If the list is a queryset, we'll invent a template name based on the # app and model name. This name gets put at the end of the template # name list so that user-supplied names override the automatically- # generated ones. if hasattr(self.object_list, ""model""): opts = self.object_list.model._meta names.append( ""%s/%s%s.html"" % (opts.app_label, opts.model_name, self.template_name_suffix) ) elif not names: raise ImproperlyConfigured( ""%(cls)s requires either a 'template_name' attribute "" ""or a get_queryset() method that returns a QuerySet."" % { ""cls"": self.__class__.__name__, } ) return names ``` ###Assistant : Return a list of template names to be used for the request. Must return a list. May not be called if render_to_response is overridden. " 2780,"def remove_lines(fname, entries): to_remove = [] for entry in entries: msg, issue, lineno, pos, descr = entry # 'module imported but not used' if issue == 'F401' and handle_f401(fname, lineno): to_remove.append(lineno) # 'blank line(s) at end of file' elif issue == 'W391': lines = read_lines(fname) i = len(lines) - 1 while lines[i] == '\n': to_remove.append(i + 1) i -= 1 # 'too many blank lines' elif issue == 'E303': howmany = descr.replace('(', '').replace(')', '') howmany = int(howmany[-1]) for x in range(lineno - howmany, lineno): to_remove.append(x) if to_remove: newlines = [] for i, line in enumerate(read_lines(fname), 1): if i not in to_remove: newlines.append(line) print(""removing line(s) from %s"" % fname) write_file(fname, newlines) return len(to_remove) ","Check if we should remove lines, then do it. Return the number of lines removed. ",15,112,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def remove_lines(fname, entries): to_remove = [] for entry in entries: msg, issue, lineno, pos, descr = entry # 'module imported but not used' if issue == 'F401' and handle_f401(fname, lineno): to_remove.append(lineno) # 'blank line(s) at end of file' elif issue == 'W391': lines = read_lines(fname) i = len(lines) - 1 while lines[i] == '\n': to_remove.append(i + 1) i -= 1 # 'too many blank lines' elif issue == 'E303': howmany = descr.replace('(', '').replace(')', '') howmany = int(howmany[-1]) for x in range(lineno - howmany, lineno): to_remove.append(x) if to_remove: newlines = [] for i, line in enumerate(read_lines(fname), 1): if i not in to_remove: newlines.append(line) print(""removing line(s) from %s"" % fname) write_file(fname, newlines) return len(to_remove) ``` ###Assistant : Check if we should remove lines, then do it. Return the number of lines removed. " 2781,"def _get_one_trial_job(self): if not self.generated_hyper_configs: ret = { 'parameter_id': '-1_0_0', 'parameter_source': 'algorithm', 'parameters': '' } self.send(CommandType.NoMoreTrialJobs, nni.dump(ret)) return None assert self.generated_hyper_configs params = self.generated_hyper_configs.pop(0) ret = { 'parameter_id': params[0], 'parameter_source': 'algorithm', 'parameters': params[1] } self.parameters[params[0]] = params[1] return ret ","get one trial job, i.e., one hyperparameter configuration. If this function is called, Command will be sent by BOHB: a. If there is a parameter need to run, will return ""NewTrialJob"" with a dict: { 'parameter_id': id of new hyperparameter 'parameter_source': 'algorithm' 'parameters': value of new hyperparameter } b. If BOHB don't have parameter waiting, will return ""NoMoreTrialJobs"" with { 'parameter_id': '-1_0_0', 'parameter_source': 'algorithm', 'parameters': '' } ",67,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_one_trial_job(self): if not self.generated_hyper_configs: ret = { 'parameter_id': '-1_0_0', 'parameter_source': 'algorithm', 'parameters': '' } self.send(CommandType.NoMoreTrialJobs, nni.dump(ret)) return None assert self.generated_hyper_configs params = self.generated_hyper_configs.pop(0) ret = { 'parameter_id': params[0], 'parameter_source': 'algorithm', 'parameters': params[1] } self.parameters[params[0]] = params[1] return ret ``` ###Assistant : get one trial job, i.e., one hyperparameter configuration. If this function is called, Command will be sent by BOHB: a. If there is a parameter need to run, will return ""NewTrialJob"" with a dict: { 'parameter_id': id of new hyperparameter 'parameter_source': 'algorithm' 'parameters': value of new hyperparameter } b. If BOHB don't have parameter waiting, will return ""NoMoreTrialJobs"" with { 'parameter_id': '-1_0_0', 'parameter_source': 'algorithm', 'parameters': '' } " 2782,"def global_array_to_host_local_array(global_inputs, global_mesh, pspecs): def _convert(arr, pspec): local_aval = global_mesh._global_to_local( pxla._get_array_mapping(pspec), arr.aval) return array.ArrayImpl( local_aval, MeshPspecSharding(global_mesh.local_mesh, pspec), arr._arrays, committed=True) flattened_inps, out_tree = tree_flatten(global_inputs) out_pspecs = flatten_axis_resources( 'output pspecs', out_tree, pspecs, tupled_args=True) out = tree_map(_convert, tuple(flattened_inps), out_pspecs) return tree_unflatten(out_tree, out) ","Converts a global `jax.Array` to a host local `jax.Array`. 
You can use this function to transition to `jax.Array`. Using `jax.Array` with `pjit` has the same semantics of using GDA with pjit i.e. all `jax.Array` inputs to pjit should be globally shaped and the output from `pjit` will also be globally shaped `jax.Array`s You can use this function to convert the globally shaped `jax.Array` output from pjit to host local values again so that the transition to jax.Array can be a mechanical change. Example usage: ``` global_inputs = jax.experimental.pjit.host_local_array_to_global_array( host_local_inputs, global_mesh, in_pspecs) with mesh: global_out = pjitted_fun(global_inputs) host_local_output = jax.experimental.pjit.global_array_to_host_local_array( global_out, mesh, out_pspecs) ``` Args: global_inputs: A Pytree of global `jax.Array`s. global_mesh: The global mesh. pspecs: A Pytree of PartitionSpecs. ",119,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def global_array_to_host_local_array(global_inputs, global_mesh, pspecs): def _convert(arr, pspec): local_aval = global_mesh._global_to_local( pxla._get_array_mapping(pspec), arr.aval) return array.ArrayImpl( local_aval, MeshPspecSharding(global_mesh.local_mesh, pspec), arr._arrays, committed=True) flattened_inps, out_tree = tree_flatten(global_inputs) out_pspecs = flatten_axis_resources( 'output pspecs', out_tree, pspecs, tupled_args=True) out = tree_map(_convert, tuple(flattened_inps), out_pspecs) return tree_unflatten(out_tree, out) ``` ###Assistant : Converts a global `jax.Array` to a host local `jax.Array`. You can use this function to transition to `jax.Array`. Using `jax.Array` with `pjit` has the same semantics of using GDA with pjit i.e. all `jax.Array` inputs to pjit should be globally shaped and the output from `pjit` will also be globally shaped `jax.Array`s You can use this function to convert the globally shaped `jax.Array` output from pjit to host local values again so that the transition to jax.Array can be a mechanical change. Example usage: ``` global_inputs = jax.experimental.pjit.host_local_array_to_global_array( host_local_inputs, global_mesh, in_pspecs) with mesh: global_out = pjitted_fun(global_inputs) host_local_output = jax.experimental.pjit.global_array_to_host_local_array( global_out, mesh, out_pspecs) ``` Args: global_inputs: A Pytree of global `jax.Array`s. global_mesh: The global mesh. pspecs: A Pytree of PartitionSpecs. " 2783,"def __setitem__(self, key, value): super(Py27Dict, self).__setitem__(key, value) self.keylist.add(key) "," Override of __setitem__ to track keys and simulate Python2.7 dict Parameters ---------- key: hashable value: Any ",16,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __setitem__(self, key, value): super(Py27Dict, self).__setitem__(key, value) self.keylist.add(key) ``` ###Assistant : Override of __setitem__ to track keys and simulate Python2.7 dict Parameters ---------- key: hashable value: Any " 2784,"def virtualenv_no_global() -> bool: # PEP 405 compliance needs to be checked first since virtualenv >=20 would # return True for both checks, but is only able to use the PEP 405 config. if _running_under_venv(): return _no_global_under_venv() if _running_under_regular_virtualenv(): return _no_global_under_regular_virtualenv() return False ","Returns a boolean, whether running in venv with no system site-packages.",11,43,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def virtualenv_no_global() -> bool: # PEP 405 compliance needs to be checked first since virtualenv >=20 would # return True for both checks, but is only able to use the PEP 405 config. if _running_under_venv(): return _no_global_under_venv() if _running_under_regular_virtualenv(): return _no_global_under_regular_virtualenv() return False ``` ###Assistant : Returns a boolean, whether running in venv with no system site-packages. " 2785,"def autocomplete(self): texts = [] for field in self.search_fields: for current_field, value in self.prepare_field(self.obj, field): if isinstance(current_field, AutocompleteField): texts.append((value)) return "" "".join(texts) "," Returns all values to index as ""autocomplete"". This is the value of all AutocompleteFields ",14,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def autocomplete(self): texts = [] for field in self.search_fields: for current_field, value in self.prepare_field(self.obj, field): if isinstance(current_field, AutocompleteField): texts.append((value)) return "" "".join(texts) ``` ###Assistant : Returns all values to index as ""autocomplete"". This is the value of all AutocompleteFields " 2786,"def list_to_tuple(maybe_list): if isinstance(maybe_list, list): return tuple(maybe_list) return maybe_list ","Datasets will stack the list of tensor, so switch them to tuples.",12,9,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list_to_tuple(maybe_list): if isinstance(maybe_list, list): return tuple(maybe_list) return maybe_list ``` ###Assistant : Datasets will stack the list of tensor, so switch them to tuples. " 2787,"def test_not_recorded_for_unused(self, dag_maker, xcom_value): with dag_maker(dag_id=""test_not_recorded_for_unused"") as dag: ",A value not used for task-mapping should not be recorded.,10,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_not_recorded_for_unused(self, dag_maker, xcom_value): with dag_maker(dag_id=""test_not_recorded_for_unused"") as dag: ``` ###Assistant : A value not used for task-mapping should not be recorded. " 2788,"def _ragged_tensor_mse(y_true, y_pred): return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred) @keras_export( ""keras.metrics.mean_absolute_error"", ""keras.metrics.mae"", ""keras.metrics.MAE"", ""keras.losses.mean_absolute_error"", ""keras.losses.mae"", ""keras.losses.MAE"", ) @tf.__internal__.dispatch.add_dispatch_support","Implements support for handling RaggedTensors. Args: y_true: RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`. y_pred: RaggedTensor predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared error values. shape = `[batch_size, d0, .. dN-1]`. When the number of dimensions of the batch feature vector [d0, .. dN] is greater than one the return value is a RaggedTensor. Otherwise a Dense tensor with dimensions [batch_size] is returned. ",69,16,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _ragged_tensor_mse(y_true, y_pred): return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred) @keras_export( ""keras.metrics.mean_absolute_error"", ""keras.metrics.mae"", ""keras.metrics.MAE"", ""keras.losses.mean_absolute_error"", ""keras.losses.mae"", ""keras.losses.MAE"", ) @tf.__internal__.dispatch.add_dispatch_support ``` ###Assistant : Implements support for handling RaggedTensors. Args: y_true: RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`. y_pred: RaggedTensor predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared error values. shape = `[batch_size, d0, .. dN-1]`. When the number of dimensions of the batch feature vector [d0, .. dN] is greater than one the return value is a RaggedTensor. Otherwise a Dense tensor with dimensions [batch_size] is returned. " 2789,"def test_does_not_allow_extra_kwargs() -> None: template = ""This is a {foo} test."" with pytest.raises(KeyError): formatter.format(template, foo=""good"", bar=""oops"") ",Test formatting does not allow extra key word arguments.,9,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_does_not_allow_extra_kwargs() -> None: template = ""This is a {foo} test."" with pytest.raises(KeyError): formatter.format(template, foo=""good"", bar=""oops"") ``` ###Assistant : Test formatting does not allow extra key word arguments. " 2790,"def score_samples(self, X): check_is_fitted(self) X = check_array(X, accept_sparse=""csr"") distances_X, neighbors_indices_X = self.kneighbors( X, n_neighbors=self.n_neighbors_ ) X_lrd = self._local_reachability_density(distances_X, neighbors_indices_X) lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis] # as bigger is better: return -np.mean(lrd_ratios_array, axis=1) ","Opposite of the Local Outlier Factor of X. It is the opposite as bigger is better, i.e. large values correspond to inliers. **Only available for novelty detection (when novelty is set to True).** The argument X is supposed to contain *new data*: if X contains a point from training, it considers the later in its own neighborhood. Also, the samples in X are not considered in the neighborhood of any point. Because of this, the scores obtained via ``score_samples`` may differ from the standard LOF scores. The standard LOF scores for the training data is available via the ``negative_outlier_factor_`` attribute. Parameters ---------- X : array-like of shape (n_samples, n_features) The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. Returns ------- opposite_lof_scores : ndarray of shape (n_samples,) The opposite of the Local Outlier Factor of each input samples. The lower, the more abnormal. ",148,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def score_samples(self, X): check_is_fitted(self) X = check_array(X, accept_sparse=""csr"") distances_X, neighbors_indices_X = self.kneighbors( X, n_neighbors=self.n_neighbors_ ) X_lrd = self._local_reachability_density(distances_X, neighbors_indices_X) lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis] # as bigger is better: return -np.mean(lrd_ratios_array, axis=1) ``` ###Assistant : Opposite of the Local Outlier Factor of X. It is the opposite as bigger is better, i.e. large values correspond to inliers. 
**Only available for novelty detection (when novelty is set to True).** The argument X is supposed to contain *new data*: if X contains a point from training, it considers the later in its own neighborhood. Also, the samples in X are not considered in the neighborhood of any point. Because of this, the scores obtained via ``score_samples`` may differ from the standard LOF scores. The standard LOF scores for the training data is available via the ``negative_outlier_factor_`` attribute. Parameters ---------- X : array-like of shape (n_samples, n_features) The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. Returns ------- opposite_lof_scores : ndarray of shape (n_samples,) The opposite of the Local Outlier Factor of each input samples. The lower, the more abnormal. " 2791,"def get_payroll_period_days(start_date, end_date, employee, company=None): if not company: company = frappe.db.get_value(""Employee"", employee, ""company"") payroll_period = frappe.db.sql( , {""company"": company, ""start_date"": start_date, ""end_date"": end_date}, ) if len(payroll_period) > 0: actual_no_of_days = date_diff(getdate(payroll_period[0][2]), getdate(payroll_period[0][1])) + 1 working_days = actual_no_of_days if not cint( frappe.db.get_value(""Payroll Settings"", None, ""include_holidays_in_total_working_days"") ): holidays = get_holiday_dates_for_employee( employee, getdate(payroll_period[0][1]), getdate(payroll_period[0][2]) ) working_days -= len(holidays) return payroll_period[0][0], working_days, actual_no_of_days return False, False, False "," select name, start_date, end_date from `tabPayroll Period` where company=%(company)s and %(start_date)s between start_date and end_date and %(end_date)s between start_date and end_date ",21,63,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_payroll_period_days(start_date, end_date, employee, company=None): if not company: company = frappe.db.get_value(""Employee"", employee, ""company"") payroll_period = frappe.db.sql( , {""company"": company, ""start_date"": start_date, ""end_date"": end_date}, ) if len(payroll_period) > 0: actual_no_of_days = date_diff(getdate(payroll_period[0][2]), getdate(payroll_period[0][1])) + 1 working_days = actual_no_of_days if not cint( frappe.db.get_value(""Payroll Settings"", None, ""include_holidays_in_total_working_days"") ): holidays = get_holiday_dates_for_employee( employee, getdate(payroll_period[0][1]), getdate(payroll_period[0][2]) ) working_days -= len(holidays) return payroll_period[0][0], working_days, actual_no_of_days return False, False, False ``` ###Assistant : select name, start_date, end_date from `tabPayroll Period` where company=%(company)s and %(start_date)s between start_date and end_date and %(end_date)s between start_date and end_date " 2792,"def density(B, nodes): n = len(B) m = nx.number_of_edges(B) nb = len(nodes) nt = n - nb if m == 0: # includes cases n==0 and n==1 d = 0.0 else: if B.is_directed(): d = m / (2 * nb * nt) else: d = m / (nb * nt) return d ","Returns density of bipartite graph B. Parameters ---------- B : NetworkX graph nodes: list or container Nodes in one node set of the bipartite graph. 
Returns ------- d : float The bipartite density Examples -------- >>> from networkx.algorithms import bipartite >>> G = nx.complete_bipartite_graph(3, 2) >>> X = set([0, 1, 2]) >>> bipartite.density(G, X) 1.0 >>> Y = set([3, 4]) >>> bipartite.density(G, Y) 1.0 Notes ----- The container of nodes passed as argument must contain all nodes in one of the two bipartite node sets to avoid ambiguity in the case of disconnected graphs. See :mod:`bipartite documentation ` for further details on how bipartite graphs are handled in NetworkX. See Also -------- color ",113,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def density(B, nodes): n = len(B) m = nx.number_of_edges(B) nb = len(nodes) nt = n - nb if m == 0: # includes cases n==0 and n==1 d = 0.0 else: if B.is_directed(): d = m / (2 * nb * nt) else: d = m / (nb * nt) return d ``` ###Assistant : Returns density of bipartite graph B. Parameters ---------- B : NetworkX graph nodes: list or container Nodes in one node set of the bipartite graph. Returns ------- d : float The bipartite density Examples -------- >>> from networkx.algorithms import bipartite >>> G = nx.complete_bipartite_graph(3, 2) >>> X = set([0, 1, 2]) >>> bipartite.density(G, X) 1.0 >>> Y = set([3, 4]) >>> bipartite.density(G, Y) 1.0 Notes ----- The container of nodes passed as argument must contain all nodes in one of the two bipartite node sets to avoid ambiguity in the case of disconnected graphs. See :mod:`bipartite documentation ` for further details on how bipartite graphs are handled in NetworkX. See Also -------- color " 2793,"def redirect_or_json(origin, msg, status=""""): if request.headers.get('Accept') == 'application/json': return {'status': status, 'message': msg} else: if status: flash(msg, status) else: flash(msg) return redirect(origin) ###################################################################################### # Error handlers ###################################################################################### "," Some endpoints are called by javascript, returning json will allow us to more elegantly handle side-effects in-page ",17,27,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def redirect_or_json(origin, msg, status=""""): if request.headers.get('Accept') == 'application/json': return {'status': status, 'message': msg} else: if status: flash(msg, status) else: flash(msg) return redirect(origin) ###################################################################################### # Error handlers ###################################################################################### ``` ###Assistant : Some endpoints are called by javascript, returning json will allow us to more elegantly handle side-effects in-page " 2794,"def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx): if not self._infrequent_enabled: return drop_idx default_to_infrequent = self._default_to_infrequent_mappings[feature_idx] if default_to_infrequent is None: return drop_idx # Raise error when explicitly dropping a category that is infrequent infrequent_indices = self._infrequent_indices[feature_idx] if infrequent_indices is not None and drop_idx in infrequent_indices: categories = self.categories_[feature_idx] raise ValueError( f""Unable to drop category {categories[drop_idx]!r} from feature"" f"" {feature_idx} because it is infrequent"" ) return default_to_infrequent[drop_idx] ","Convert `drop_idx` into the index for infrequent categories. If there are no infrequent categories, then `drop_idx` is returned. This method is called in `_compute_drop_idx` when the `drop` parameter is an array-like. ",31,62,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _map_drop_idx_to_infrequent(self, feature_idx, drop_idx): if not self._infrequent_enabled: return drop_idx default_to_infrequent = self._default_to_infrequent_mappings[feature_idx] if default_to_infrequent is None: return drop_idx # Raise error when explicitly dropping a category that is infrequent infrequent_indices = self._infrequent_indices[feature_idx] if infrequent_indices is not None and drop_idx in infrequent_indices: categories = self.categories_[feature_idx] raise ValueError( f""Unable to drop category {categories[drop_idx]!r} from feature"" f"" {feature_idx} because it is infrequent"" ) return default_to_infrequent[drop_idx] ``` ###Assistant : Convert `drop_idx` into the index for infrequent categories. If there are no infrequent categories, then `drop_idx` is returned. This method is called in `_compute_drop_idx` when the `drop` parameter is an array-like. " 2795,"def test_dashboard_module_decorator(enable_test_module): head_cls_list = dashboard_utils.get_all_modules(dashboard_utils.DashboardHeadModule) agent_cls_list = dashboard_utils.get_all_modules( dashboard_utils.DashboardAgentModule ) assert any(cls.__name__ == ""TestHead"" for cls in head_cls_list) assert any(cls.__name__ == ""TestAgent"" for cls in agent_cls_list) test_code = run_string_as_driver(test_code) "," import os import ray.dashboard.utils as dashboard_utils os.environ.pop(""RAY_DASHBOARD_MODULE_TEST"") head_cls_list = dashboard_utils.get_all_modules( dashboard_utils.DashboardHeadModule) agent_cls_list = dashboard_utils.get_all_modules( dashboard_utils.DashboardAgentModule) print(head_cls_list) print(agent_cls_list) assert all(cls.__name__ != ""TestHead"" for cls in head_cls_list) assert all(cls.__name__ != ""TestAgent"" for cls in agent_cls_list) print(""success"") ",34,29,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_dashboard_module_decorator(enable_test_module): head_cls_list = dashboard_utils.get_all_modules(dashboard_utils.DashboardHeadModule) agent_cls_list = dashboard_utils.get_all_modules( dashboard_utils.DashboardAgentModule ) assert any(cls.__name__ == ""TestHead"" for cls in head_cls_list) assert any(cls.__name__ == ""TestAgent"" for cls in agent_cls_list) test_code = run_string_as_driver(test_code) ``` ###Assistant : import os import ray.dashboard.utils as dashboard_utils os.environ.pop(""RAY_DASHBOARD_MODULE_TEST"") head_cls_list = dashboard_utils.get_all_modules( dashboard_utils.DashboardHeadModule) agent_cls_list = dashboard_utils.get_all_modules( dashboard_utils.DashboardAgentModule) print(head_cls_list) print(agent_cls_list) assert all(cls.__name__ != ""TestHead"" for cls in head_cls_list) assert all(cls.__name__ != ""TestAgent"" for cls in agent_cls_list) print(""success"") " 2796,"def parsestr(self, text, headersonly=False): return self.parse(StringIO(text), headersonly=headersonly)","Create a message structure from a string. Returns the root of the message structure. Optional headersonly is a flag specifying whether to stop parsing after reading the headers or not. The default is False, meaning it parses the entire contents of the file. ",43,7,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parsestr(self, text, headersonly=False): return self.parse(StringIO(text), headersonly=headersonly) ``` ###Assistant : Create a message structure from a string. Returns the root of the message structure. Optional headersonly is a flag specifying whether to stop parsing after reading the headers or not. The default is False, meaning it parses the entire contents of the file. " 2797,"def get_prerequisite_model(queryset): if not queryset.exists(): for prereq in getattr(queryset.model, 'prerequisite_models', []): model = apps.get_model(prereq) if not model.objects.exists(): return model "," Return any prerequisite model that must be created prior to creating an instance of the current model. ",17,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_prerequisite_model(queryset): if not queryset.exists(): for prereq in getattr(queryset.model, 'prerequisite_models', []): model = apps.get_model(prereq) if not model.objects.exists(): return model ``` ###Assistant : Return any prerequisite model that must be created prior to creating an instance of the current model. " 2798,"def filter_empty_gradients(grads_and_vars): grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars filtered = [] vars_with_empty_grads = [] for grad, var in grads_and_vars: if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not filtered: variable = ([v.name for _, v in grads_and_vars],) raise ValueError( f""No gradients provided for any variable: {variable}. "" f""Provided `grads_and_vars` is {grads_and_vars}."" ) if vars_with_empty_grads: logging.warning( ( ""Gradients do not exist for variables %s when minimizing the loss. "" ""If you're using `model.compile()`, did you forget to provide a `loss`"" ""argument?"" ), ([v.name for v in vars_with_empty_grads]), ) return filtered ","Filter out `(grad, var)` pairs that have a gradient equal to `None`.",12,95,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def filter_empty_gradients(grads_and_vars): grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars filtered = [] vars_with_empty_grads = [] for grad, var in grads_and_vars: if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not filtered: variable = ([v.name for _, v in grads_and_vars],) raise ValueError( f""No gradients provided for any variable: {variable}. "" f""Provided `grads_and_vars` is {grads_and_vars}."" ) if vars_with_empty_grads: logging.warning( ( ""Gradients do not exist for variables %s when minimizing the loss. "" ""If you're using `model.compile()`, did you forget to provide a `loss`"" ""argument?"" ), ([v.name for v in vars_with_empty_grads]), ) return filtered ``` ###Assistant : Filter out `(grad, var)` pairs that have a gradient equal to `None`. " 2799,"def real_gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None): r l_1, l_2, l_3, m_1, m_2, m_3 = [ as_int(i) for i in (l_1, l_2, l_3, m_1, m_2, m_3)] # check for quick exits if sum(1 for i in (m_1, m_2, m_3) if i < 0) % 2: return S.Zero # odd number of negative m if (l_1 + l_2 + l_3) % 2: return S.Zero # sum of l is odd lmax = l_2 + l_3 lmin = max(abs(l_2 - l_3), min(abs(m_2 + m_3), abs(m_2 - m_3))) if (lmin + lmax) % 2: lmin += 1 if lmin not in range(lmax, lmin - 2, -2): return S.Zero kron_del = lambda i, j: 1 if i == j else 0 s = lambda e: -1 if e % 2 else 1 # (-1)**e to give +/-1, avoiding float when e<0 A = lambda a, b: (-kron_del(a, b)*s(a-b) + kron_del(a, -b)* s(b)) if b < 0 else 0 B = lambda a, b: (kron_del(a, b) + kron_del(a, -b)*s(a)) if b > 0 else 0 C = lambda a, b: kron_del(abs(a), abs(b))*(kron_del(a, 0)*kron_del(b, 0) + (B(a, b) + I*A(a, b))/sqrt(2)) ugnt = 0 for i in range(-l_1, l_1+1): U1 = C(i, m_1) for j in range(-l_2, l_2+1): U2 = C(j, m_2) U3 = C(-i-j, m_3) ugnt = ugnt + re(U1*U2*U3)*gaunt(l_1, l_2, l_3, i, j, -i-j) if prec is not None: ugnt = ugnt.n(prec) return ugnt "," Calculate the real Gaunt coefficient. Explanation =========== The real Gaunt coefficient is defined as the integral over three real spherical harmonics: .. math:: \begin{aligned} \operatorname{RealGaunt}(l_1,l_2,l_3,m_1,m_2,m_3) &=\int Z^{m_1}_{l_1}(\Omega) Z^{m_2}_{l_2}(\Omega) Z^{m_3}_{l_3}(\Omega) \,d\Omega \\ \end{aligned} Alternatively, it can be defined in terms of the standard Gaunt coefficient by relating the real spherical harmonics to the standard spherical harmonics via a unitary transformation `U`, i.e. `Z^{m}_{l}(\Omega)=\sum_{m'}U^{m}_{m'}Y^{m'}_{l}(\Omega)` [Homeier96]_. The real Gaunt coefficient is then defined as .. math:: \begin{aligned} \operatorname{RealGaunt}(l_1,l_2,l_3,m_1,m_2,m_3) &=\int Z^{m_1}_{l_1}(\Omega) Z^{m_2}_{l_2}(\Omega) Z^{m_3}_{l_3}(\Omega) \,d\Omega \\ &=\sum_{m'_1 m'_2 m'_3} U^{m_1}_{m'_1}U^{m_2}_{m'_2}U^{m_3}_{m'_3} \operatorname{Gaunt}(l_1,l_2,l_3,m'_1,m'_2,m'_3) \end{aligned} The unitary matrix `U` has components .. math:: \begin{aligned} U^m_{m'} = \delta_{|m||m'|}*(\delta_{m'0}\delta_{m0} + \frac{1}{\sqrt{2}}\big[\Theta(m) \big(\delta_{m'm}+(-1)^{m'}\delta_{m'-m}\big)+i\Theta(-m)\big((-1)^{-m} \delta_{m'-m}-\delta_{m'm}*(-1)^{m'-m}\big)\big]) \end{aligned} where `\delta_{ij}` is the Kronecker delta symbol and `\Theta` is a step function defined as .. math:: \begin{aligned} \Theta(x) = \begin{cases} 1 \,\text{for}\, x > 0 \\ 0 \,\text{for}\, x \leq 0 \end{cases} \end{aligned} Parameters ========== l_1, l_2, l_3, m_1, m_2, m_3 : Integer. 
prec - precision, default: ``None``. Providing a precision can drastically speed up the calculation. Returns ======= Rational number times the square root of a rational number. Examples ======== >>> from sympy.physics.wigner import real_gaunt >>> real_gaunt(2,2,4,-1,-1,0) -2/(7*sqrt(pi)) >>> real_gaunt(10,10,20,-9,-9,0).n(64) -0.00002480019791932209313156167... It is an error to use non-integer values for `l` and `m`:: real_gaunt(2.8,0.5,1.3,0,0,0) Traceback (most recent call last): ... ValueError: l values must be integer real_gaunt(2,2,4,0.7,1,-3.4) Traceback (most recent call last): ... ValueError: m values must be integer Notes ===== The real Gaunt coefficient inherits from the standard Gaunt coefficient, the invariance under any permutation of the pairs `(l_i, m_i)` and the requirement that the sum of the `l_i` be even to yield a non-zero value. It also obeys the following symmetry rules: - zero for `l_1`, `l_2`, `l_3` not fulfiling the condition `l_1 \in \{l_{\text{max}}, l_{\text{max}}-2, \ldots, l_{\text{min}}\}`, where `l_{\text{max}} = l_2+l_3`, .. math:: \begin{aligned} l_{\text{min}} = \begin{cases} \kappa(l_2, l_3, m_2, m_3) & \text{if}\, \kappa(l_2, l_3, m_2, m_3) + l_{\text{max}}\, \text{is even} \\ \kappa(l_2, l_3, m_2, m_3)+1 & \text{if}\, \kappa(l_2, l_3, m_2, m_3) + l_{\text{max}}\, \text{is odd}\end{cases} \end{aligned} and `\kappa(l_2, l_3, m_2, m_3) = \max{\big(|l_2-l_3|, \min{\big(|m_2+m_3|, |m_2-m_3|\big)}\big)}` - zero for an odd number of negative `m_i` Algorithms ========== This function uses the algorithms of [Homeier96]_ and [Rasch03]_ to calculate the value of the real Gaunt coefficient exactly. Note that the formula used in [Rasch03]_ contains alternating sums over large factorials and is therefore unsuitable for finite precision arithmetic and only useful for a computer algebra system [Rasch03]_. However, this function can in principle use any algorithm that computes the Gaunt coefficient, so it is suitable for finite precision arithmetic in so far as the algorithm which computes the Gaunt coefficient is. ",429,231,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def real_gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None): r l_1, l_2, l_3, m_1, m_2, m_3 = [ as_int(i) for i in (l_1, l_2, l_3, m_1, m_2, m_3)] # check for quick exits if sum(1 for i in (m_1, m_2, m_3) if i < 0) % 2: return S.Zero # odd number of negative m if (l_1 + l_2 + l_3) % 2: return S.Zero # sum of l is odd lmax = l_2 + l_3 lmin = max(abs(l_2 - l_3), min(abs(m_2 + m_3), abs(m_2 - m_3))) if (lmin + lmax) % 2: lmin += 1 if lmin not in range(lmax, lmin - 2, -2): return S.Zero kron_del = lambda i, j: 1 if i == j else 0 s = lambda e: -1 if e % 2 else 1 # (-1)**e to give +/-1, avoiding float when e<0 A = lambda a, b: (-kron_del(a, b)*s(a-b) + kron_del(a, -b)* s(b)) if b < 0 else 0 B = lambda a, b: (kron_del(a, b) + kron_del(a, -b)*s(a)) if b > 0 else 0 C = lambda a, b: kron_del(abs(a), abs(b))*(kron_del(a, 0)*kron_del(b, 0) + (B(a, b) + I*A(a, b))/sqrt(2)) ugnt = 0 for i in range(-l_1, l_1+1): U1 = C(i, m_1) for j in range(-l_2, l_2+1): U2 = C(j, m_2) U3 = C(-i-j, m_3) ugnt = ugnt + re(U1*U2*U3)*gaunt(l_1, l_2, l_3, i, j, -i-j) if prec is not None: ugnt = ugnt.n(prec) return ugnt ``` ###Assistant : Calculate the real Gaunt coefficient. Explanation =========== The real Gaunt coefficient is defined as the integral over three real spherical harmonics: .. 
math:: \begin{aligned} \operatorname{RealGaunt}(l_1,l_2,l_3,m_1,m_2,m_3) &=\int Z^{m_1}_{l_1}(\Omega) Z^{m_2}_{l_2}(\Omega) Z^{m_3}_{l_3}(\Omega) \,d\Omega \\ \end{aligned} Alternatively, it can be defined in terms of the standard Gaunt coefficient by relating the real spherical harmonics to the standard spherical harmonics via a unitary transformation `U`, i.e. `Z^{m}_{l}(\Omega)=\sum_{m'}U^{m}_{m'}Y^{m'}_{l}(\Omega)` [Homeier96]_. The real Gaunt coefficient is then defined as .. math:: \begin{aligned} \operatorname{RealGaunt}(l_1,l_2,l_3,m_1,m_2,m_3) &=\int Z^{m_1}_{l_1}(\Omega) Z^{m_2}_{l_2}(\Omega) Z^{m_3}_{l_3}(\Omega) \,d\Omega \\ &=\sum_{m'_1 m'_2 m'_3} U^{m_1}_{m'_1}U^{m_2}_{m'_2}U^{m_3}_{m'_3} \operatorname{Gaunt}(l_1,l_2,l_3,m'_1,m'_2,m'_3) \end{aligned} The unitary matrix `U` has components .. math:: \begin{aligned} U^m_{m'} = \delta_{|m||m'|}*(\delta_{m'0}\delta_{m0} + \frac{1}{\sqrt{2}}\big[\Theta(m) \big(\delta_{m'm}+(-1)^{m'}\delta_{m'-m}\big)+i\Theta(-m)\big((-1)^{-m} \delta_{m'-m}-\delta_{m'm}*(-1)^{m'-m}\big)\big]) \end{aligned} where `\delta_{ij}` is the Kronecker delta symbol and `\Theta` is a step function defined as .. math:: \begin{aligned} \Theta(x) = \begin{cases} 1 \,\text{for}\, x > 0 \\ 0 \,\text{for}\, x \leq 0 \end{cases} \end{aligned} Parameters ========== l_1, l_2, l_3, m_1, m_2, m_3 : Integer. prec - precision, default: ``None``. Providing a precision can drastically speed up the calculation. Returns ======= Rational number times the square root of a rational number. Examples ======== >>> from sympy.physics.wigner import real_gaunt >>> real_gaunt(2,2,4,-1,-1,0) -2/(7*sqrt(pi)) >>> real_gaunt(10,10,20,-9,-9,0).n(64) -0.00002480019791932209313156167... It is an error to use non-integer values for `l` and `m`:: real_gaunt(2.8,0.5,1.3,0,0,0) Traceback (most recent call last): ... ValueError: l values must be integer real_gaunt(2,2,4,0.7,1,-3.4) Traceback (most recent call last): ... ValueError: m values must be integer Notes ===== The real Gaunt coefficient inherits from the standard Gaunt coefficient, the invariance under any permutation of the pairs `(l_i, m_i)` and the requirement that the sum of the `l_i` be even to yield a non-zero value. It also obeys the following symmetry rules: - zero for `l_1`, `l_2`, `l_3` not fulfiling the condition `l_1 \in \{l_{\text{max}}, l_{\text{max}}-2, \ldots, l_{\text{min}}\}`, where `l_{\text{max}} = l_2+l_3`, .. math:: \begin{aligned} l_{\text{min}} = \begin{cases} \kappa(l_2, l_3, m_2, m_3) & \text{if}\, \kappa(l_2, l_3, m_2, m_3) + l_{\text{max}}\, \text{is even} \\ \kappa(l_2, l_3, m_2, m_3)+1 & \text{if}\, \kappa(l_2, l_3, m_2, m_3) + l_{\text{max}}\, \text{is odd}\end{cases} \end{aligned} and `\kappa(l_2, l_3, m_2, m_3) = \max{\big(|l_2-l_3|, \min{\big(|m_2+m_3|, |m_2-m_3|\big)}\big)}` - zero for an odd number of negative `m_i` Algorithms ========== This function uses the algorithms of [Homeier96]_ and [Rasch03]_ to calculate the value of the real Gaunt coefficient exactly. Note that the formula used in [Rasch03]_ contains alternating sums over large factorials and is therefore unsuitable for finite precision arithmetic and only useful for a computer algebra system [Rasch03]_. However, this function can in principle use any algorithm that computes the Gaunt coefficient, so it is suitable for finite precision arithmetic in so far as the algorithm which computes the Gaunt coefficient is. 
" 2800,"async def test_missing_tones_list(hass): siren = MockSirenEntity(SirenEntityFeature.TONES, [""a"", ""b""]) siren.hass = hass with pytest.raises(ValueError): process_turn_on_params(siren, {""tone"": ""test""}) ",Test ValueError when setting a tone that is missing from available_tones list.,12,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_missing_tones_list(hass): siren = MockSirenEntity(SirenEntityFeature.TONES, [""a"", ""b""]) siren.hass = hass with pytest.raises(ValueError): process_turn_on_params(siren, {""tone"": ""test""}) ``` ###Assistant : Test ValueError when setting a tone that is missing from available_tones list. " 2801,"def task_failed_deps(args): dag = get_dag(args.subdir, args.dag_id) task = dag.get_task(task_id=args.task_id) ti = _get_ti(task, args.execution_date_or_run_id, args.map_index) dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS) failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context)) # TODO, Do we want to print or log this if failed_deps: print(""Task instance dependencies not met:"") for dep in failed_deps: print(f""{dep.dep_name}: {dep.reason}"") else: print(""Task instance dependencies are all met."") @cli_utils.action_cli(check_db=False) @suppress_logs_and_warning"," Returns the unmet dependencies for a task instance from the perspective of the scheduler (i.e. why a task instance doesn't get scheduled and then queued by the scheduler, and then run by an executor). >>> airflow tasks failed-deps tutorial sleep 2015-01-01 Task instance dependencies not met: Dagrun Running: Task instance's dagrun did not exist: Unknown reason Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks to have succeeded, but found 1 non-success(es). ",73,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def task_failed_deps(args): dag = get_dag(args.subdir, args.dag_id) task = dag.get_task(task_id=args.task_id) ti = _get_ti(task, args.execution_date_or_run_id, args.map_index) dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS) failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context)) # TODO, Do we want to print or log this if failed_deps: print(""Task instance dependencies not met:"") for dep in failed_deps: print(f""{dep.dep_name}: {dep.reason}"") else: print(""Task instance dependencies are all met."") @cli_utils.action_cli(check_db=False) @suppress_logs_and_warning ``` ###Assistant : Returns the unmet dependencies for a task instance from the perspective of the scheduler (i.e. why a task instance doesn't get scheduled and then queued by the scheduler, and then run by an executor). >>> airflow tasks failed-deps tutorial sleep 2015-01-01 Task instance dependencies not met: Dagrun Running: Task instance's dagrun did not exist: Unknown reason Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks to have succeeded, but found 1 non-success(es). " 2802,"def load(self, rawdata): if isinstance(rawdata, str): self.__parse_string(rawdata) else: # self.update() wouldn't call our custom __setitem__ for key, value in rawdata.items(): self[key] = value return ","Load cookies from a string (presumably HTTP_COOKIE) or from a dictionary. Loading cookies from a dictionary 'd' is equivalent to calling: map(Cookie.__setitem__, d.keys(), d.values()) ",24,24,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def load(self, rawdata): if isinstance(rawdata, str): self.__parse_string(rawdata) else: # self.update() wouldn't call our custom __setitem__ for key, value in rawdata.items(): self[key] = value return ``` ###Assistant : Load cookies from a string (presumably HTTP_COOKIE) or from a dictionary. Loading cookies from a dictionary 'd' is equivalent to calling: map(Cookie.__setitem__, d.keys(), d.values()) " 2803,"def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False): backup = {name: value for name, value in self._major_tick_kw.items() if name in ['tick1On', 'tick2On', 'label1On', 'label2On']} self._major_tick_kw.clear() if keep_tick_and_label_visibility: self._major_tick_kw.update(backup) self._major_tick_kw['gridOn'] = ( mpl.rcParams['axes.grid'] and mpl.rcParams['axes.grid.which'] in ('both', 'major')) "," Reset major tick params to defaults. Shared subplots pre-configure tick and label visibility. To keep this beyond an Axis.clear() operation, we may *keep_tick_and_label_visibility*. ",23,32,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False): backup = {name: value for name, value in self._major_tick_kw.items() if name in ['tick1On', 'tick2On', 'label1On', 'label2On']} self._major_tick_kw.clear() if keep_tick_and_label_visibility: self._major_tick_kw.update(backup) self._major_tick_kw['gridOn'] = ( mpl.rcParams['axes.grid'] and mpl.rcParams['axes.grid.which'] in ('both', 'major')) ``` ###Assistant : Reset major tick params to defaults. Shared subplots pre-configure tick and label visibility. To keep this beyond an Axis.clear() operation, we may *keep_tick_and_label_visibility*. " 2804,"def test_class_variable_to_instance(self): logger = self.logger self.assertEqual(logger._to_exclude, logger._exclude_results) self.assertEqual(logger._to_system, logger._system_results) self.assertEqual(logger._to_other, logger._other_results) self.assertEqual(logger._to_episodes, logger._episode_results) ","Test that class variables get properly assigned to instance variables. ",10,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_class_variable_to_instance(self): logger = self.logger self.assertEqual(logger._to_exclude, logger._exclude_results) self.assertEqual(logger._to_system, logger._system_results) self.assertEqual(logger._to_other, logger._other_results) self.assertEqual(logger._to_episodes, logger._episode_results) ``` ###Assistant : Test that class variables get properly assigned to instance variables. 
" 2805,"def detect_all(byte_str, ignore_threshold=False): if not isinstance(byte_str, bytearray): if not isinstance(byte_str, bytes): raise TypeError( f""Expected object of type bytes or bytearray, got: {type(byte_str)}"" ) byte_str = bytearray(byte_str) detector = UniversalDetector() detector.feed(byte_str) detector.close() if detector.input_state == InputState.HIGH_BYTE: results = [] probers = [] for prober in detector.charset_probers: if hasattr(prober, ""probers""): probers.extend(p for p in prober.probers) else: probers.append(prober) for prober in probers: if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD: charset_name = prober.charset_name or """" lower_charset_name = charset_name.lower() # Use Windows encoding name instead of ISO-8859 if we saw any # extra Windows-specific bytes if lower_charset_name.startswith(""iso-8859"") and detector.has_win_bytes: charset_name = detector.ISO_WIN_MAP.get( lower_charset_name, charset_name ) results.append( { ""encoding"": charset_name, ""confidence"": prober.get_confidence(), ""language"": prober.language, } ) if len(results) > 0: return sorted(results, key=lambda result: -result[""confidence""]) return [detector.result] "," Detect all the possible encodings of the given byte string. :param byte_str: The byte sequence to examine. :type byte_str: ``bytes`` or ``bytearray`` :param ignore_threshold: Include encodings that are below ``UniversalDetector.MINIMUM_THRESHOLD`` in results. :type ignore_threshold: ``bool`` ",35,120,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def detect_all(byte_str, ignore_threshold=False): if not isinstance(byte_str, bytearray): if not isinstance(byte_str, bytes): raise TypeError( f""Expected object of type bytes or bytearray, got: {type(byte_str)}"" ) byte_str = bytearray(byte_str) detector = UniversalDetector() detector.feed(byte_str) detector.close() if detector.input_state == InputState.HIGH_BYTE: results = [] probers = [] for prober in detector.charset_probers: if hasattr(prober, ""probers""): probers.extend(p for p in prober.probers) else: probers.append(prober) for prober in probers: if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD: charset_name = prober.charset_name or """" lower_charset_name = charset_name.lower() # Use Windows encoding name instead of ISO-8859 if we saw any # extra Windows-specific bytes if lower_charset_name.startswith(""iso-8859"") and detector.has_win_bytes: charset_name = detector.ISO_WIN_MAP.get( lower_charset_name, charset_name ) results.append( { ""encoding"": charset_name, ""confidence"": prober.get_confidence(), ""language"": prober.language, } ) if len(results) > 0: return sorted(results, key=lambda result: -result[""confidence""]) return [detector.result] ``` ###Assistant : Detect all the possible encodings of the given byte string. :param byte_str: The byte sequence to examine. :type byte_str: ``bytes`` or ``bytearray`` :param ignore_threshold: Include encodings that are below ``UniversalDetector.MINIMUM_THRESHOLD`` in results. :type ignore_threshold: ``bool`` " 2806,"def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): if names is None: # simple value lookup return cls.__new__(cls, value) # otherwise, functional API: we're creating a new Enum type return cls._create_( value, names, module=module, qualname=qualname, type=type, start=start, ) "," Either returns an existing member, or creates a new enum class. 
This method is used both when an enum class is given a value to match to an enumeration member (i.e. Color(3)) and for the functional API (i.e. Color = Enum('Color', names='RED GREEN BLUE')). When used for the functional API: `value` will be the name of the new class. `names` should be either a string of white-space/comma delimited names (values will start at `start`), or an iterator/mapping of name, value pairs. `module` should be set to the module this class is being created in; if it is not set, an attempt to find that module will be made, but if it fails the class will not be picklable. `qualname` should be set to the actual location this class can be found at in its module; by default it is set to the global scope. If this is not correct, unpickling will fail in some circumstances. `type`, if set, will be mixed in as the first base class. ",167,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): if names is None: # simple value lookup return cls.__new__(cls, value) # otherwise, functional API: we're creating a new Enum type return cls._create_( value, names, module=module, qualname=qualname, type=type, start=start, ) ``` ###Assistant : Either returns an existing member, or creates a new enum class. This method is used both when an enum class is given a value to match to an enumeration member (i.e. Color(3)) and for the functional API (i.e. Color = Enum('Color', names='RED GREEN BLUE')). When used for the functional API: `value` will be the name of the new class. `names` should be either a string of white-space/comma delimited names (values will start at `start`), or an iterator/mapping of name, value pairs. `module` should be set to the module this class is being created in; if it is not set, an attempt to find that module will be made, but if it fails the class will not be picklable. `qualname` should be set to the actual location this class can be found at in its module; by default it is set to the global scope. If this is not correct, unpickling will fail in some circumstances. `type`, if set, will be mixed in as the first base class. " 2807,"def edges2dot(edges, shapes=None, attr=None): if not shapes: shapes = dict() if not attr: attr = dict() dot_string = ""digraph G {\n"" for pair in attr.items(): dot_string += f""{pair[0]} = {pair[1]};\n"" for edge in edges: for shape in shapes.items(): for node in range(2): if shape[0] in repr(edge[node]): dot_string += f'""{edge[node]}"" [shape = {shape[1]}];\n' dot_string += f'""{edge[0]}"" -> ""{edge[1]}"";\n' dot_string += ""}\n"" return dot_string "," :param edges: the set (or list) of edges of a directed graph. :return dot_string: a representation of 'edges' as a string in the DOT graph language, which can be converted to an image by the 'dot' program from the Graphviz package, or nltk.parse.dependencygraph.dot2img(dot_string). :param shapes: dictionary of strings that trigger a specified shape. :param attr: dictionary with global graph attributes >>> import nltk >>> from nltk.util import edges2dot >>> print(edges2dot([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')])) digraph G { ""A"" -> ""B""; ""A"" -> ""C""; ""B"" -> ""C""; ""C"" -> ""B""; } ",94,62,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def edges2dot(edges, shapes=None, attr=None): if not shapes: shapes = dict() if not attr: attr = dict() dot_string = ""digraph G {\n"" for pair in attr.items(): dot_string += f""{pair[0]} = {pair[1]};\n"" for edge in edges: for shape in shapes.items(): for node in range(2): if shape[0] in repr(edge[node]): dot_string += f'""{edge[node]}"" [shape = {shape[1]}];\n' dot_string += f'""{edge[0]}"" -> ""{edge[1]}"";\n' dot_string += ""}\n"" return dot_string ``` ###Assistant : :param edges: the set (or list) of edges of a directed graph. :return dot_string: a representation of 'edges' as a string in the DOT graph language, which can be converted to an image by the 'dot' program from the Graphviz package, or nltk.parse.dependencygraph.dot2img(dot_string). :param shapes: dictionary of strings that trigger a specified shape. :param attr: dictionary with global graph attributes >>> import nltk >>> from nltk.util import edges2dot >>> print(edges2dot([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')])) digraph G { ""A"" -> ""B""; ""A"" -> ""C""; ""B"" -> ""C""; ""C"" -> ""B""; } " 2808,"def _draw_line_numbers(self): if not self.line_numbers: return for p in range(self.maxlineno): n = p + self.line_number_start if (n % self.line_number_step) == 0: self._draw_linenumber(p, n) "," Create drawables for the line numbers. ",6,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _draw_line_numbers(self): if not self.line_numbers: return for p in range(self.maxlineno): n = p + self.line_number_start if (n % self.line_number_step) == 0: self._draw_linenumber(p, n) ``` ###Assistant : Create drawables for the line numbers. " 2809,"def export(ctx, directory): directory_path = Path(directory) if directory_path.absolute == Path.cwd().absolute: console.print( , style=""red bold"", end="""", ) proceed = input().lower() == ""y"" else: proceed = True if proceed: if not directory_path.is_dir(): console.print(f""Creating folder: {directory}."", style=""red bold"") directory_path.mkdir(parents=True) ctx.invoke(write) from_path = Path.cwd() / ""manim.cfg"" to_path = directory_path / ""manim.cfg"" console.print(f""Exported final Config at {from_path} to {to_path}."") else: console.print(""Aborted..."", style=""red bold"") ","You are reading the config from the same directory you are exporting to. This means that the exported config will overwrite the config for this directory. Are you sure you want to continue? (y/n)",34,58,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def export(ctx, directory): directory_path = Path(directory) if directory_path.absolute == Path.cwd().absolute: console.print( , style=""red bold"", end="""", ) proceed = input().lower() == ""y"" else: proceed = True if proceed: if not directory_path.is_dir(): console.print(f""Creating folder: {directory}."", style=""red bold"") directory_path.mkdir(parents=True) ctx.invoke(write) from_path = Path.cwd() / ""manim.cfg"" to_path = directory_path / ""manim.cfg"" console.print(f""Exported final Config at {from_path} to {to_path}."") else: console.print(""Aborted..."", style=""red bold"") ``` ###Assistant : You are reading the config from the same directory you are exporting to. This means that the exported config will overwrite the config for this directory. Are you sure you want to continue? 
(y/n) " 2810,"def update_keymap(self, name, key): if name not in self._tools: raise KeyError(f'{name!r} not in Tools') self._remove_keys(name) if isinstance(key, str): key = [key] for k in key: if k in self._keys: _api.warn_external( f'Key {k} changed from {self._keys[k]} to {name}') self._keys[k] = name "," Set the keymap to associate with the specified tool. Parameters ---------- name : str Name of the Tool. key : str or list of str Keys to associate with the tool. ",31,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_keymap(self, name, key): if name not in self._tools: raise KeyError(f'{name!r} not in Tools') self._remove_keys(name) if isinstance(key, str): key = [key] for k in key: if k in self._keys: _api.warn_external( f'Key {k} changed from {self._keys[k]} to {name}') self._keys[k] = name ``` ###Assistant : Set the keymap to associate with the specified tool. Parameters ---------- name : str Name of the Tool. key : str or list of str Keys to associate with the tool. " 2811,"def named_parameters(self, *args, **kwargs): arch = kwargs.pop('arch', False) for name, p in super().named_parameters(*args, **kwargs): if any(name == par_name for par_name in self._arch_parameter_names): if arch: yield name, p else: if not arch: yield name, p ",Named parameters excluding architecture parameters.,5,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def named_parameters(self, *args, **kwargs): arch = kwargs.pop('arch', False) for name, p in super().named_parameters(*args, **kwargs): if any(name == par_name for par_name in self._arch_parameter_names): if arch: yield name, p else: if not arch: yield name, p ``` ###Assistant : Named parameters excluding architecture parameters. " 2812,"def unzip3(xyzs): # Note: we deliberately don't use zip(*xyzs) because it is lazily evaluated, # is too permissive about inputs, and does not guarantee a length-3 output. xs = [] ys = [] zs = [] for x, y, z in xyzs: xs.append(x) ys.append(y) zs.append(z) return tuple(xs), tuple(ys), tuple(zs) ",Unzip sequence of length-3 tuples into three tuples.,8,49,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unzip3(xyzs): # Note: we deliberately don't use zip(*xyzs) because it is lazily evaluated, # is too permissive about inputs, and does not guarantee a length-3 output. xs = [] ys = [] zs = [] for x, y, z in xyzs: xs.append(x) ys.append(y) zs.append(z) return tuple(xs), tuple(ys), tuple(zs) ``` ###Assistant : Unzip sequence of length-3 tuples into three tuples. " 2813,"def test_json_get_subscribers_for_guest_user(self) -> None: guest_user = self.example_user(""polonius"") never_subscribed = gather_subscriptions_helper(guest_user, True).never_subscribed # A guest user can only see never subscribed streams that are web-public. # For Polonius, the only web-public stream that he is not subscribed at # this point is Rome. 
self.assert_length(never_subscribed, 1) web_public_stream_id = never_subscribed[0][""stream_id""] result = self.client_get(f""/json/streams/{web_public_stream_id}/members"") self.assert_json_success(result) result_dict = result.json() self.assertIn(""subscribers"", result_dict) self.assertIsInstance(result_dict[""subscribers""], list) self.assertGreater(len(result_dict[""subscribers""]), 0) "," Guest users should have access to subscribers of web-public streams, even if they aren't subscribed or have never subscribed to that stream. ",22,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_json_get_subscribers_for_guest_user(self) -> None: guest_user = self.example_user(""polonius"") never_subscribed = gather_subscriptions_helper(guest_user, True).never_subscribed # A guest user can only see never subscribed streams that are web-public. # For Polonius, the only web-public stream that he is not subscribed at # this point is Rome. self.assert_length(never_subscribed, 1) web_public_stream_id = never_subscribed[0][""stream_id""] result = self.client_get(f""/json/streams/{web_public_stream_id}/members"") self.assert_json_success(result) result_dict = result.json() self.assertIn(""subscribers"", result_dict) self.assertIsInstance(result_dict[""subscribers""], list) self.assertGreater(len(result_dict[""subscribers""]), 0) ``` ###Assistant : Guest users should have access to subscribers of web-public streams, even if they aren't subscribed or have never subscribed to that stream. " 2814,"def _reconnect(self): if not self._isconnected(): try: self._connect() except http_client.socket.error as e: # Attempt to reconnect if the connection was refused if e.errno == 61 or e.errno == 10061: # errno 61 is the ""Connection Refused"" error time.sleep(self._delay) self._delay += self._delay # fibonacii delays self._tries += 1 if self._tries < self.maxtries: self._reconnect() else: self._reset_retries() raise e else: # Unknown scenario raise e # Reconnect worked - reset _closed self._closed = False ","Connect if disconnected. Retry self.maxtries times with delays ",8,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _reconnect(self): if not self._isconnected(): try: self._connect() except http_client.socket.error as e: # Attempt to reconnect if the connection was refused if e.errno == 61 or e.errno == 10061: # errno 61 is the ""Connection Refused"" error time.sleep(self._delay) self._delay += self._delay # fibonacii delays self._tries += 1 if self._tries < self.maxtries: self._reconnect() else: self._reset_retries() raise e else: # Unknown scenario raise e # Reconnect worked - reset _closed self._closed = False ``` ###Assistant : Connect if disconnected. Retry self.maxtries times with delays " 2815,"def list_master(saltenv=None, prefix=""""): if not saltenv: saltenv = __opts__[""saltenv""] or ""base"" return _client().file_list(saltenv, prefix) "," .. versionchanged:: 3005 ``saltenv`` will use value from config if not explicitly set List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ",30,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list_master(saltenv=None, prefix=""""): if not saltenv: saltenv = __opts__[""saltenv""] or ""base"" return _client().file_list(saltenv, prefix) ``` ###Assistant : .. 
versionchanged:: 3005 ``saltenv`` will use value from config if not explicitly set List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master " 2816,"def test_bert_hf_tokenizer_parity(tmpdir, pretrained_model_name_or_path): from ludwig.utils.tokenizers import get_hf_tokenizer, HFTokenizer inputs = ""Hello, ``I'm'' ónë of 1,205,000 sentences!"" hf_tokenizer = HFTokenizer(pretrained_model_name_or_path) torchtext_tokenizer = get_hf_tokenizer(pretrained_model_name_or_path) # Ensure that the tokenizer is scriptable tokenizer_path = os.path.join(tmpdir, ""tokenizer.pt"") torch.jit.script(torchtext_tokenizer).save(tokenizer_path) torchtext_tokenizer = torch.jit.load(tokenizer_path) token_ids_expected = hf_tokenizer(inputs) token_ids = torchtext_tokenizer(inputs) assert token_ids_expected == token_ids ","Tests the BERTTokenizer implementation. Asserts both tokens and token IDs are the same by initializing the BERTTokenizer as a standalone tokenizer and as a HF tokenizer. ",26,47,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_bert_hf_tokenizer_parity(tmpdir, pretrained_model_name_or_path): from ludwig.utils.tokenizers import get_hf_tokenizer, HFTokenizer inputs = ""Hello, ``I'm'' ónë of 1,205,000 sentences!"" hf_tokenizer = HFTokenizer(pretrained_model_name_or_path) torchtext_tokenizer = get_hf_tokenizer(pretrained_model_name_or_path) # Ensure that the tokenizer is scriptable tokenizer_path = os.path.join(tmpdir, ""tokenizer.pt"") torch.jit.script(torchtext_tokenizer).save(tokenizer_path) torchtext_tokenizer = torch.jit.load(tokenizer_path) token_ids_expected = hf_tokenizer(inputs) token_ids = torchtext_tokenizer(inputs) assert token_ids_expected == token_ids ``` ###Assistant : Tests the BERTTokenizer implementation. Asserts both tokens and token IDs are the same by initializing the BERTTokenizer as a standalone tokenizer and as a HF tokenizer. " 2817,"def get_pe_matching_query(amount_condition, account_from_to, transaction): # get matching payment entries query if transaction.deposit > 0: currency_field = ""paid_to_account_currency as currency"" else: currency_field = ""paid_from_account_currency as currency"" return f "," SELECT (CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END + CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END + 1 ) AS rank, 'Payment Entry' as doctype, name, paid_amount, reference_no, reference_date, party, party_type, posting_date, {currency_field} FROM `tabPayment Entry` WHERE paid_amount {amount_condition} %(amount)s AND docstatus = 1 AND payment_type IN (%(payment_type)s, 'Internal Transfer') AND ifnull(clearance_date, '') = """" AND {account_from_to} = %(bank_account)s ",68,27,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def get_pe_matching_query(amount_condition, account_from_to, transaction): # get matching payment entries query if transaction.deposit > 0: currency_field = ""paid_to_account_currency as currency"" else: currency_field = ""paid_from_account_currency as currency"" return f ``` ###Assistant : SELECT (CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END + CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END + 1 ) AS rank, 'Payment Entry' as doctype, name, paid_amount, reference_no, reference_date, party, party_type, posting_date, {currency_field} FROM `tabPayment Entry` WHERE paid_amount {amount_condition} %(amount)s AND docstatus = 1 AND payment_type IN (%(payment_type)s, 'Internal Transfer') AND ifnull(clearance_date, '') = """" AND {account_from_to} = %(bank_account)s " 2818,"def _check_list_display_links(self, obj): from django.contrib.admin.options import ModelAdmin if obj.list_display_links is None: return [] elif not isinstance(obj.list_display_links, (list, tuple)): return must_be( ""a list, a tuple, or None"", option=""list_display_links"", obj=obj, id=""admin.E110"", ) # Check only if ModelAdmin.get_list_display() isn't overridden. elif obj.get_list_display.__func__ is ModelAdmin.get_list_display: return list( chain.from_iterable( self._check_list_display_links_item( obj, field_name, ""list_display_links[%d]"" % index ) for index, field_name in enumerate(obj.list_display_links) ) ) return [] ",Check that list_display_links is a unique subset of list_display.,9,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_list_display_links(self, obj): from django.contrib.admin.options import ModelAdmin if obj.list_display_links is None: return [] elif not isinstance(obj.list_display_links, (list, tuple)): return must_be( ""a list, a tuple, or None"", option=""list_display_links"", obj=obj, id=""admin.E110"", ) # Check only if ModelAdmin.get_list_display() isn't overridden. elif obj.get_list_display.__func__ is ModelAdmin.get_list_display: return list( chain.from_iterable( self._check_list_display_links_item( obj, field_name, ""list_display_links[%d]"" % index ) for index, field_name in enumerate(obj.list_display_links) ) ) return [] ``` ###Assistant : Check that list_display_links is a unique subset of list_display. " 2819,"def min_mireds(self) -> int: min_mireds = self.cluster.get(""color_temp_physical_min"", self.MIN_MIREDS) if min_mireds == 0: self.warning( ""[Min mireds is 0, setting to %s] Please open an issue on the quirks repo to have this device corrected"", self.MIN_MIREDS, ) min_mireds = self.MIN_MIREDS return min_mireds ",Return the coldest color_temp that this channel supports.,8,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def min_mireds(self) -> int: min_mireds = self.cluster.get(""color_temp_physical_min"", self.MIN_MIREDS) if min_mireds == 0: self.warning( ""[Min mireds is 0, setting to %s] Please open an issue on the quirks repo to have this device corrected"", self.MIN_MIREDS, ) min_mireds = self.MIN_MIREDS return min_mireds ``` ###Assistant : Return the coldest color_temp that this channel supports. 
" 2820,"def pop_screen(self) -> Screen: screen_stack = self._screen_stack if len(screen_stack) <= 1: raise ScreenStackError( ""Can't pop screen; there must be at least one screen on the stack"" ) screen = screen_stack.pop() screen.post_message_no_wait(events.ScreenSuspend(self)) self.screen._screen_resized(self.size) self.screen.post_message_no_wait(events.ScreenResume(self)) return screen ","Pop the current screen from the stack, and switch to the previous screen. Returns: Screen: The screen that was replaced. ",20,35,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pop_screen(self) -> Screen: screen_stack = self._screen_stack if len(screen_stack) <= 1: raise ScreenStackError( ""Can't pop screen; there must be at least one screen on the stack"" ) screen = screen_stack.pop() screen.post_message_no_wait(events.ScreenSuspend(self)) self.screen._screen_resized(self.size) self.screen.post_message_no_wait(events.ScreenResume(self)) return screen ``` ###Assistant : Pop the current screen from the stack, and switch to the previous screen. Returns: Screen: The screen that was replaced. " 2821,"def test_notify_interested_services_ephemeral(self): interested_service = self._mkservice(is_interested=True) services = [interested_service] self.mock_store.get_app_services.return_value = services self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable( 579 ) event = Mock(event_id=""event_1"") self.event_source.sources.receipt.get_new_events_as.return_value = ( make_awaitable(([event], None)) ) self.handler.notify_interested_services_ephemeral( ""receipt_key"", 580, [""@fakerecipient:example.com""] ) self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( interested_service, ephemeral=[event] ) self.mock_store.set_appservice_stream_type_pos.assert_called_once_with( interested_service, ""read_receipt"", 580, ) "," Test sending ephemeral events to the appservice handler are scheduled to be pushed out to interested appservices, and that the stream ID is updated accordingly. ",25,39,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_notify_interested_services_ephemeral(self): interested_service = self._mkservice(is_interested=True) services = [interested_service] self.mock_store.get_app_services.return_value = services self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable( 579 ) event = Mock(event_id=""event_1"") self.event_source.sources.receipt.get_new_events_as.return_value = ( make_awaitable(([event], None)) ) self.handler.notify_interested_services_ephemeral( ""receipt_key"", 580, [""@fakerecipient:example.com""] ) self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( interested_service, ephemeral=[event] ) self.mock_store.set_appservice_stream_type_pos.assert_called_once_with( interested_service, ""read_receipt"", 580, ) ``` ###Assistant : Test sending ephemeral events to the appservice handler are scheduled to be pushed out to interested appservices, and that the stream ID is updated accordingly. 
" 2822,"def convert(self, value): if not isinstance(value, ConvertingDict) and isinstance(value, dict): value = ConvertingDict(value) value.configurator = self elif not isinstance(value, ConvertingList) and isinstance(value, list): value = ConvertingList(value) value.configurator = self elif not isinstance(value, ConvertingTuple) and\ isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self elif isinstance(value, string_types): m = self.CONVERT_PATTERN.match(value) if m: d = m.groupdict() prefix = d['prefix'] converter = self.value_converters.get(prefix, None) if converter: suffix = d['suffix'] converter = getattr(self, converter) value = converter(suffix) return value "," Convert values to an appropriate type. dicts, lists and tuples are replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. ",33,74,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert(self, value): if not isinstance(value, ConvertingDict) and isinstance(value, dict): value = ConvertingDict(value) value.configurator = self elif not isinstance(value, ConvertingList) and isinstance(value, list): value = ConvertingList(value) value.configurator = self elif not isinstance(value, ConvertingTuple) and\ isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self elif isinstance(value, string_types): m = self.CONVERT_PATTERN.match(value) if m: d = m.groupdict() prefix = d['prefix'] converter = self.value_converters.get(prefix, None) if converter: suffix = d['suffix'] converter = getattr(self, converter) value = converter(suffix) return value ``` ###Assistant : Convert values to an appropriate type. dicts, lists and tuples are replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. " 2823,"def test_parse_transition(duration, parsed_duration): css = f stylesheet = Stylesheet() stylesheet.parse(css) rule = stylesheet.rules[0].styles assert len(stylesheet.rules) == 1 assert len(stylesheet.rules[0].errors) == 0 assert rule.transitions == { ""offset"": Transition(duration=parsed_duration, easing=""in_out_cubic"", delay=0.0) } ","#some-widget {{ transition: offset {duration} in_out_cubic; }} ",7,30,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_parse_transition(duration, parsed_duration): css = f stylesheet = Stylesheet() stylesheet.parse(css) rule = stylesheet.rules[0].styles assert len(stylesheet.rules) == 1 assert len(stylesheet.rules[0].errors) == 0 assert rule.transitions == { ""offset"": Transition(duration=parsed_duration, easing=""in_out_cubic"", delay=0.0) } ``` ###Assistant : #some-widget {{ transition: offset {duration} in_out_cubic; }} " 2824,"def test_meta_charset(self) -> None: encodings = _get_html_media_encodings( b, ""text/html"", ) self.assertEqual(list(encodings), [""ascii"", ""utf-8"", ""cp1252""]) # A less well-formed version. encodings = _get_html_media_encodings( b, ""text/html"", ) self.assertEqual(list(encodings), [""ascii"", ""utf-8"", ""cp1252""]) ","A character encoding is found via the meta tag. < meta charset = ascii> ",22,29,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_meta_charset(self) -> None: encodings = _get_html_media_encodings( b, ""text/html"", ) self.assertEqual(list(encodings), [""ascii"", ""utf-8"", ""cp1252""]) # A less well-formed version. encodings = _get_html_media_encodings( b, ""text/html"", ) self.assertEqual(list(encodings), [""ascii"", ""utf-8"", ""cp1252""]) ``` ###Assistant : A character encoding is found via the meta tag. < meta charset = ascii> " 2825,"def _build_amps_list(self, amp_value, processlist): ret = [] try: # Search in both cmdline and name (for kernel thread, see #1261) for p in processlist: if (re.search(amp_value.regex(), p['name']) is not None) or ( p['cmdline'] is not None and p['cmdline'] != [] and re.search(amp_value.regex(), ' '.join(p['cmdline'])) is not None ): ret.append( {'pid': p['pid'], 'cpu_percent': p['cpu_percent'], 'memory_percent': p['memory_percent']} ) except (TypeError, KeyError) as e: logger.debug(""Can not build AMPS list ({})"".format(e)) return ret ","Return the AMPS process list according to the amp_value Search application monitored processes by a regular expression ",17,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _build_amps_list(self, amp_value, processlist): ret = [] try: # Search in both cmdline and name (for kernel thread, see #1261) for p in processlist: if (re.search(amp_value.regex(), p['name']) is not None) or ( p['cmdline'] is not None and p['cmdline'] != [] and re.search(amp_value.regex(), ' '.join(p['cmdline'])) is not None ): ret.append( {'pid': p['pid'], 'cpu_percent': p['cpu_percent'], 'memory_percent': p['memory_percent']} ) except (TypeError, KeyError) as e: logger.debug(""Can not build AMPS list ({})"".format(e)) return ret ``` ###Assistant : Return the AMPS process list according to the amp_value Search application monitored processes by a regular expression " 2826,"def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument return inputs ","This is where the layer's logic lives. The `call()` method may not create state (except in its first invocation, wrapping the creation of variables or other resources in `tf.init_scope()`). It is recommended to create state in `__init__()`, or the `build()` method that is called automatically before `call()` executes the first time. Args: inputs: Input tensor, or dict/list/tuple of input tensors. The first positional `inputs` argument is subject to special rules: - `inputs` must be explicitly passed. A layer cannot have zero arguments, and `inputs` cannot be provided via the default value of a keyword argument. - NumPy array or Python scalar values in `inputs` get cast as tensors. - Keras mask metadata is only collected from `inputs`. - Layers are built (`build(input_shape)` method) using shape info from `inputs` only. - `input_spec` compatibility is only checked against `inputs`. - Mixed precision input casting is only applied to `inputs`. If a layer has tensor arguments in `*args` or `**kwargs`, their casting behavior in mixed precision should be handled manually. - The SavedModel input specification is generated using `inputs` only. - Integration with various ecosystem packages like TFMOT, TFLite, TF.js, etc is only supported for `inputs` and not for tensors in positional and keyword arguments. *args: Additional positional arguments. May contain tensors, although this is not recommended, for the reasons above. **kwargs: Additional keyword arguments. 
May contain tensors, although this is not recommended, for the reasons above. The following optional keyword arguments are reserved: - `training`: Boolean scalar tensor of Python boolean indicating whether the `call` is meant for training or inference. - `mask`: Boolean input mask. If the layer's `call()` method takes a `mask` argument, its default value will be set to the mask generated for `inputs` by the previous layer (if `input` did come from a layer that generated a corresponding mask, i.e. if it came from a Keras layer with masking support). Returns: A tensor or list/tuple of tensors. ",319,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument return inputs ``` ###Assistant : This is where the layer's logic lives. The `call()` method may not create state (except in its first invocation, wrapping the creation of variables or other resources in `tf.init_scope()`). It is recommended to create state in `__init__()`, or the `build()` method that is called automatically before `call()` executes the first time. Args: inputs: Input tensor, or dict/list/tuple of input tensors. The first positional `inputs` argument is subject to special rules: - `inputs` must be explicitly passed. A layer cannot have zero arguments, and `inputs` cannot be provided via the default value of a keyword argument. - NumPy array or Python scalar values in `inputs` get cast as tensors. - Keras mask metadata is only collected from `inputs`. - Layers are built (`build(input_shape)` method) using shape info from `inputs` only. - `input_spec` compatibility is only checked against `inputs`. - Mixed precision input casting is only applied to `inputs`. If a layer has tensor arguments in `*args` or `**kwargs`, their casting behavior in mixed precision should be handled manually. - The SavedModel input specification is generated using `inputs` only. - Integration with various ecosystem packages like TFMOT, TFLite, TF.js, etc is only supported for `inputs` and not for tensors in positional and keyword arguments. *args: Additional positional arguments. May contain tensors, although this is not recommended, for the reasons above. **kwargs: Additional keyword arguments. May contain tensors, although this is not recommended, for the reasons above. The following optional keyword arguments are reserved: - `training`: Boolean scalar tensor of Python boolean indicating whether the `call` is meant for training or inference. - `mask`: Boolean input mask. If the layer's `call()` method takes a `mask` argument, its default value will be set to the mask generated for `inputs` by the previous layer (if `input` did come from a layer that generated a corresponding mask, i.e. if it came from a Keras layer with masking support). Returns: A tensor or list/tuple of tensors. 
" 2827,"def test_use_predictor_with_view(self, mock_handler): # set integration data df = pd.DataFrame([ {'a': 1, 'b': 'one'}, {'a': 2, 'b': 'two'}, {'a': 1, 'b': 'three'}, ]) self.set_handler(mock_handler, name='pg', tables={'tasks': df}) view_name = 'vtasks' # --- create view --- ret = self.command_executor.execute_command(parse_sql( f'create view {view_name} (select * from pg (select * from tasks))', dialect='mindsdb') ) assert ret.error_code is None # --- use predictor --- predicted_value = 3.14 predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical }, 'predicted_value': predicted_value } self.set_predictor(predictor) ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb')) assert ret.error_code is None # native query was called assert mock_handler().native_query.mock_calls[0].args[0] == 'select * from tasks' # check predictor call # model was called assert self.mock_model_interface.predict.mock_calls[0].args[0] == 'task_model' # input = one row whit a==2 when_data = self.mock_model_interface.predict.mock_calls[0].args[1] assert len(when_data) == 1 assert when_data[0]['a'] == 2 # check prediction assert ret.data[0][0] == predicted_value assert len(ret.data) == 1 "," select task_model.p from views.{view_name} join mindsdb.task_model where {view_name}.a = 2 ",10,144,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_use_predictor_with_view(self, mock_handler): # set integration data df = pd.DataFrame([ {'a': 1, 'b': 'one'}, {'a': 2, 'b': 'two'}, {'a': 1, 'b': 'three'}, ]) self.set_handler(mock_handler, name='pg', tables={'tasks': df}) view_name = 'vtasks' # --- create view --- ret = self.command_executor.execute_command(parse_sql( f'create view {view_name} (select * from pg (select * from tasks))', dialect='mindsdb') ) assert ret.error_code is None # --- use predictor --- predicted_value = 3.14 predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical }, 'predicted_value': predicted_value } self.set_predictor(predictor) ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb')) assert ret.error_code is None # native query was called assert mock_handler().native_query.mock_calls[0].args[0] == 'select * from tasks' # check predictor call # model was called assert self.mock_model_interface.predict.mock_calls[0].args[0] == 'task_model' # input = one row whit a==2 when_data = self.mock_model_interface.predict.mock_calls[0].args[1] assert len(when_data) == 1 assert when_data[0]['a'] == 2 # check prediction assert ret.data[0][0] == predicted_value assert len(ret.data) == 1 ``` ###Assistant : select task_model.p from views.{view_name} join mindsdb.task_model where {view_name}.a = 2 " 2828,"def test_vr_connector_shift_by_one(self): view_rq_dict = { ""state"": ViewRequirement(""obs""), ""next_state"": ViewRequirement( ""obs"", shift=1, used_for_compute_actions=False ), ""prev_state"": ViewRequirement(""obs"", shift=-1), } obs_arrs = np.arange(10)[:, None] + 1 config = PPOConfig().to_dict() ctx = ConnectorContext( view_requirements=view_rq_dict, config=config, is_policy_recurrent=True ) c = ViewRequirementAgentConnector(ctx) # keep a running list of observations obs_list = [] for t, obs in enumerate(obs_arrs): # t=0 is the next state of t=-1 data = AgentConnectorDataType( 0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1} ) processed = c([data]) # 
env.reset() for t == -1 else env.step() for_action = processed[0].data.for_action # add cur obs to the list obs_list.append(obs) if t == 0: check(for_action[""prev_state""], for_action[""state""]) else: # prev state should be equal to the prev time step obs check(for_action[""prev_state""], obs_list[-2][None]) ","Test that the ViewRequirementConnector can handle shift by one correctly and can ignore future referencing view_requirements to respect causality",19,114,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_vr_connector_shift_by_one(self): view_rq_dict = { ""state"": ViewRequirement(""obs""), ""next_state"": ViewRequirement( ""obs"", shift=1, used_for_compute_actions=False ), ""prev_state"": ViewRequirement(""obs"", shift=-1), } obs_arrs = np.arange(10)[:, None] + 1 config = PPOConfig().to_dict() ctx = ConnectorContext( view_requirements=view_rq_dict, config=config, is_policy_recurrent=True ) c = ViewRequirementAgentConnector(ctx) # keep a running list of observations obs_list = [] for t, obs in enumerate(obs_arrs): # t=0 is the next state of t=-1 data = AgentConnectorDataType( 0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1} ) processed = c([data]) # env.reset() for t == -1 else env.step() for_action = processed[0].data.for_action # add cur obs to the list obs_list.append(obs) if t == 0: check(for_action[""prev_state""], for_action[""state""]) else: # prev state should be equal to the prev time step obs check(for_action[""prev_state""], obs_list[-2][None]) ``` ###Assistant : Test that the ViewRequirementConnector can handle shift by one correctly and can ignore future referencing view_requirements to respect causality " 2829,"def adapt_error(cls, e): return e._replace(message=e.message.format(*e.vars))[:4] error = namedtuple(""error"", ""lineno col message type vars"") Error = partial(partial, error, message="""", type=SentryCheck, vars=()) S001 = Error( message=""S001: Avoid using the {} mock call as it is "" ""confusing and prone to causing invalid test "" ""behavior."" ) S001.methods = { ""not_called"", ""called_once"", ""called_once_with"", } S002 = Error(message=""S002: print functions or statements are not allowed."") S003 = Error(message=""S003: Use ``from sentry.utils import json`` instead."") S003.modules = {""json"", ""simplejson""} S003.names = { ""load"", ""loads"", ""dump"", ""dumps"", ""JSONEncoder"", ""JSONDecodeError"", ""_default_encoder"", } ",Adapts the extended error namedtuple to be compatible with Flake8.,10,85,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def adapt_error(cls, e): return e._replace(message=e.message.format(*e.vars))[:4] error = namedtuple(""error"", ""lineno col message type vars"") Error = partial(partial, error, message="""", type=SentryCheck, vars=()) S001 = Error( message=""S001: Avoid using the {} mock call as it is "" ""confusing and prone to causing invalid test "" ""behavior."" ) S001.methods = { ""not_called"", ""called_once"", ""called_once_with"", } S002 = Error(message=""S002: print functions or statements are not allowed."") S003 = Error(message=""S003: Use ``from sentry.utils import json`` instead."") S003.modules = {""json"", ""simplejson""} S003.names = { ""load"", ""loads"", ""dump"", ""dumps"", ""JSONEncoder"", ""JSONDecodeError"", ""_default_encoder"", } ``` ###Assistant : Adapts the extended error namedtuple to be compatible with Flake8. 
" 2830,"def on_train_begin(self, logs=None): logs = self._process_logs(logs) for callback in self.callbacks: callback.on_train_begin(logs) ","Calls the `on_train_begin` methods of its callbacks. Args: logs: Dict. Currently, no data is passed via this argument for this method, but that may change in the future. ",28,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def on_train_begin(self, logs=None): logs = self._process_logs(logs) for callback in self.callbacks: callback.on_train_begin(logs) ``` ###Assistant : Calls the `on_train_begin` methods of its callbacks. Args: logs: Dict. Currently, no data is passed via this argument for this method, but that may change in the future. " 2831,"def test_decorate_validated_function(): decorated_function = deprecated()(_func) with pytest.warns(FutureWarning, match=""Function _func is deprecated""): decorated_function(1, 2, c=3) # outer decorator does not interfer with validation with pytest.warns(FutureWarning, match=""Function _func is deprecated""): with pytest.raises(ValueError, match=r""The 'c' parameter of _func must be""): decorated_function(1, 2, c=""wrong"") ",Check that validate_params functions can be decorated,7,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_decorate_validated_function(): decorated_function = deprecated()(_func) with pytest.warns(FutureWarning, match=""Function _func is deprecated""): decorated_function(1, 2, c=3) # outer decorator does not interfer with validation with pytest.warns(FutureWarning, match=""Function _func is deprecated""): with pytest.raises(ValueError, match=r""The 'c' parameter of _func must be""): decorated_function(1, 2, c=""wrong"") ``` ###Assistant : Check that validate_params functions can be decorated " 2832,"def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend): axes.set_xlabel(xlabel), axes.set_ylabel(ylabel) axes.set_xscale(xscale), axes.set_yscale(yscale) axes.set_xlim(xlim), axes.set_ylim(ylim) if legend: axes.legend(legend) axes.grid() ","Set the axes for matplotlib. Defined in :numref:`sec_calculus`",8,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend): axes.set_xlabel(xlabel), axes.set_ylabel(ylabel) axes.set_xscale(xscale), axes.set_yscale(yscale) axes.set_xlim(xlim), axes.set_ylim(ylim) if legend: axes.legend(legend) axes.grid() ``` ###Assistant : Set the axes for matplotlib. Defined in :numref:`sec_calculus` " 2833,"def register(self, addon): api_changes = { # mitmproxy 6 -> mitmproxy 7 ""clientconnect"": ""client_connected"", ""clientdisconnect"": ""client_disconnected"", ""serverconnect"": ""server_connect and server_connected"", ""serverdisconnect"": ""server_disconnected"", } for a in traverse([addon]): for old, new in api_changes.items(): if hasattr(a, old): ctx.log.warn(f""The {old} event has been removed, use {new} instead. 
"" f""For more details, see https://docs.mitmproxy.org/stable/addons-events/."") name = _get_name(a) if name in self.lookup: raise exceptions.AddonManagerError( ""An addon called '%s' already exists."" % name ) l = Loader(self.master) self.invoke_addon_sync(addon, LoadHook(l)) for a in traverse([addon]): name = _get_name(a) self.lookup[name] = a for a in traverse([addon]): self.master.commands.collect_commands(a) self.master.options.process_deferred() return addon "," Register an addon, call its load event, and then register all its sub-addons. This should be used by addons that dynamically manage addons. If the calling addon is already running, it should follow with running and configure events. Must be called within a current context. ",45,91,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def register(self, addon): api_changes = { # mitmproxy 6 -> mitmproxy 7 ""clientconnect"": ""client_connected"", ""clientdisconnect"": ""client_disconnected"", ""serverconnect"": ""server_connect and server_connected"", ""serverdisconnect"": ""server_disconnected"", } for a in traverse([addon]): for old, new in api_changes.items(): if hasattr(a, old): ctx.log.warn(f""The {old} event has been removed, use {new} instead. "" f""For more details, see https://docs.mitmproxy.org/stable/addons-events/."") name = _get_name(a) if name in self.lookup: raise exceptions.AddonManagerError( ""An addon called '%s' already exists."" % name ) l = Loader(self.master) self.invoke_addon_sync(addon, LoadHook(l)) for a in traverse([addon]): name = _get_name(a) self.lookup[name] = a for a in traverse([addon]): self.master.commands.collect_commands(a) self.master.options.process_deferred() return addon ``` ###Assistant : Register an addon, call its load event, and then register all its sub-addons. This should be used by addons that dynamically manage addons. If the calling addon is already running, it should follow with running and configure events. Must be called within a current context. " 2834,"def get_context_data(self, **kwargs): from tickets.models import Ticket from tickets.const import TICKET_DETAIL_URL ticket_id = self.request.session.get(""auth_ticket_id"") if not ticket_id: ticket = None else: ticket = Ticket.all().filter(pk=ticket_id).first() context = super().get_context_data(**kwargs) if ticket: timestamp_created = datetime.datetime.timestamp(ticket.date_created) ticket_detail_url = TICKET_DETAIL_URL.format(id=ticket_id, type=ticket.type) assignees = ticket.current_node.first().ticket_assignees.all() assignees_display = ', '.join([str(i.assignee) for i in assignees]) msg = _().format(assignees_display) else: timestamp_created = 0 ticket_detail_url = '' msg = _(""No ticket found"") context.update({ ""msg"": msg, ""timestamp"": timestamp_created, ""ticket_detail_url"": ticket_detail_url }) return context @method_decorator(never_cache, name='dispatch')","Wait for {} confirm, You also can copy link to her/him
    Don't close this page",16,74,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_context_data(self, **kwargs): from tickets.models import Ticket from tickets.const import TICKET_DETAIL_URL ticket_id = self.request.session.get(""auth_ticket_id"") if not ticket_id: ticket = None else: ticket = Ticket.all().filter(pk=ticket_id).first() context = super().get_context_data(**kwargs) if ticket: timestamp_created = datetime.datetime.timestamp(ticket.date_created) ticket_detail_url = TICKET_DETAIL_URL.format(id=ticket_id, type=ticket.type) assignees = ticket.current_node.first().ticket_assignees.all() assignees_display = ', '.join([str(i.assignee) for i in assignees]) msg = _().format(assignees_display) else: timestamp_created = 0 ticket_detail_url = '' msg = _(""No ticket found"") context.update({ ""msg"": msg, ""timestamp"": timestamp_created, ""ticket_detail_url"": ticket_detail_url }) return context @method_decorator(never_cache, name='dispatch') ``` ###Assistant : Wait for {} confirm, You also can copy link to her/him
    Don't close this page " 2835,"def tf_shard_checkpoint(weights, max_shard_size=""10GB""): max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [] current_block = [] current_block_size = 0 total_size = 0 for item in weights: weight_size = item.numpy().size * dtype_byte_size(item.dtype) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: sharded_state_dicts.append(current_block) current_block = [] current_block_size = 0 current_block.append(item) current_block_size += weight_size total_size += weight_size # Add the last block sharded_state_dicts.append(current_block) # If we only have one shard, we return it if len(sharded_state_dicts) == 1: return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): shard_file = TF2_WEIGHTS_NAME.replace("".h5"", f""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5"") shards[shard_file] = shard for weight in shard: weight_name = weight.name weight_map[weight_name] = shard_file # Add the metadata metadata = {""total_size"": total_size} index = {""metadata"": metadata, ""weight_map"": weight_map} return shards, index "," Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. If one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. Args: weights (`Dict[str, tf.RessourceVariable]`): The list of tf.RessourceVariable of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `""10GB""`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `""5MB""`). ",158,137,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tf_shard_checkpoint(weights, max_shard_size=""10GB""): max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [] current_block = [] current_block_size = 0 total_size = 0 for item in weights: weight_size = item.numpy().size * dtype_byte_size(item.dtype) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: sharded_state_dicts.append(current_block) current_block = [] current_block_size = 0 current_block.append(item) current_block_size += weight_size total_size += weight_size # Add the last block sharded_state_dicts.append(current_block) # If we only have one shard, we return it if len(sharded_state_dicts) == 1: return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): shard_file = TF2_WEIGHTS_NAME.replace("".h5"", f""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5"") shards[shard_file] = shard for weight in shard: weight_name = weight.name weight_map[weight_name] = shard_file # Add the metadata metadata = {""total_size"": total_size} index = {""metadata"": metadata, ""weight_map"": weight_map} return shards, index ``` ###Assistant : Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. If one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. Args: weights (`Dict[str, tf.RessourceVariable]`): The list of tf.RessourceVariable of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `""10GB""`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `""5MB""`). " 2836,"def scale(self, x=1, y=1, pt=None): if pt: pt = Point(pt, dim=2) return self.translate(*(-pt).args).scale(x, y).translate(*pt.args) fx, fy = self.functions return self.func((fx*x, fy*y), self.limits) ","Override GeometryEntity.scale since Curve is not made up of Points. Returns ======= Curve : returns scaled curve. Examples ======== >>> from sympy import Curve >>> from sympy.abc import x >>> Curve((x, x), (x, 0, 1)).scale(2) Curve((2*x, x), (x, 0, 1)) ",40,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def scale(self, x=1, y=1, pt=None): if pt: pt = Point(pt, dim=2) return self.translate(*(-pt).args).scale(x, y).translate(*pt.args) fx, fy = self.functions return self.func((fx*x, fy*y), self.limits) ``` ###Assistant : Override GeometryEntity.scale since Curve is not made up of Points. Returns ======= Curve : returns scaled curve. 
Examples ======== >>> from sympy import Curve >>> from sympy.abc import x >>> Curve((x, x), (x, 0, 1)).scale(2) Curve((2*x, x), (x, 0, 1)) " 2837,"def orient_body_fixed(self, parent, angles, rotation_order): _check_frame(parent) amounts = list(angles) for i, v in enumerate(amounts): if not isinstance(v, Vector): amounts[i] = sympify(v) approved_orders = ('123', '231', '312', '132', '213', '321', '121', '131', '212', '232', '313', '323', '') # make sure XYZ => 123 rot_order = translate(str(rotation_order), 'XYZxyz', '123123') if rot_order not in approved_orders: raise TypeError('The rotation order is not a valid order.') parent_orient_body = [] if not (len(amounts) == 3 & len(rot_order) == 3): raise TypeError('Body orientation takes 3 values & 3 orders') a1 = int(rot_order[0]) a2 = int(rot_order[1]) a3 = int(rot_order[2]) parent_orient_body = (self._rot(a1, amounts[0]) * self._rot(a2, amounts[1]) * self._rot(a3, amounts[2])) self._dcm(parent, parent_orient_body) try: from sympy.polys.polyerrors import CoercionFailed from sympy.physics.vector.functions import kinematic_equations q1, q2, q3 = amounts u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy) templist = kinematic_equations([u1, u2, u3], [q1, q2, q3], 'body', rot_order) templist = [expand(i) for i in templist] td = solve(templist, [u1, u2, u3]) u1 = expand(td[u1]) u2 = expand(td[u2]) u3 = expand(td[u3]) wvec = u1 * self.x + u2 * self.y + u3 * self.z # NOTE : SymPy 1.7 removed the call to simplify() that occured # inside the solve() function, so this restores the pre-1.7 # behavior. See: # https://github.com/sympy/sympy/issues/23140 # and # https://github.com/sympy/sympy/issues/23130 wvec = wvec.simplify() except (CoercionFailed, AssertionError): wvec = self._w_diff_dcm(parent) self._ang_vel_dict.update({parent: wvec}) parent._ang_vel_dict.update({self: -wvec}) self._var_dict = {} ","Rotates this reference frame relative to the parent reference frame by right hand rotating through three successive body fixed simple axis rotations. Each subsequent axis of rotation is about the ""body fixed"" unit vectors of a new intermediate reference frame. This type of rotation is also referred to rotating through the `Euler and Tait-Bryan Angles`_. .. _Euler and Tait-Bryan Angles: https://en.wikipedia.org/wiki/Euler_angles Parameters ========== parent : ReferenceFrame Reference frame that this reference frame will be rotated relative to. angles : 3-tuple of sympifiable Three angles in radians used for the successive rotations. rotation_order : 3 character string or 3 digit integer Order of the rotations about each intermediate reference frames' unit vectors. The Euler rotation about the X, Z', X'' axes can be specified by the strings ``'XZX'``, ``'131'``, or the integer ``131``. There are 12 unique valid rotation orders (6 Euler and 6 Tait-Bryan): zxz, xyx, yzy, zyz, xzx, yxy, xyz, yzx, zxy, xzy, zyx, and yxz. Warns ====== UserWarning If the orientation creates a kinematic loop. 
Examples ======== Setup variables for the examples: >>> from sympy import symbols >>> from sympy.physics.vector import ReferenceFrame >>> q1, q2, q3 = symbols('q1, q2, q3') >>> N = ReferenceFrame('N') >>> B = ReferenceFrame('B') >>> B1 = ReferenceFrame('B1') >>> B2 = ReferenceFrame('B2') >>> B3 = ReferenceFrame('B3') For example, a classic Euler Angle rotation can be done by: >>> B.orient_body_fixed(N, (q1, q2, q3), 'XYX') >>> B.dcm(N) Matrix([ [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)], [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)], [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]]) This rotates reference frame B relative to reference frame N through ``q1`` about ``N.x``, then rotates B again through ``q2`` about ``B.y``, and finally through ``q3`` about ``B.x``. It is equivalent to three successive ``orient_axis()`` calls: >>> B1.orient_axis(N, N.x, q1) >>> B2.orient_axis(B1, B1.y, q2) >>> B3.orient_axis(B2, B2.x, q3) >>> B3.dcm(N) Matrix([ [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)], [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)], [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]]) Acceptable rotation orders are of length 3, expressed in as a string ``'XYZ'`` or ``'123'`` or integer ``123``. Rotations about an axis twice in a row are prohibited. >>> B.orient_body_fixed(N, (q1, q2, 0), 'ZXZ') >>> B.orient_body_fixed(N, (q1, q2, 0), '121') >>> B.orient_body_fixed(N, (q1, q2, q3), 123) ",365,217,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def orient_body_fixed(self, parent, angles, rotation_order): _check_frame(parent) amounts = list(angles) for i, v in enumerate(amounts): if not isinstance(v, Vector): amounts[i] = sympify(v) approved_orders = ('123', '231', '312', '132', '213', '321', '121', '131', '212', '232', '313', '323', '') # make sure XYZ => 123 rot_order = translate(str(rotation_order), 'XYZxyz', '123123') if rot_order not in approved_orders: raise TypeError('The rotation order is not a valid order.') parent_orient_body = [] if not (len(amounts) == 3 & len(rot_order) == 3): raise TypeError('Body orientation takes 3 values & 3 orders') a1 = int(rot_order[0]) a2 = int(rot_order[1]) a3 = int(rot_order[2]) parent_orient_body = (self._rot(a1, amounts[0]) * self._rot(a2, amounts[1]) * self._rot(a3, amounts[2])) self._dcm(parent, parent_orient_body) try: from sympy.polys.polyerrors import CoercionFailed from sympy.physics.vector.functions import kinematic_equations q1, q2, q3 = amounts u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy) templist = kinematic_equations([u1, u2, u3], [q1, q2, q3], 'body', rot_order) templist = [expand(i) for i in templist] td = solve(templist, [u1, u2, u3]) u1 = expand(td[u1]) u2 = expand(td[u2]) u3 = expand(td[u3]) wvec = u1 * self.x + u2 * self.y + u3 * self.z # NOTE : SymPy 1.7 removed the call to simplify() that occured # inside the solve() function, so this restores the pre-1.7 # behavior. 
See: # https://github.com/sympy/sympy/issues/23140 # and # https://github.com/sympy/sympy/issues/23130 wvec = wvec.simplify() except (CoercionFailed, AssertionError): wvec = self._w_diff_dcm(parent) self._ang_vel_dict.update({parent: wvec}) parent._ang_vel_dict.update({self: -wvec}) self._var_dict = {} ``` ###Assistant : Rotates this reference frame relative to the parent reference frame by right hand rotating through three successive body fixed simple axis rotations. Each subsequent axis of rotation is about the ""body fixed"" unit vectors of a new intermediate reference frame. This type of rotation is also referred to rotating through the `Euler and Tait-Bryan Angles`_. .. _Euler and Tait-Bryan Angles: https://en.wikipedia.org/wiki/Euler_angles Parameters ========== parent : ReferenceFrame Reference frame that this reference frame will be rotated relative to. angles : 3-tuple of sympifiable Three angles in radians used for the successive rotations. rotation_order : 3 character string or 3 digit integer Order of the rotations about each intermediate reference frames' unit vectors. The Euler rotation about the X, Z', X'' axes can be specified by the strings ``'XZX'``, ``'131'``, or the integer ``131``. There are 12 unique valid rotation orders (6 Euler and 6 Tait-Bryan): zxz, xyx, yzy, zyz, xzx, yxy, xyz, yzx, zxy, xzy, zyx, and yxz. Warns ====== UserWarning If the orientation creates a kinematic loop. Examples ======== Setup variables for the examples: >>> from sympy import symbols >>> from sympy.physics.vector import ReferenceFrame >>> q1, q2, q3 = symbols('q1, q2, q3') >>> N = ReferenceFrame('N') >>> B = ReferenceFrame('B') >>> B1 = ReferenceFrame('B1') >>> B2 = ReferenceFrame('B2') >>> B3 = ReferenceFrame('B3') For example, a classic Euler Angle rotation can be done by: >>> B.orient_body_fixed(N, (q1, q2, q3), 'XYX') >>> B.dcm(N) Matrix([ [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)], [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)], [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]]) This rotates reference frame B relative to reference frame N through ``q1`` about ``N.x``, then rotates B again through ``q2`` about ``B.y``, and finally through ``q3`` about ``B.x``. It is equivalent to three successive ``orient_axis()`` calls: >>> B1.orient_axis(N, N.x, q1) >>> B2.orient_axis(B1, B1.y, q2) >>> B3.orient_axis(B2, B2.x, q3) >>> B3.dcm(N) Matrix([ [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)], [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)], [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]]) Acceptable rotation orders are of length 3, expressed in as a string ``'XYZ'`` or ``'123'`` or integer ``123``. Rotations about an axis twice in a row are prohibited. 
>>> B.orient_body_fixed(N, (q1, q2, 0), 'ZXZ') >>> B.orient_body_fixed(N, (q1, q2, 0), '121') >>> B.orient_body_fixed(N, (q1, q2, q3), 123) " 2838,"def get_applicable_block_lists(employee=None, company=None, all_lists=False): block_lists = [] if not employee: employee = frappe.db.get_value(""Employee"", {""user_id"": frappe.session.user}) if not employee: return [] if not company: company = frappe.db.get_value(""Employee"", employee, ""company"") def add_block_list(block_list): if block_list: if all_lists or not is_user_in_allow_list(block_list): block_lists.append(block_list) # per department department = frappe.db.get_value(""Employee"", employee, ""department"") if department: block_list = frappe.db.get_value(""Department"", department, ""leave_block_list"") add_block_list(block_list) # global for block_list in frappe.db.sql_list( , company, ): add_block_list(block_list) return list(set(block_lists)) ","select name from `tabLeave Block List` where applies_to_all_departments=1 and company=%s",10,66,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_applicable_block_lists(employee=None, company=None, all_lists=False): block_lists = [] if not employee: employee = frappe.db.get_value(""Employee"", {""user_id"": frappe.session.user}) if not employee: return [] if not company: company = frappe.db.get_value(""Employee"", employee, ""company"") def add_block_list(block_list): if block_list: if all_lists or not is_user_in_allow_list(block_list): block_lists.append(block_list) # per department department = frappe.db.get_value(""Employee"", employee, ""department"") if department: block_list = frappe.db.get_value(""Department"", department, ""leave_block_list"") add_block_list(block_list) # global for block_list in frappe.db.sql_list( , company, ): add_block_list(block_list) return list(set(block_lists)) ``` ###Assistant : select name from `tabLeave Block List` where applies_to_all_departments=1 and company=%s " 2839,"def _setup_connection(self): # noqa cur = self.connection.cursor() if ('store',) not in list(cur.execute(""SELECT name FROM sqlite_master WHERE type='table';"")): cur.execute( ) self.internal_registry.commit() "," Checks that a key-value table exists, otherwise creates it. create table store (key text, value text)",16,20,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _setup_connection(self): # noqa cur = self.connection.cursor() if ('store',) not in list(cur.execute(""SELECT name FROM sqlite_master WHERE type='table';"")): cur.execute( ) self.internal_registry.commit() ``` ###Assistant : Checks that a key-value table exists, otherwise creates it. create table store (key text, value text) " 2840,"def cull(dsk, keys): if not isinstance(keys, (list, set)): keys = [keys] seen = set() dependencies = dict() out = {} work = list(set(flatten(keys))) while work: new_work = [] for k in work: dependencies_k = get_dependencies(dsk, k, as_list=True) # fuse needs lists out[k] = dsk[k] dependencies[k] = dependencies_k for d in dependencies_k: if d not in seen: seen.add(d) new_work.append(d) work = new_work return out, dependencies ","Return new dask with only the tasks required to calculate keys. In other words, remove unnecessary tasks from dask. ``keys`` may be a single key or list of keys. Examples -------- >>> def inc(x): ... return x + 1 >>> def add(x, y): ... 
return x + y >>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)} >>> dsk, dependencies = cull(d, 'out') >>> dsk # doctest: +ELLIPSIS {'out': (, 'x', 10), 'x': 1} >>> dependencies # doctest: +ELLIPSIS {'out': ['x'], 'x': []} Returns ------- dsk: culled dask graph dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate other optimizations, notably fuse. ",109,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cull(dsk, keys): if not isinstance(keys, (list, set)): keys = [keys] seen = set() dependencies = dict() out = {} work = list(set(flatten(keys))) while work: new_work = [] for k in work: dependencies_k = get_dependencies(dsk, k, as_list=True) # fuse needs lists out[k] = dsk[k] dependencies[k] = dependencies_k for d in dependencies_k: if d not in seen: seen.add(d) new_work.append(d) work = new_work return out, dependencies ``` ###Assistant : Return new dask with only the tasks required to calculate keys. In other words, remove unnecessary tasks from dask. ``keys`` may be a single key or list of keys. Examples -------- >>> def inc(x): ... return x + 1 >>> def add(x, y): ... return x + y >>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)} >>> dsk, dependencies = cull(d, 'out') >>> dsk # doctest: +ELLIPSIS {'out': (, 'x', 10), 'x': 1} >>> dependencies # doctest: +ELLIPSIS {'out': ['x'], 'x': []} Returns ------- dsk: culled dask graph dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate other optimizations, notably fuse. " 2841,"def fill_leverage_tiers(self) -> None: leverage_tiers = self.load_leverage_tiers() for pair, tiers in leverage_tiers.items(): tiers = [] for tier in tiers: tiers.append(self.parse_leverage_tier(tier)) self._leverage_tiers[pair] = tiers "," Assigns property _leverage_tiers to a dictionary of information about the leverage allowed on each pair ",15,23,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fill_leverage_tiers(self) -> None: leverage_tiers = self.load_leverage_tiers() for pair, tiers in leverage_tiers.items(): tiers = [] for tier in tiers: tiers.append(self.parse_leverage_tier(tier)) self._leverage_tiers[pair] = tiers ``` ###Assistant : Assigns property _leverage_tiers to a dictionary of information about the leverage allowed on each pair " 2842,"def kronecker_product(*matrices): if not matrices: raise TypeError(""Empty Kronecker product is undefined"") validate(*matrices) if len(matrices) == 1: return matrices[0] else: return KroneckerProduct(*matrices).doit() "," The Kronecker product of two or more arguments. This computes the explicit Kronecker product for subclasses of ``MatrixBase`` i.e. explicit matrices. Otherwise, a symbolic ``KroneckerProduct`` object is returned. Examples ======== For ``MatrixSymbol`` arguments a ``KroneckerProduct`` object is returned. 
Elements of this matrix can be obtained by indexing, or for MatrixSymbols with known dimension the explicit matrix can be obtained with ``.as_explicit()`` >>> from sympy import kronecker_product, MatrixSymbol >>> A = MatrixSymbol('A', 2, 2) >>> B = MatrixSymbol('B', 2, 2) >>> kronecker_product(A) A >>> kronecker_product(A, B) KroneckerProduct(A, B) >>> kronecker_product(A, B)[0, 1] A[0, 0]*B[0, 1] >>> kronecker_product(A, B).as_explicit() Matrix([ [A[0, 0]*B[0, 0], A[0, 0]*B[0, 1], A[0, 1]*B[0, 0], A[0, 1]*B[0, 1]], [A[0, 0]*B[1, 0], A[0, 0]*B[1, 1], A[0, 1]*B[1, 0], A[0, 1]*B[1, 1]], [A[1, 0]*B[0, 0], A[1, 0]*B[0, 1], A[1, 1]*B[0, 0], A[1, 1]*B[0, 1]], [A[1, 0]*B[1, 0], A[1, 0]*B[1, 1], A[1, 1]*B[1, 0], A[1, 1]*B[1, 1]]]) For explicit matrices the Kronecker product is returned as a Matrix >>> from sympy import Matrix, kronecker_product >>> sigma_x = Matrix([ ... [0, 1], ... [1, 0]]) ... >>> Isigma_y = Matrix([ ... [0, 1], ... [-1, 0]]) ... >>> kronecker_product(sigma_x, Isigma_y) Matrix([ [ 0, 0, 0, 1], [ 0, 0, -1, 0], [ 0, 1, 0, 0], [-1, 0, 0, 0]]) See Also ======== KroneckerProduct ",212,21,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def kronecker_product(*matrices): if not matrices: raise TypeError(""Empty Kronecker product is undefined"") validate(*matrices) if len(matrices) == 1: return matrices[0] else: return KroneckerProduct(*matrices).doit() ``` ###Assistant : The Kronecker product of two or more arguments. This computes the explicit Kronecker product for subclasses of ``MatrixBase`` i.e. explicit matrices. Otherwise, a symbolic ``KroneckerProduct`` object is returned. Examples ======== For ``MatrixSymbol`` arguments a ``KroneckerProduct`` object is returned. Elements of this matrix can be obtained by indexing, or for MatrixSymbols with known dimension the explicit matrix can be obtained with ``.as_explicit()`` >>> from sympy import kronecker_product, MatrixSymbol >>> A = MatrixSymbol('A', 2, 2) >>> B = MatrixSymbol('B', 2, 2) >>> kronecker_product(A) A >>> kronecker_product(A, B) KroneckerProduct(A, B) >>> kronecker_product(A, B)[0, 1] A[0, 0]*B[0, 1] >>> kronecker_product(A, B).as_explicit() Matrix([ [A[0, 0]*B[0, 0], A[0, 0]*B[0, 1], A[0, 1]*B[0, 0], A[0, 1]*B[0, 1]], [A[0, 0]*B[1, 0], A[0, 0]*B[1, 1], A[0, 1]*B[1, 0], A[0, 1]*B[1, 1]], [A[1, 0]*B[0, 0], A[1, 0]*B[0, 1], A[1, 1]*B[0, 0], A[1, 1]*B[0, 1]], [A[1, 0]*B[1, 0], A[1, 0]*B[1, 1], A[1, 1]*B[1, 0], A[1, 1]*B[1, 1]]]) For explicit matrices the Kronecker product is returned as a Matrix >>> from sympy import Matrix, kronecker_product >>> sigma_x = Matrix([ ... [0, 1], ... [1, 0]]) ... >>> Isigma_y = Matrix([ ... [0, 1], ... [-1, 0]]) ... >>> kronecker_product(sigma_x, Isigma_y) Matrix([ [ 0, 0, 0, 1], [ 0, 0, -1, 0], [ 0, 1, 0, 0], [-1, 0, 0, 0]]) See Also ======== KroneckerProduct " 2843,"def _get_obj_does_not_exist_redirect(self, request, opts, object_id): msg = _(""%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?"") % { ""name"": opts.verbose_name, ""key"": unquote(object_id), } self.message_user(request, msg, messages.WARNING) url = reverse(""admin:index"", current_app=self.admin_site.name) return HttpResponseRedirect(url) "," Create a message informing the user that the object doesn't exist and return a redirect to the admin index page. ",20,33,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _get_obj_does_not_exist_redirect(self, request, opts, object_id): msg = _(""%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?"") % { ""name"": opts.verbose_name, ""key"": unquote(object_id), } self.message_user(request, msg, messages.WARNING) url = reverse(""admin:index"", current_app=self.admin_site.name) return HttpResponseRedirect(url) ``` ###Assistant : Create a message informing the user that the object doesn't exist and return a redirect to the admin index page. " 2844,"def compare_total(self, other, context=None): other = _convert_other(other, raiseit=True) # if one is negative and the other is positive, it's easy if self._sign and not other._sign: return _NegativeOne if not self._sign and other._sign: return _One sign = self._sign # let's handle both NaN types self_nan = self._isnan() other_nan = other._isnan() if self_nan or other_nan: if self_nan == other_nan: # compare payloads as though they're integers self_key = len(self._int), self._int other_key = len(other._int), other._int if self_key < other_key: if sign: return _One else: return _NegativeOne if self_key > other_key: if sign: return _NegativeOne else: return _One return _Zero if sign: if self_nan == 1: return _NegativeOne if other_nan == 1: return _One if self_nan == 2: return _NegativeOne if other_nan == 2: return _One else: if self_nan == 1: return _One if other_nan == 1: return _NegativeOne if self_nan == 2: return _One if other_nan == 2: return _NegativeOne if self < other: return _NegativeOne if self > other: return _One if self._exp < other._exp: if sign: return _One else: return _NegativeOne if self._exp > other._exp: if sign: return _NegativeOne else: return _One return _Zero ","Compares self to other using the abstract representations. This is not like the standard compare, which use their numerical value. Note that a total ordering is defined for all possible abstract representations. ",32,183,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def compare_total(self, other, context=None): other = _convert_other(other, raiseit=True) # if one is negative and the other is positive, it's easy if self._sign and not other._sign: return _NegativeOne if not self._sign and other._sign: return _One sign = self._sign # let's handle both NaN types self_nan = self._isnan() other_nan = other._isnan() if self_nan or other_nan: if self_nan == other_nan: # compare payloads as though they're integers self_key = len(self._int), self._int other_key = len(other._int), other._int if self_key < other_key: if sign: return _One else: return _NegativeOne if self_key > other_key: if sign: return _NegativeOne else: return _One return _Zero if sign: if self_nan == 1: return _NegativeOne if other_nan == 1: return _One if self_nan == 2: return _NegativeOne if other_nan == 2: return _One else: if self_nan == 1: return _One if other_nan == 1: return _NegativeOne if self_nan == 2: return _One if other_nan == 2: return _NegativeOne if self < other: return _NegativeOne if self > other: return _One if self._exp < other._exp: if sign: return _One else: return _NegativeOne if self._exp > other._exp: if sign: return _NegativeOne else: return _One return _Zero ``` ###Assistant : Compares self to other using the abstract representations. This is not like the standard compare, which use their numerical value. Note that a total ordering is defined for all possible abstract representations. 
" 2845,"def detect_current_mode(cls) -> _FaultTolerantMode: env_value = os.getenv(""PL_FAULT_TOLERANT_TRAINING"", ""0"").lower() # the int values are kept for backwards compatibility, but long-term we want to keep only the strings if env_value in (""0"", ""disabled""): return _FaultTolerantMode.DISABLED elif env_value in (""1"", ""automatic""): return _FaultTolerantMode.AUTOMATIC elif env_value in (""2"", ""manual""): return _FaultTolerantMode.MANUAL raise MisconfigurationException( ""The environment flag `PL_FAULT_TOLERANT_TRAINING` should be either 'disabled', 'automatic', or 'manual'."" ) ",This classmethod detects if `Fault Tolerant` is activated and maps its value to `_FaultTolerantMode`.,14,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def detect_current_mode(cls) -> _FaultTolerantMode: env_value = os.getenv(""PL_FAULT_TOLERANT_TRAINING"", ""0"").lower() # the int values are kept for backwards compatibility, but long-term we want to keep only the strings if env_value in (""0"", ""disabled""): return _FaultTolerantMode.DISABLED elif env_value in (""1"", ""automatic""): return _FaultTolerantMode.AUTOMATIC elif env_value in (""2"", ""manual""): return _FaultTolerantMode.MANUAL raise MisconfigurationException( ""The environment flag `PL_FAULT_TOLERANT_TRAINING` should be either 'disabled', 'automatic', or 'manual'."" ) ``` ###Assistant : This classmethod detects if `Fault Tolerant` is activated and maps its value to `_FaultTolerantMode`. " 2846,"def apply_channel_shift(x, intensity, channel_axis=0): x = np.rollaxis(x, channel_axis, 0) min_x, max_x = np.min(x), np.max(x) channel_images = [ np.clip(x_channel + intensity, min_x, max_x) for x_channel in x ] x = np.stack(channel_images, axis=0) x = np.rollaxis(x, 0, channel_axis + 1) return x @keras_export(""keras.preprocessing.image.random_channel_shift"")","Performs a channel shift. Args: x: Input tensor. Must be 3D. intensity: Transformation intensity. channel_axis: Index of axis for channels in the input tensor. Returns: Numpy image tensor. ",28,41,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply_channel_shift(x, intensity, channel_axis=0): x = np.rollaxis(x, channel_axis, 0) min_x, max_x = np.min(x), np.max(x) channel_images = [ np.clip(x_channel + intensity, min_x, max_x) for x_channel in x ] x = np.stack(channel_images, axis=0) x = np.rollaxis(x, 0, channel_axis + 1) return x @keras_export(""keras.preprocessing.image.random_channel_shift"") ``` ###Assistant : Performs a channel shift. Args: x: Input tensor. Must be 3D. intensity: Transformation intensity. channel_axis: Index of axis for channels in the input tensor. Returns: Numpy image tensor. " 2847,"def _nested_gather(self, tensors, name=None): if tensors is None: return if self.args.local_rank != -1: tensors = distributed_concat(tensors) return tensors # Copied from Accelerate."," Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before concatenating them to `gathered` ",20,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _nested_gather(self, tensors, name=None): if tensors is None: return if self.args.local_rank != -1: tensors = distributed_concat(tensors) return tensors # Copied from Accelerate. 
``` ###Assistant : Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before concatenating them to `gathered` " 2848,"def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset): model, X, y, coef, _, _, _ = glm_dataset n_samples, n_features = X.shape alpha = 0 # unpenalized params = dict( alpha=alpha, fit_intercept=fit_intercept, # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) if fit_intercept: X = X[:, :-1] # remove intercept intercept = coef[-1] coef = coef[:-1] else: intercept = 0 model.fit(X, y) # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails # for the wide/fat case with n_features > n_samples. Most current GLM solvers do # NOT return the minimum norm solution with fit_intercept=True. rtol = 5e-5 if n_samples > n_features: assert model.intercept_ == pytest.approx(intercept) assert_allclose(model.coef_, coef, rtol=rtol) else: # As it is an underdetermined problem, prediction = y. The following shows that # we get a solution, i.e. a (non-unique) minimum of the objective function ... assert_allclose(model.predict(X), y, rtol=1e-6) if fit_intercept: # But it is not the minimum norm solution. Otherwise the norms would be # equal. norm_solution = np.linalg.norm(np.r_[intercept, coef]) norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) assert norm_model > (1 + 1e-12) * norm_solution # See https://github.com/scikit-learn/scikit-learn/issues/23670. # Note: Even adding a tiny penalty does not give the minimal norm solution. # XXX: We could have naively expected LBFGS to find the minimal norm # solution by adding a very small penalty. Even that fails for a reason we # do not properly understand at this point. else: # When `fit_intercept=False`, LBFGS naturally converges to the minimum norm # solution on this problem. # XXX: Do we have any theoretical guarantees why this should be the case? assert model.intercept_ == pytest.approx(intercept) assert_allclose(model.coef_, coef, rtol=rtol) @pytest.mark.parametrize(""solver"", SOLVERS) @pytest.mark.parametrize(""fit_intercept"", [True, False])","Test that unpenalized GLM converges for all solvers to correct solution. We work with a simple constructed data set with known solution. Note: This checks the minimum norm solution for wide X, i.e. n_samples < n_features: min ||w||_2 subject to w = argmin deviance(X, y, w) ",46,269,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset): model, X, y, coef, _, _, _ = glm_dataset n_samples, n_features = X.shape alpha = 0 # unpenalized params = dict( alpha=alpha, fit_intercept=fit_intercept, # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) if fit_intercept: X = X[:, :-1] # remove intercept intercept = coef[-1] coef = coef[:-1] else: intercept = 0 model.fit(X, y) # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails # for the wide/fat case with n_features > n_samples. Most current GLM solvers do # NOT return the minimum norm solution with fit_intercept=True. rtol = 5e-5 if n_samples > n_features: assert model.intercept_ == pytest.approx(intercept) assert_allclose(model.coef_, coef, rtol=rtol) else: # As it is an underdetermined problem, prediction = y. The following shows that # we get a solution, i.e. 
a (non-unique) minimum of the objective function ... assert_allclose(model.predict(X), y, rtol=1e-6) if fit_intercept: # But it is not the minimum norm solution. Otherwise the norms would be # equal. norm_solution = np.linalg.norm(np.r_[intercept, coef]) norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) assert norm_model > (1 + 1e-12) * norm_solution # See https://github.com/scikit-learn/scikit-learn/issues/23670. # Note: Even adding a tiny penalty does not give the minimal norm solution. # XXX: We could have naively expected LBFGS to find the minimal norm # solution by adding a very small penalty. Even that fails for a reason we # do not properly understand at this point. else: # When `fit_intercept=False`, LBFGS naturally converges to the minimum norm # solution on this problem. # XXX: Do we have any theoretical guarantees why this should be the case? assert model.intercept_ == pytest.approx(intercept) assert_allclose(model.coef_, coef, rtol=rtol) @pytest.mark.parametrize(""solver"", SOLVERS) @pytest.mark.parametrize(""fit_intercept"", [True, False]) ``` ###Assistant : Test that unpenalized GLM converges for all solvers to correct solution. We work with a simple constructed data set with known solution. Note: This checks the minimum norm solution for wide X, i.e. n_samples < n_features: min ||w||_2 subject to w = argmin deviance(X, y, w) " 2849,"def _node_redundancy(G, v): n = len(G[v]) overlap = sum( 1 for (u, w) in combinations(G[v], 2) if (G[u].keys() & G[w].keys()) - {v} ) return (2 * overlap) / (n * (n - 1)) ","Returns the redundancy of the node `v` in the bipartite graph `G`. If `G` is a graph with `n` nodes, the redundancy of a node is the ratio of the ""overlap"" of `v` to the maximum possible overlap of `v` according to its degree. The overlap of `v` is the number of pairs of neighbors that have mutual neighbors themselves, other than `v`. `v` must have at least two neighbors in `G`. ",72,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _node_redundancy(G, v): n = len(G[v]) overlap = sum( 1 for (u, w) in combinations(G[v], 2) if (G[u].keys() & G[w].keys()) - {v} ) return (2 * overlap) / (n * (n - 1)) ``` ###Assistant : Returns the redundancy of the node `v` in the bipartite graph `G`. If `G` is a graph with `n` nodes, the redundancy of a node is the ratio of the ""overlap"" of `v` to the maximum possible overlap of `v` according to its degree. The overlap of `v` is the number of pairs of neighbors that have mutual neighbors themselves, other than `v`. `v` must have at least two neighbors in `G`. 
" 2850,"def solve(self): count_reaction_loads = 0 for node in self._nodes: if node in list(self._supports): if self._supports[node[0]]=='pinned': count_reaction_loads += 2 elif self._supports[node[0]]=='roller': count_reaction_loads += 1 coefficients_matrix = [[0 for i in range(2*len(self._nodes))] for j in range(2*len(self._nodes))] load_matrix = zeros(2*len(self.nodes), 1) load_matrix_row = 0 for node in self._nodes: if node[0] in list(self._loads): for load in self._loads[node[0]]: if load[0]!=Symbol('R_'+str(node[0])+'_x') and load[0]!=Symbol('R_'+str(node[0])+'_y'): load_matrix[load_matrix_row] -= load[0]*math.cos(pi*load[1]/180) load_matrix[load_matrix_row + 1] -= load[0]*math.sin(pi*load[1]/180) load_matrix_row += 2 cols = 0 row = 0 for node in self._nodes: if node[0] in list(self._supports): if self._supports[node[0]]=='pinned': coefficients_matrix[row][cols] += 1 coefficients_matrix[row+1][cols+1] += 1 cols += 2 elif self._supports[node[0]]=='roller': coefficients_matrix[row+1][cols] += 1 cols += 1 row += 2 for member in list(self._members): start = self._members[member][0] end = self._members[member][1] length = sqrt((self._node_coordinates[start][0]-self._node_coordinates[end][0])**2 + (self._node_coordinates[start][1]-self._node_coordinates[end][1])**2) start_index = self._node_labels.index(start) end_index = self._node_labels.index(end) horizontal_component_start = (self._node_coordinates[end][0]-self._node_coordinates[start][0])/length vertical_component_start = (self._node_coordinates[end][1]-self._node_coordinates[start][1])/length horizontal_component_end = (self._node_coordinates[start][0]-self._node_coordinates[end][0])/length vertical_component_end = (self._node_coordinates[start][1]-self._node_coordinates[end][1])/length coefficients_matrix[start_index*2][cols] += horizontal_component_start coefficients_matrix[start_index*2+1][cols] += vertical_component_start coefficients_matrix[end_index*2][cols] += horizontal_component_end coefficients_matrix[end_index*2+1][cols] += vertical_component_end cols += 1 forces_matrix = (Matrix(coefficients_matrix)**-1)*load_matrix self._reaction_loads = {} i = 0 for node in self._nodes: if node[0] in list(self._supports): if self._supports[node[0]]=='pinned': self._reaction_loads['R_'+str(node[0])+'_x'] = forces_matrix[i] self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i+1] i += 2 elif self._supports[node[0]]=='roller': self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i] i += 1 for member in list(self._members): self._internal_forces[member] = forces_matrix[i] i += 1 return "," This method solves for all reaction forces of all supports and all internal forces of all the members in the truss, provided the Truss is solvable. A Truss is solvable if the following condition is met, 2n >= r + m Where n is the number of nodes, r is the number of reaction forces, where each pinned support has 2 reaction forces and each roller has 1, and m is the number of members. The given condition is derived from the fact that a system of equations is solvable only when the number of variables is lesser than or equal to the number of equations. Equilibrium Equations in x and y directions give two equations per node giving 2n number equations. The number of variables is simply the sum of the number of reaction forces and member forces. 
Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> t = Truss() >>> t.add_node(""node_1"", 0, 0) >>> t.add_node(""node_2"", 6, 0) >>> t.add_node(""node_3"", 2, 2) >>> t.add_node(""node_4"", 2, 0) >>> t.add_member(""member_1"", ""node_1"", ""node_4"") >>> t.add_member(""member_2"", ""node_2"", ""node_4"") >>> t.add_member(""member_3"", ""node_1"", ""node_3"") >>> t.add_member(""member_4"", ""node_2"", ""node_3"") >>> t.add_member(""member_5"", ""node_3"", ""node_4"") >>> t.apply_load(""node_4"", magnitude=10, direction=270) >>> t.apply_support(""node_1"", type=""pinned"") >>> t.apply_support(""node_2"", type=""roller"") >>> t.solve() >>> t.reaction_loads {'R_node_1_x': 1.83697019872103e-15, 'R_node_1_y': 6.66666666666667, 'R_node_2_y': 3.33333333333333} >>> t.internal_forces {'member_1': 6.66666666666666, 'member_2': 6.66666666666667, 'member_3': -6.66666666666667*sqrt(2), 'member_4': -3.33333333333333*sqrt(5), 'member_5': 10.0} ",218,199,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def solve(self): count_reaction_loads = 0 for node in self._nodes: if node in list(self._supports): if self._supports[node[0]]=='pinned': count_reaction_loads += 2 elif self._supports[node[0]]=='roller': count_reaction_loads += 1 coefficients_matrix = [[0 for i in range(2*len(self._nodes))] for j in range(2*len(self._nodes))] load_matrix = zeros(2*len(self.nodes), 1) load_matrix_row = 0 for node in self._nodes: if node[0] in list(self._loads): for load in self._loads[node[0]]: if load[0]!=Symbol('R_'+str(node[0])+'_x') and load[0]!=Symbol('R_'+str(node[0])+'_y'): load_matrix[load_matrix_row] -= load[0]*math.cos(pi*load[1]/180) load_matrix[load_matrix_row + 1] -= load[0]*math.sin(pi*load[1]/180) load_matrix_row += 2 cols = 0 row = 0 for node in self._nodes: if node[0] in list(self._supports): if self._supports[node[0]]=='pinned': coefficients_matrix[row][cols] += 1 coefficients_matrix[row+1][cols+1] += 1 cols += 2 elif self._supports[node[0]]=='roller': coefficients_matrix[row+1][cols] += 1 cols += 1 row += 2 for member in list(self._members): start = self._members[member][0] end = self._members[member][1] length = sqrt((self._node_coordinates[start][0]-self._node_coordinates[end][0])**2 + (self._node_coordinates[start][1]-self._node_coordinates[end][1])**2) start_index = self._node_labels.index(start) end_index = self._node_labels.index(end) horizontal_component_start = (self._node_coordinates[end][0]-self._node_coordinates[start][0])/length vertical_component_start = (self._node_coordinates[end][1]-self._node_coordinates[start][1])/length horizontal_component_end = (self._node_coordinates[start][0]-self._node_coordinates[end][0])/length vertical_component_end = (self._node_coordinates[start][1]-self._node_coordinates[end][1])/length coefficients_matrix[start_index*2][cols] += horizontal_component_start coefficients_matrix[start_index*2+1][cols] += vertical_component_start coefficients_matrix[end_index*2][cols] += horizontal_component_end coefficients_matrix[end_index*2+1][cols] += vertical_component_end cols += 1 forces_matrix = (Matrix(coefficients_matrix)**-1)*load_matrix self._reaction_loads = {} i = 0 for node in self._nodes: if node[0] in list(self._supports): if self._supports[node[0]]=='pinned': self._reaction_loads['R_'+str(node[0])+'_x'] = forces_matrix[i] self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i+1] i += 2 elif self._supports[node[0]]=='roller': 
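# A roller support resists translation only perpendicular to its surface, so it contributes a single reaction load (R_y); a pinned support contributes two (R_x and R_y).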
self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i] i += 1 for member in list(self._members): self._internal_forces[member] = forces_matrix[i] i += 1 return ``` ###Assistant : This method solves for all reaction forces of all supports and all internal forces of all the members in the truss, provided the Truss is solvable. A Truss is solvable if the following condition is met, 2n >= r + m Where n is the number of nodes, r is the number of reaction forces, where each pinned support has 2 reaction forces and each roller has 1, and m is the number of members. The given condition is derived from the fact that a system of equations is solvable only when the number of variables is lesser than or equal to the number of equations. Equilibrium Equations in x and y directions give two equations per node giving 2n number equations. The number of variables is simply the sum of the number of reaction forces and member forces. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> t = Truss() >>> t.add_node(""node_1"", 0, 0) >>> t.add_node(""node_2"", 6, 0) >>> t.add_node(""node_3"", 2, 2) >>> t.add_node(""node_4"", 2, 0) >>> t.add_member(""member_1"", ""node_1"", ""node_4"") >>> t.add_member(""member_2"", ""node_2"", ""node_4"") >>> t.add_member(""member_3"", ""node_1"", ""node_3"") >>> t.add_member(""member_4"", ""node_2"", ""node_3"") >>> t.add_member(""member_5"", ""node_3"", ""node_4"") >>> t.apply_load(""node_4"", magnitude=10, direction=270) >>> t.apply_support(""node_1"", type=""pinned"") >>> t.apply_support(""node_2"", type=""roller"") >>> t.solve() >>> t.reaction_loads {'R_node_1_x': 1.83697019872103e-15, 'R_node_1_y': 6.66666666666667, 'R_node_2_y': 3.33333333333333} >>> t.internal_forces {'member_1': 6.66666666666666, 'member_2': 6.66666666666667, 'member_3': -6.66666666666667*sqrt(2), 'member_4': -3.33333333333333*sqrt(5), 'member_5': 10.0} " 2851,"def gf_edf_zassenhaus(f, n, p, K): factors = [f] if gf_degree(f) <= n: return factors N = gf_degree(f) // n if p != 2: b = gf_frobenius_monomial_base(f, p, K) t = [K.one, K.zero] while len(factors) < N: if p == 2: h = r = t for i in range(n - 1): r = gf_pow_mod(r, 2, f, p, K) h = gf_add(h, r, p, K) g = gf_gcd(f, h, p, K) t += [K.zero, K.zero] else: r = gf_random(2 * n - 1, p, K) h = _gf_pow_pnm1d2(r, n, f, b, p, K) g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K) if g != [K.one] and g != f: factors = gf_edf_zassenhaus(g, n, p, K) \ + gf_edf_zassenhaus(gf_quo(f, g, p, K), n, p, K) return _sort_factors(factors, multiple=False) "," Cantor-Zassenhaus: Probabilistic Equal Degree Factorization Given a monic square-free polynomial ``f`` in ``GF(p)[x]`` and an integer ``n``, such that ``n`` divides ``deg(f)``, returns all irreducible factors ``f_1,...,f_d`` of ``f``, each of degree ``n``. EDF procedure gives complete factorization over Galois fields. Consider the square-free polynomial ``f = x**3 + x**2 + x + 1`` in ``GF(5)[x]``. Let's compute its irreducible factors of degree one:: >>> from sympy.polys.domains import ZZ >>> from sympy.polys.galoistools import gf_edf_zassenhaus >>> gf_edf_zassenhaus([1,1,1,1], 1, 5, ZZ) [[1, 1], [1, 2], [1, 3]] References ========== .. [1] [Gathen99]_ .. [2] [Geddes92]_ .. [3] [Cohen93]_ ",96,127,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def gf_edf_zassenhaus(f, n, p, K): factors = [f] if gf_degree(f) <= n: return factors N = gf_degree(f) // n if p != 2: b = gf_frobenius_monomial_base(f, p, K) t = [K.one, K.zero] while len(factors) < N: if p == 2: h = r = t for i in range(n - 1): r = gf_pow_mod(r, 2, f, p, K) h = gf_add(h, r, p, K) g = gf_gcd(f, h, p, K) t += [K.zero, K.zero] else: r = gf_random(2 * n - 1, p, K) h = _gf_pow_pnm1d2(r, n, f, b, p, K) g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K) if g != [K.one] and g != f: factors = gf_edf_zassenhaus(g, n, p, K) \ + gf_edf_zassenhaus(gf_quo(f, g, p, K), n, p, K) return _sort_factors(factors, multiple=False) ``` ###Assistant : Cantor-Zassenhaus: Probabilistic Equal Degree Factorization Given a monic square-free polynomial ``f`` in ``GF(p)[x]`` and an integer ``n``, such that ``n`` divides ``deg(f)``, returns all irreducible factors ``f_1,...,f_d`` of ``f``, each of degree ``n``. EDF procedure gives complete factorization over Galois fields. Consider the square-free polynomial ``f = x**3 + x**2 + x + 1`` in ``GF(5)[x]``. Let's compute its irreducible factors of degree one:: >>> from sympy.polys.domains import ZZ >>> from sympy.polys.galoistools import gf_edf_zassenhaus >>> gf_edf_zassenhaus([1,1,1,1], 1, 5, ZZ) [[1, 1], [1, 2], [1, 3]] References ========== .. [1] [Gathen99]_ .. [2] [Geddes92]_ .. [3] [Cohen93]_ " 2852,"def _tracemin_fiedler(L, X, normalized, tol, method): import numpy as np import scipy as sp import scipy.linalg # call as sp.linalg import scipy.linalg.blas # call as sp.linalg.blas import scipy.sparse # call as sp.sparse n = X.shape[0] if normalized: # Form the normalized Laplacian matrix and determine the eigenvector of # its nullspace. e = np.sqrt(L.diagonal()) # TODO: rm csr_array wrapper when spdiags array creation becomes available D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format=""csr"")) L = D @ L @ D e *= 1.0 / np.linalg.norm(e, 2) if normalized: ","Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm. The Fiedler vector of a connected undirected graph is the eigenvector corresponding to the second smallest eigenvalue of the Laplacian matrix of the graph. This function starts with the Laplacian L, not the Graph. Parameters ---------- L : Laplacian of a possibly weighted or normalized, but undirected graph X : Initial guess for a solution. Usually a matrix of random numbers. This function allows more than one column in X to identify more than one eigenvector if desired. normalized : bool Whether the normalized Laplacian matrix is used. tol : float Tolerance of relative residual in eigenvalue computation. Warning: There is no limit on number of iterations. method : string Should be 'tracemin_pcg' or 'tracemin_lu'. Otherwise exception is raised. Returns ------- sigma, X : Two NumPy arrays of floats. The lowest eigenvalues and corresponding eigenvectors of L. The size of input X determines the size of these outputs. As this is for Fiedler vectors, the zero eigenvalue (and constant eigenvector) are avoided. ",172,89,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _tracemin_fiedler(L, X, normalized, tol, method): import numpy as np import scipy as sp import scipy.linalg # call as sp.linalg import scipy.linalg.blas # call as sp.linalg.blas import scipy.sparse # call as sp.sparse n = X.shape[0] if normalized: # Form the normalized Laplacian matrix and determine the eigenvector of # its nullspace. 
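# With D = diag(1/sqrt(degree)), the product D @ L @ D below is the symmetrically normalized Laplacian; its nullspace is spanned by the sqrt(degree) vector, which is then scaled to unit length.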
e = np.sqrt(L.diagonal()) # TODO: rm csr_array wrapper when spdiags array creation becomes available D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format=""csr"")) L = D @ L @ D e *= 1.0 / np.linalg.norm(e, 2) if normalized: ``` ###Assistant : Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm. The Fiedler vector of a connected undirected graph is the eigenvector corresponding to the second smallest eigenvalue of the Laplacian matrix of the graph. This function starts with the Laplacian L, not the Graph. Parameters ---------- L : Laplacian of a possibly weighted or normalized, but undirected graph X : Initial guess for a solution. Usually a matrix of random numbers. This function allows more than one column in X to identify more than one eigenvector if desired. normalized : bool Whether the normalized Laplacian matrix is used. tol : float Tolerance of relative residual in eigenvalue computation. Warning: There is no limit on number of iterations. method : string Should be 'tracemin_pcg' or 'tracemin_lu'. Otherwise exception is raised. Returns ------- sigma, X : Two NumPy arrays of floats. The lowest eigenvalues and corresponding eigenvectors of L. The size of input X determines the size of these outputs. As this is for Fiedler vectors, the zero eigenvalue (and constant eigenvector) are avoided. " 2853,"def test_complex_pipeline_with_shared_prompt_model_and_prompt_template_yaml(tmp_path): with open(tmp_path / ""tmp_config_with_prompt_template.yml"", ""w"") as tmp_file: tmp_file.write( f ) pipeline = Pipeline.load_from_yaml(path=tmp_path / ""tmp_config_with_prompt_template.yml"") result = pipeline.run(query=""not relevant"", documents=[Document(""Berlin is an amazing city."")]) assert ""Berlin"" in result[""results""][0] assert len(result[""meta""][""invocation_context""]) > 0 @pytest.mark.skipif( not os.environ.get(""OPENAI_API_KEY"", None), reason=""Please export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test."", )"," version: ignore components: - name: pmodel type: PromptModel params: model_name_or_path: google/flan-t5-small model_kwargs: torch_dtype: torch.bfloat16 - name: question_generation_template type: PromptTemplate params: name: question-generation-new prompt_text: ""Given the context please generate a question. Context: $documents; Question:"" - name: p1 params: model_name_or_path: pmodel default_prompt_template: question_generation_template output_variable: questions type: PromptNode - name: p2 params: model_name_or_path: pmodel default_prompt_template: question-answering type: PromptNode pipelines: - name: query nodes: - name: p1 inputs: - Query - name: p2 inputs: - p1 ",72,55,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_complex_pipeline_with_shared_prompt_model_and_prompt_template_yaml(tmp_path): with open(tmp_path / ""tmp_config_with_prompt_template.yml"", ""w"") as tmp_file: tmp_file.write( f ) pipeline = Pipeline.load_from_yaml(path=tmp_path / ""tmp_config_with_prompt_template.yml"") result = pipeline.run(query=""not relevant"", documents=[Document(""Berlin is an amazing city."")]) assert ""Berlin"" in result[""results""][0] assert len(result[""meta""][""invocation_context""]) > 0 @pytest.mark.skipif( not os.environ.get(""OPENAI_API_KEY"", None), reason=""Please export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test."", ) ``` ###Assistant : version: ignore components: - name: pmodel type: PromptModel params: model_name_or_path: google/flan-t5-small model_kwargs: torch_dtype: torch.bfloat16 - name: question_generation_template type: PromptTemplate params: name: question-generation-new prompt_text: ""Given the context please generate a question. Context: $documents; Question:"" - name: p1 params: model_name_or_path: pmodel default_prompt_template: question_generation_template output_variable: questions type: PromptNode - name: p2 params: model_name_or_path: pmodel default_prompt_template: question-answering type: PromptNode pipelines: - name: query nodes: - name: p1 inputs: - Query - name: p2 inputs: - p1 " 2854,"def _download_model(self): self.logger.info(""Downloading model: '%s' from: %s"", self._model_name, self._url_download) for attempt in range(self._retries): try: downloaded_size = self._url_partial_size req = urllib.request.Request(self._url_download) if downloaded_size != 0: req.add_header(""Range"", f""bytes={downloaded_size}-"") with urllib.request.urlopen(req, timeout=10) as response: self.logger.debug(""header info: {%s}"", response.info()) self.logger.debug(""Return Code: %s"", response.getcode()) self._write_zipfile(response, downloaded_size) break except (socket_error, socket_timeout, urllib.error.HTTPError, urllib.error.URLError) as err: if attempt + 1 < self._retries: self.logger.warning(""Error downloading model (%s). Retrying %s of %s..."", str(err), attempt + 2, self._retries) else: self.logger.error(""Failed to download model. Exiting. (Error: '%s', URL: "" ""'%s')"", str(err), self._url_download) self.logger.info(""You can try running again to resume the download."") self.logger.info(""Alternatively, you can manually download the model from: %s "" ""and unzip the contents to: %s"", self._url_download, self._cache_dir) sys.exit(1) ", Download the model zip from github to the cache folder. ,10,109,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _download_model(self): self.logger.info(""Downloading model: '%s' from: %s"", self._model_name, self._url_download) for attempt in range(self._retries): try: downloaded_size = self._url_partial_size req = urllib.request.Request(self._url_download) if downloaded_size != 0: req.add_header(""Range"", f""bytes={downloaded_size}-"") with urllib.request.urlopen(req, timeout=10) as response: self.logger.debug(""header info: {%s}"", response.info()) self.logger.debug(""Return Code: %s"", response.getcode()) self._write_zipfile(response, downloaded_size) break except (socket_error, socket_timeout, urllib.error.HTTPError, urllib.error.URLError) as err: if attempt + 1 < self._retries: self.logger.warning(""Error downloading model (%s). 
Retrying %s of %s..."", str(err), attempt + 2, self._retries) else: self.logger.error(""Failed to download model. Exiting. (Error: '%s', URL: "" ""'%s')"", str(err), self._url_download) self.logger.info(""You can try running again to resume the download."") self.logger.info(""Alternatively, you can manually download the model from: %s "" ""and unzip the contents to: %s"", self._url_download, self._cache_dir) sys.exit(1) ``` ###Assistant : Download the model zip from github to the cache folder. " 2855,"def fit_transform(self, X, y=None): self._validate_params() self._fit_transform(X) return self.embedding_ ","Compute the embedding vectors for data X and transform X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training set. y : Ignored Not used, present here for API consistency by convention. Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. ",45,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit_transform(self, X, y=None): self._validate_params() self._fit_transform(X) return self.embedding_ ``` ###Assistant : Compute the embedding vectors for data X and transform X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training set. y : Ignored Not used, present here for API consistency by convention. Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. " 2856,"def test_song_from_data_dump(): # Loads from str song = Song.from_data_dump( ) assert song.name == ""Ropes"" assert song.artists == [""Dirty Palm"", ""Chandler Jewels""] assert song.album_name == ""Ropes"" assert song.album_artist == ""Dirty Palm"" assert song.genres == [""gaming edm"", ""melbourne bounce international""] assert song.disc_number == 1 assert song.duration == 188 assert song.year == 2021 assert song.date == ""2021-10-28"" assert song.track_number == 1 assert song.tracks_count == 1 assert song.isrc == ""GB2LD2110301"" assert song.song_id == ""1t2qKa8K72IBC8yQlhD9bU"" assert ( song.cover_url == ""https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"" ) assert song.explicit == False assert song.download_url == None "," Tests if Song.from_data_dump() works correctly. { ""name"": ""Ropes"", ""artists"": [""Dirty Palm"", ""Chandler Jewels""], ""album_name"": ""Ropes"", ""album_artist"": ""Dirty Palm"", ""genres"": [""gaming edm"", ""melbourne bounce international""], ""disc_number"": 1, ""duration"": 188, ""year"": 2021, ""date"": ""2021-10-28"", ""track_number"": 1, ""tracks_count"": 1, ""isrc"": ""GB2LD2110301"", ""song_id"": ""1t2qKa8K72IBC8yQlhD9bU"", ""cover_url"": ""https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"", ""explicit"": false, ""download_url"": null, ""artist"" : ""Dirty Palm"", ""disc_count"": 1, ""copyright"": """", ""publisher"": """", ""url"": ""https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU"" } ",59,84,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_song_from_data_dump(): # Loads from str song = Song.from_data_dump( ) assert song.name == ""Ropes"" assert song.artists == [""Dirty Palm"", ""Chandler Jewels""] assert song.album_name == ""Ropes"" assert song.album_artist == ""Dirty Palm"" assert song.genres == [""gaming edm"", ""melbourne bounce international""] assert song.disc_number == 1 assert song.duration == 188 assert song.year == 2021 assert song.date == ""2021-10-28"" assert song.track_number == 1 assert song.tracks_count == 1 assert song.isrc == ""GB2LD2110301"" assert song.song_id == ""1t2qKa8K72IBC8yQlhD9bU"" assert ( song.cover_url == ""https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"" ) assert song.explicit == False assert song.download_url == None ``` ###Assistant : Tests if Song.from_data_dump() works correctly. { ""name"": ""Ropes"", ""artists"": [""Dirty Palm"", ""Chandler Jewels""], ""album_name"": ""Ropes"", ""album_artist"": ""Dirty Palm"", ""genres"": [""gaming edm"", ""melbourne bounce international""], ""disc_number"": 1, ""duration"": 188, ""year"": 2021, ""date"": ""2021-10-28"", ""track_number"": 1, ""tracks_count"": 1, ""isrc"": ""GB2LD2110301"", ""song_id"": ""1t2qKa8K72IBC8yQlhD9bU"", ""cover_url"": ""https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"", ""explicit"": false, ""download_url"": null, ""artist"" : ""Dirty Palm"", ""disc_count"": 1, ""copyright"": """", ""publisher"": """", ""url"": ""https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU"" } " 2857,"def test_run(self) -> Tuple[Dict, Dict]: print("" | > Synthesizing test sentences."") test_audios = {} test_figures = {} test_sentences = self.config.test_sentences for idx, s_info in enumerate(test_sentences): try: aux_inputs = self.get_aux_input_from_test_sentences(s_info) wav, alignment, _, _ = synthesis( self, aux_inputs[""text""], self.config, ""cuda"" in str(next(self.parameters()).device), ap, speaker_id=aux_inputs[""speaker_id""], d_vector=aux_inputs[""d_vector""], style_wav=aux_inputs[""style_wav""], language_id=aux_inputs[""language_id""], language_name=aux_inputs[""language_name""], enable_eos_bos_chars=self.config.enable_eos_bos_chars, use_griffin_lim=True, do_trim_silence=False, ).values() test_audios[""{}-audio"".format(idx)] = wav test_figures[""{}-alignment"".format(idx)] = plot_alignment(alignment.T, output_fig=False) except: # pylint: disable=bare-except print("" !! Error creating Test Sentence -"", idx) return test_figures, test_audios ","Generic test run for `tts` models used by `Trainer`. You can override this for a different behaviour. Returns: Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard. ",29,73,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_run(self) -> Tuple[Dict, Dict]: print("" | > Synthesizing test sentences."") test_audios = {} test_figures = {} test_sentences = self.config.test_sentences for idx, s_info in enumerate(test_sentences): try: aux_inputs = self.get_aux_input_from_test_sentences(s_info) wav, alignment, _, _ = synthesis( self, aux_inputs[""text""], self.config, ""cuda"" in str(next(self.parameters()).device), ap, speaker_id=aux_inputs[""speaker_id""], d_vector=aux_inputs[""d_vector""], style_wav=aux_inputs[""style_wav""], language_id=aux_inputs[""language_id""], language_name=aux_inputs[""language_name""], enable_eos_bos_chars=self.config.enable_eos_bos_chars, use_griffin_lim=True, do_trim_silence=False, ).values() test_audios[""{}-audio"".format(idx)] = wav test_figures[""{}-alignment"".format(idx)] = plot_alignment(alignment.T, output_fig=False) except: # pylint: disable=bare-except print("" !! Error creating Test Sentence -"", idx) return test_figures, test_audios ``` ###Assistant : Generic test run for `tts` models used by `Trainer`. You can override this for a different behaviour. Returns: Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard. " 2858,"def read_dict(self, dictionary, source=''): elements_added = set() for section, keys in dictionary.items(): section = str(section) try: self.add_section(section) except (DuplicateSectionError, ValueError): if self._strict and section in elements_added: raise elements_added.add(section) for key, value in keys.items(): key = self.optionxform(str(key)) if value is not None: value = str(value) if self._strict and (section, key) in elements_added: raise DuplicateOptionError(section, key, source) elements_added.add((section, key)) self.set(section, key, value) ","Read configuration from a dictionary. Keys are section names, values are dictionaries with keys and values that should be present in the section. If the used dictionary type preserves order, sections and their keys will be added in order. All types held in the dictionary are converted to strings during reading, including section names, option names and keys. Optional second argument is the `source' specifying the name of the dictionary being read. ",72,60,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read_dict(self, dictionary, source=''): elements_added = set() for section, keys in dictionary.items(): section = str(section) try: self.add_section(section) except (DuplicateSectionError, ValueError): if self._strict and section in elements_added: raise elements_added.add(section) for key, value in keys.items(): key = self.optionxform(str(key)) if value is not None: value = str(value) if self._strict and (section, key) in elements_added: raise DuplicateOptionError(section, key, source) elements_added.add((section, key)) self.set(section, key, value) ``` ###Assistant : Read configuration from a dictionary. Keys are section names, values are dictionaries with keys and values that should be present in the section. If the used dictionary type preserves order, sections and their keys will be added in order. All types held in the dictionary are converted to strings during reading, including section names, option names and keys. Optional second argument is the `source' specifying the name of the dictionary being read. 
" 2859,"def send(self, load, tries=3, timeout=60, raw=False): _try = 1 while True: try: if self.crypt == ""clear"": log.trace(""ReqChannel send clear load=%r"", load) ret = yield self._uncrypted_transfer(load, timeout=timeout) else: log.trace(""ReqChannel send crypt load=%r"", load) ret = yield self._crypted_transfer( load, timeout=timeout, raw=raw ) break except Exception as exc: log.error(""Failed to send msg %r"", dir(exc)) if _try == tries: raise #salt.exceptions.SaltClientError(""Connection to master lost"") else: _try += 1 continue raise salt.ext.tornado.gen.Return(ret) "," Send a request, return a future which will complete when we send the message :param dict load: A load to send across the wire :param int tries: The number of times to make before failure :param int timeout: The number of seconds on a response before failing ",47,67,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def send(self, load, tries=3, timeout=60, raw=False): _try = 1 while True: try: if self.crypt == ""clear"": log.trace(""ReqChannel send clear load=%r"", load) ret = yield self._uncrypted_transfer(load, timeout=timeout) else: log.trace(""ReqChannel send crypt load=%r"", load) ret = yield self._crypted_transfer( load, timeout=timeout, raw=raw ) break except Exception as exc: log.error(""Failed to send msg %r"", dir(exc)) if _try == tries: raise #salt.exceptions.SaltClientError(""Connection to master lost"") else: _try += 1 continue raise salt.ext.tornado.gen.Return(ret) ``` ###Assistant : Send a request, return a future which will complete when we send the message :param dict load: A load to send across the wire :param int tries: The number of times to make before failure :param int timeout: The number of seconds on a response before failing " 2860,"def get_fields_from_path(model, path): pieces = path.split(LOOKUP_SEP) fields = [] for piece in pieces: if fields: parent = get_model_from_relation(fields[-1]) else: parent = model fields.append(parent._meta.get_field(piece)) return fields ","Return list of Fields given path relative to model. e.g. (ModelX, ""user__groups__name"") -> [ , , , ] ",27,25,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_fields_from_path(model, path): pieces = path.split(LOOKUP_SEP) fields = [] for piece in pieces: if fields: parent = get_model_from_relation(fields[-1]) else: parent = model fields.append(parent._meta.get_field(piece)) return fields ``` ###Assistant : Return list of Fields given path relative to model. e.g. 
(ModelX, ""user__groups__name"") -> [ , , , ] " 2861,"def find_config_files(self): files = [] check_environ() # Where to look for the system-wide Distutils config file sys_dir = os.path.dirname(sys.modules['distutils'].__file__) # Look for the system config file sys_file = os.path.join(sys_dir, ""distutils.cfg"") if os.path.isfile(sys_file): files.append(sys_file) # What to call the per-user config file if os.name == 'posix': user_filename = "".pydistutils.cfg"" else: user_filename = ""pydistutils.cfg"" # And look for the user config file if self.want_user_cfg: user_file = os.path.join(os.path.expanduser('~'), user_filename) if os.path.isfile(user_file): files.append(user_file) # All platforms support local setup.cfg local_file = ""setup.cfg"" if os.path.isfile(local_file): files.append(local_file) if DEBUG: self.announce(""using config files: %s"" % ', '.join(files)) return files ","Find as many configuration files as should be processed for this platform, and return a list of filenames in the order in which they should be parsed. The filenames returned are guaranteed to exist (modulo nasty race conditions). There are three possible config files: distutils.cfg in the Distutils installation directory (ie. where the top-level Distutils __inst__.py file lives), a file in the user's home directory named .pydistutils.cfg on Unix and pydistutils.cfg on Windows/Mac; and setup.cfg in the current directory. The file in the user's home directory can be disabled with the --no-user-cfg option. ",93,92,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_config_files(self): files = [] check_environ() # Where to look for the system-wide Distutils config file sys_dir = os.path.dirname(sys.modules['distutils'].__file__) # Look for the system config file sys_file = os.path.join(sys_dir, ""distutils.cfg"") if os.path.isfile(sys_file): files.append(sys_file) # What to call the per-user config file if os.name == 'posix': user_filename = "".pydistutils.cfg"" else: user_filename = ""pydistutils.cfg"" # And look for the user config file if self.want_user_cfg: user_file = os.path.join(os.path.expanduser('~'), user_filename) if os.path.isfile(user_file): files.append(user_file) # All platforms support local setup.cfg local_file = ""setup.cfg"" if os.path.isfile(local_file): files.append(local_file) if DEBUG: self.announce(""using config files: %s"" % ', '.join(files)) return files ``` ###Assistant : Find as many configuration files as should be processed for this platform, and return a list of filenames in the order in which they should be parsed. The filenames returned are guaranteed to exist (modulo nasty race conditions). There are three possible config files: distutils.cfg in the Distutils installation directory (ie. where the top-level Distutils __inst__.py file lives), a file in the user's home directory named .pydistutils.cfg on Unix and pydistutils.cfg on Windows/Mac; and setup.cfg in the current directory. The file in the user's home directory can be disabled with the --no-user-cfg option. " 2862,"def get_save_image_name(org_im_path, output_dir, num): # name prefix of orginal image org_im_name = os.path.split(org_im_path)[-1] im_prefix = os.path.splitext(org_im_name)[0] ext = '.png' # save image path save_im_path = os.path.join(output_dir, im_prefix + ext) if os.path.exists(save_im_path): save_im_path = os.path.join( output_dir, im_prefix + str(num) + ext) return save_im_path "," Get save image name from source image path. 
",8,42,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_save_image_name(org_im_path, output_dir, num): # name prefix of orginal image org_im_name = os.path.split(org_im_path)[-1] im_prefix = os.path.splitext(org_im_name)[0] ext = '.png' # save image path save_im_path = os.path.join(output_dir, im_prefix + ext) if os.path.exists(save_im_path): save_im_path = os.path.join( output_dir, im_prefix + str(num) + ext) return save_im_path ``` ###Assistant : Get save image name from source image path. " 2863,"def rgb_to_hsv(arr): arr = np.asarray(arr) # check length of the last dimension, should be _some_ sort of rgb if arr.shape[-1] != 3: raise ValueError(""Last dimension of input array must be 3; "" ""shape {} was found."".format(arr.shape)) in_shape = arr.shape arr = np.array( arr, copy=False, dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints. ndmin=2, # In case input was 1D. ) out = np.zeros_like(arr) arr_max = arr.max(-1) ipos = arr_max > 0 delta = arr.ptp(-1) s = np.zeros_like(delta) s[ipos] = delta[ipos] / arr_max[ipos] ipos = delta > 0 # red is max idx = (arr[..., 0] == arr_max) & ipos out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx] # green is max idx = (arr[..., 1] == arr_max) & ipos out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx] # blue is max idx = (arr[..., 2] == arr_max) & ipos out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx] out[..., 0] = (out[..., 0] / 6.0) % 1.0 out[..., 1] = s out[..., 2] = arr_max return out.reshape(in_shape) "," Convert float RGB values (in the range [0, 1]), in a numpy array to HSV values. Parameters ---------- arr : (..., 3) array-like All values must be in the range [0, 1] Returns ------- (..., 3) ndarray Colors converted to HSV values in range [0, 1] ",46,175,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rgb_to_hsv(arr): arr = np.asarray(arr) # check length of the last dimension, should be _some_ sort of rgb if arr.shape[-1] != 3: raise ValueError(""Last dimension of input array must be 3; "" ""shape {} was found."".format(arr.shape)) in_shape = arr.shape arr = np.array( arr, copy=False, dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints. ndmin=2, # In case input was 1D. ) out = np.zeros_like(arr) arr_max = arr.max(-1) ipos = arr_max > 0 delta = arr.ptp(-1) s = np.zeros_like(delta) s[ipos] = delta[ipos] / arr_max[ipos] ipos = delta > 0 # red is max idx = (arr[..., 0] == arr_max) & ipos out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx] # green is max idx = (arr[..., 1] == arr_max) & ipos out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx] # blue is max idx = (arr[..., 2] == arr_max) & ipos out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx] out[..., 0] = (out[..., 0] / 6.0) % 1.0 out[..., 1] = s out[..., 2] = arr_max return out.reshape(in_shape) ``` ###Assistant : Convert float RGB values (in the range [0, 1]), in a numpy array to HSV values. 
Parameters ---------- arr : (..., 3) array-like All values must be in the range [0, 1] Returns ------- (..., 3) ndarray Colors converted to HSV values in range [0, 1] " 2864,"def test_scheduler_verify_pool_full(self, dag_maker, configs): with conf_vars(configs): with dag_maker(dag_id='test_scheduler_verify_pool_full'): BashOperator( task_id='dummy', pool='test_scheduler_verify_pool_full', bash_command='echo hi', ) session = settings.Session() pool = Pool(pool='test_scheduler_verify_pool_full', slots=1) session.add(pool) session.flush() self.scheduler_job = SchedulerJob(executor=self.null_exec) self.scheduler_job.processor_agent = mock.MagicMock() # Create 2 dagruns, which will create 2 task instances. dr = dag_maker.create_dagrun( run_type=DagRunType.SCHEDULED, ) self.scheduler_job._schedule_dag_run(dr, session) dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.RUNNING) self.scheduler_job._schedule_dag_run(dr, session) session.flush() task_instances_list = self.scheduler_job._executable_task_instances_to_queued( max_tis=32, session=session ) assert len(task_instances_list) == 1 "," Test task instances not queued when pool is full ",9,64,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_scheduler_verify_pool_full(self, dag_maker, configs): with conf_vars(configs): with dag_maker(dag_id='test_scheduler_verify_pool_full'): BashOperator( task_id='dummy', pool='test_scheduler_verify_pool_full', bash_command='echo hi', ) session = settings.Session() pool = Pool(pool='test_scheduler_verify_pool_full', slots=1) session.add(pool) session.flush() self.scheduler_job = SchedulerJob(executor=self.null_exec) self.scheduler_job.processor_agent = mock.MagicMock() # Create 2 dagruns, which will create 2 task instances. dr = dag_maker.create_dagrun( run_type=DagRunType.SCHEDULED, ) self.scheduler_job._schedule_dag_run(dr, session) dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.RUNNING) self.scheduler_job._schedule_dag_run(dr, session) session.flush() task_instances_list = self.scheduler_job._executable_task_instances_to_queued( max_tis=32, session=session ) assert len(task_instances_list) == 1 ``` ###Assistant : Test task instances not queued when pool is full " 2865,"def ms_word(): # TODO : Take location input from the user try: speak(""Enter the document's location - "") location = input(""Enter the document's location - "") file_loc = doubleslash(location) doc = docx.Document(file_loc) fullText = [] for para in doc.paragraphs: fullText.append(para.text) #print(fullText) doc_file = '\n'.join(fullText) print(doc_file) speak(doc_file) except Exception as exp: #print(exp) print(f""ERROR - {exp}"") print(Fore.YELLOW + ""I could'nt locate the file!\nIf you didn't specify the extension of the file, please specify it."") return ""None"" ","[Print and speak out a ms_word docx file as specified in the path] ",13,74,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def ms_word(): # TODO : Take location input from the user try: speak(""Enter the document's location - "") location = input(""Enter the document's location - "") file_loc = doubleslash(location) doc = docx.Document(file_loc) fullText = [] for para in doc.paragraphs: fullText.append(para.text) #print(fullText) doc_file = '\n'.join(fullText) print(doc_file) speak(doc_file) except Exception as exp: #print(exp) print(f""ERROR - {exp}"") print(Fore.YELLOW + ""I could'nt locate the file!\nIf you didn't specify the extension of the file, please specify it."") return ""None"" ``` ###Assistant : [Print and speak out a ms_word docx file as specified in the path] " 2866,"def tree_data(G, root, attrs=None, ident=""id"", children=""children""): if G.number_of_nodes() != G.number_of_edges() + 1: raise TypeError(""G is not a tree."") if not G.is_directed(): raise TypeError(""G is not directed."") if not nx.is_weakly_connected(G): raise TypeError(""G is not weakly connected."") # NOTE: to be removed in 3.0 if attrs is not None: import warnings msg = ( ""\nThe `attrs` keyword argument of tree_data is deprecated\n"" ""and will be removed in networkx 3.0.\n"" ""It is replaced with explicit `ident` and `children` "" ""keyword arguments.\n"" ""To make this warning go away and ensure usage is forward\n"" ""compatible, replace `attrs` with `ident` and `children,\n"" ""for example:\n\n"" "" >>> tree_data(G, root, attrs={'id': 'foo', 'children': 'bar'})\n\n"" ""should instead be written as\n\n"" "" >>> tree_data(G, root, ident='foo', children='bar')\n\n"" ""The default values of 'id' and 'children' will not change."" ) warnings.warn(msg, DeprecationWarning, stacklevel=2) ident = attrs[""id""] children = attrs[""children""] if ident == children: raise nx.NetworkXError(""The values for `id` and `children` must be different."") ","Returns data in tree format that is suitable for JSON serialization and use in Javascript documents. Parameters ---------- G : NetworkX graph G must be an oriented tree root : node The root of the tree attrs : dict A dictionary that contains two keys 'id' and 'children'. The corresponding values provide the attribute names for storing NetworkX-internal graph data. The values should be unique. Default value: :samp:`dict(id='id', children='children')`. If some user-defined graph data use these attribute names as data keys, they may be silently dropped. .. deprecated:: 2.6 The `attrs` keyword argument is replaced by `ident` and `children` and will be removed in networkx 3.0 ident : string Attribute name for storing NetworkX-internal graph data. `ident` must have a different value than `children`. The default is 'id'. children : string Attribute name for storing NetworkX-internal graph data. `children` must have a different value than `ident`. The default is 'children'. Returns ------- data : dict A dictionary with node-link formatted data. Raises ------ NetworkXError If `children` and `ident` attributes are identical. Examples -------- >>> from networkx.readwrite import json_graph >>> G = nx.DiGraph([(1, 2)]) >>> data = json_graph.tree_data(G, root=1) To serialize with json >>> import json >>> s = json.dumps(data) Notes ----- Node attributes are stored in this format but keys for attributes must be strings if you want to serialize with JSON. Graph and edge attributes are not stored. See Also -------- tree_graph, node_link_data, adjacency_data ",235,151,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def tree_data(G, root, attrs=None, ident=""id"", children=""children""): if G.number_of_nodes() != G.number_of_edges() + 1: raise TypeError(""G is not a tree."") if not G.is_directed(): raise TypeError(""G is not directed."") if not nx.is_weakly_connected(G): raise TypeError(""G is not weakly connected."") # NOTE: to be removed in 3.0 if attrs is not None: import warnings msg = ( ""\nThe `attrs` keyword argument of tree_data is deprecated\n"" ""and will be removed in networkx 3.0.\n"" ""It is replaced with explicit `ident` and `children` "" ""keyword arguments.\n"" ""To make this warning go away and ensure usage is forward\n"" ""compatible, replace `attrs` with `ident` and `children,\n"" ""for example:\n\n"" "" >>> tree_data(G, root, attrs={'id': 'foo', 'children': 'bar'})\n\n"" ""should instead be written as\n\n"" "" >>> tree_data(G, root, ident='foo', children='bar')\n\n"" ""The default values of 'id' and 'children' will not change."" ) warnings.warn(msg, DeprecationWarning, stacklevel=2) ident = attrs[""id""] children = attrs[""children""] if ident == children: raise nx.NetworkXError(""The values for `id` and `children` must be different."") ``` ###Assistant : Returns data in tree format that is suitable for JSON serialization and use in Javascript documents. Parameters ---------- G : NetworkX graph G must be an oriented tree root : node The root of the tree attrs : dict A dictionary that contains two keys 'id' and 'children'. The corresponding values provide the attribute names for storing NetworkX-internal graph data. The values should be unique. Default value: :samp:`dict(id='id', children='children')`. If some user-defined graph data use these attribute names as data keys, they may be silently dropped. .. deprecated:: 2.6 The `attrs` keyword argument is replaced by `ident` and `children` and will be removed in networkx 3.0 ident : string Attribute name for storing NetworkX-internal graph data. `ident` must have a different value than `children`. The default is 'id'. children : string Attribute name for storing NetworkX-internal graph data. `children` must have a different value than `ident`. The default is 'children'. Returns ------- data : dict A dictionary with node-link formatted data. Raises ------ NetworkXError If `children` and `ident` attributes are identical. Examples -------- >>> from networkx.readwrite import json_graph >>> G = nx.DiGraph([(1, 2)]) >>> data = json_graph.tree_data(G, root=1) To serialize with json >>> import json >>> s = json.dumps(data) Notes ----- Node attributes are stored in this format but keys for attributes must be strings if you want to serialize with JSON. Graph and edge attributes are not stored. 
See Also -------- tree_graph, node_link_data, adjacency_data " 2867,"def test_mutating_input_arrays_y_and_z(fig_test, fig_ref): ax1 = fig_test.add_subplot(111, projection='3d') x = [1, 2, 3] y = [0.0, 0.0, 0.0] z = [0.0, 0.0, 0.0] ax1.plot(x, y, z, 'o-') ax1.set_ylim([0, 4]) ax1.set_zlim([0, 4]) fig_test.draw_without_rendering() # mutate y,z to get a nontrivial line y[:] = [1, 2, 3] z[:] = [1, 2, 3] # draw the same plot without mutating x and y ax2 = fig_ref.add_subplot(111, projection='3d') x = [1, 2, 3] y = [0.0, 0.0, 0.0] z = [0.0, 0.0, 0.0] ax2.plot(x, y, z, 'o-') ax2.set_ylim([0, 4]) ax2.set_zlim([0, 4]) fig_test.draw_without_rendering() "," Test to see if the `z` axis does not get mutated after a call to `Axes3D.plot` test cases came from GH#8990 ",21,87,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_mutating_input_arrays_y_and_z(fig_test, fig_ref): ax1 = fig_test.add_subplot(111, projection='3d') x = [1, 2, 3] y = [0.0, 0.0, 0.0] z = [0.0, 0.0, 0.0] ax1.plot(x, y, z, 'o-') ax1.set_ylim([0, 4]) ax1.set_zlim([0, 4]) fig_test.draw_without_rendering() # mutate y,z to get a nontrivial line y[:] = [1, 2, 3] z[:] = [1, 2, 3] # draw the same plot without mutating x and y ax2 = fig_ref.add_subplot(111, projection='3d') x = [1, 2, 3] y = [0.0, 0.0, 0.0] z = [0.0, 0.0, 0.0] ax2.plot(x, y, z, 'o-') ax2.set_ylim([0, 4]) ax2.set_zlim([0, 4]) fig_test.draw_without_rendering() ``` ###Assistant : Test to see if the `z` axis does not get mutated after a call to `Axes3D.plot` test cases came from GH#8990 " 2868,"def test_search_with_hyphen(self): book = models.Book.objects.create( title=""Harry Potter and the Half-Blood Prince"", publication_date=date(2009, 7, 15), number_of_pages=607, ) index = self.backend.get_index_for_model(models.Book) index.add_item(book) index.refresh() results = self.backend.search(""Half-Blood"", models.Book) self.assertUnsortedListEqual( [r.title for r in results], [ ""Harry Potter and the Half-Blood Prince"", ], ) "," This tests that punctuation characters are treated the same way in both indexing and querying. See: https://github.com/wagtail/wagtail/issues/937 ",17,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_search_with_hyphen(self): book = models.Book.objects.create( title=""Harry Potter and the Half-Blood Prince"", publication_date=date(2009, 7, 15), number_of_pages=607, ) index = self.backend.get_index_for_model(models.Book) index.add_item(book) index.refresh() results = self.backend.search(""Half-Blood"", models.Book) self.assertUnsortedListEqual( [r.title for r in results], [ ""Harry Potter and the Half-Blood Prince"", ], ) ``` ###Assistant : This tests that punctuation characters are treated the same way in both indexing and querying. See: https://github.com/wagtail/wagtail/issues/937 " 2869,"def resolve(self, s): name = s.split('.') used = name.pop(0) try: found = self.importer(used) for frag in name: used += '.' + frag try: found = getattr(found, frag) except AttributeError: self.importer(used) found = getattr(found, frag) return found except ImportError: e, tb = sys.exc_info()[1:] v = ValueError('Cannot resolve %r: %s' % (s, e)) v.__cause__, v.__traceback__ = e, tb raise v "," Resolve strings to objects using standard import and attribute syntax. ",10,58,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def resolve(self, s): name = s.split('.') used = name.pop(0) try: found = self.importer(used) for frag in name: used += '.' + frag try: found = getattr(found, frag) except AttributeError: self.importer(used) found = getattr(found, frag) return found except ImportError: e, tb = sys.exc_info()[1:] v = ValueError('Cannot resolve %r: %s' % (s, e)) v.__cause__, v.__traceback__ = e, tb raise v ``` ###Assistant : Resolve strings to objects using standard import and attribute syntax. " 2870,"def heldout_score(clf, X_test, y_test): score = np.zeros((n_estimators,), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): score[i] = binomial_deviance(y_test, y_pred.ravel()) return score ",compute deviance scores on ``X_test`` and ``y_test``.,7,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def heldout_score(clf, X_test, y_test): score = np.zeros((n_estimators,), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): score[i] = binomial_deviance(y_test, y_pred.ravel()) return score ``` ###Assistant : compute deviance scores on ``X_test`` and ``y_test``. " 2871,"def test_product_types_query_ids_not_exists(user_api_client, category): query = NOT_EXISTS_IDS_COLLECTIONS_QUERY variables = {""filter"": {""ids"": [""fTEJRuFHU6fd2RU="", ""2XwnQNNhwCdEjhP=""]}} response = user_api_client.post_graphql(query, variables) content = get_graphql_content(response, ignore_errors=True) message_error = '{""ids"": [{""message"": ""Invalid ID specified."", ""code"": """"}]}' assert len(content[""errors""]) == 1 assert content[""errors""][0][""message""] == message_error assert content[""data""][""productTypes""] is None QUERY_FILTER_PRODUCT_TYPES = @pytest.mark.parametrize( ""search, expected_names"", ( ("""", [""The best juices"", ""The best beers"", ""The worst beers""]), (""best"", [""The best juices"", ""The best beers""]), (""worst"", [""The worst beers""]), (""average"", []), ), )"," query($filters: ProductTypeFilterInput) { productTypes(first: 10, filter: $filters) { edges { node { name } } } } ",17,72,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_product_types_query_ids_not_exists(user_api_client, category): query = NOT_EXISTS_IDS_COLLECTIONS_QUERY variables = {""filter"": {""ids"": [""fTEJRuFHU6fd2RU="", ""2XwnQNNhwCdEjhP=""]}} response = user_api_client.post_graphql(query, variables) content = get_graphql_content(response, ignore_errors=True) message_error = '{""ids"": [{""message"": ""Invalid ID specified."", ""code"": """"}]}' assert len(content[""errors""]) == 1 assert content[""errors""][0][""message""] == message_error assert content[""data""][""productTypes""] is None QUERY_FILTER_PRODUCT_TYPES = @pytest.mark.parametrize( ""search, expected_names"", ( ("""", [""The best juices"", ""The best beers"", ""The worst beers""]), (""best"", [""The best juices"", ""The best beers""]), (""worst"", [""The worst beers""]), (""average"", []), ), ) ``` ###Assistant : query($filters: ProductTypeFilterInput) { productTypes(first: 10, filter: $filters) { edges { node { name } } } } " 2872,"def _generate_examples(self, filepath): for idx, line in enumerate(open(filepath, ""r"", encoding=""utf-8"")): if idx == 0: continue # skip header line = line.strip() split_line = line.split(""\t"") # Examples not marked with a three out of five consensus are marked with # ""-"" and should not be used in standard evaluations. if split_line[0] == ""-"": continue # Works for both splits even though dev has some extra human labels. yield idx, { ""premise"": split_line[5], ""hypothesis"": split_line[6], ""label"": split_line[0], ""binary_parse_premise"": split_line[1], ""binary_parse_hypothesis"": split_line[2], ""parse_premise"": split_line[3], ""parse_hypothesis"": split_line[4], ""heuristic"": split_line[8], ""subcase"": split_line[9], ""template"": split_line[10], } ","Generate hans examples. Args: filepath: a string Yields: dictionaries containing ""premise"", ""hypothesis"" and ""label"" strings ",15,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _generate_examples(self, filepath): for idx, line in enumerate(open(filepath, ""r"", encoding=""utf-8"")): if idx == 0: continue # skip header line = line.strip() split_line = line.split(""\t"") # Examples not marked with a three out of five consensus are marked with # ""-"" and should not be used in standard evaluations. if split_line[0] == ""-"": continue # Works for both splits even though dev has some extra human labels. yield idx, { ""premise"": split_line[5], ""hypothesis"": split_line[6], ""label"": split_line[0], ""binary_parse_premise"": split_line[1], ""binary_parse_hypothesis"": split_line[2], ""parse_premise"": split_line[3], ""parse_hypothesis"": split_line[4], ""heuristic"": split_line[8], ""subcase"": split_line[9], ""template"": split_line[10], } ``` ###Assistant : Generate hans examples. 
Args: filepath: a string Yields: dictionaries containing ""premise"", ""hypothesis"" and ""label"" strings " 2873,"def _light_internal_color_mode(self) -> str: if (color_mode := self.color_mode) is None: # Backwards compatibility for color_mode added in 2021.4 # Add warning in 2021.6, remove in 2021.10 supported = self._light_internal_supported_color_modes if ColorMode.HS in supported and self.hs_color is not None: return ColorMode.HS if ColorMode.COLOR_TEMP in supported and self.color_temp_kelvin is not None: return ColorMode.COLOR_TEMP if ColorMode.BRIGHTNESS in supported and self.brightness is not None: return ColorMode.BRIGHTNESS if ColorMode.ONOFF in supported: return ColorMode.ONOFF return ColorMode.UNKNOWN return color_mode ",Return the color mode of the light with backwards compatibility.,10,72,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _light_internal_color_mode(self) -> str: if (color_mode := self.color_mode) is None: # Backwards compatibility for color_mode added in 2021.4 # Add warning in 2021.6, remove in 2021.10 supported = self._light_internal_supported_color_modes if ColorMode.HS in supported and self.hs_color is not None: return ColorMode.HS if ColorMode.COLOR_TEMP in supported and self.color_temp_kelvin is not None: return ColorMode.COLOR_TEMP if ColorMode.BRIGHTNESS in supported and self.brightness is not None: return ColorMode.BRIGHTNESS if ColorMode.ONOFF in supported: return ColorMode.ONOFF return ColorMode.UNKNOWN return color_mode ``` ###Assistant : Return the color mode of the light with backwards compatibility. " 2874,"def kde(self, bw_method=None, ind=None, **kwargs): return self(kind=""kde"", bw_method=bw_method, ind=ind, **kwargs) density = kde "," Generate Kernel Density Estimate plot using Gaussian kernels. In statistics, `kernel density estimation`_ (KDE) is a non-parametric way to estimate the probability density function (PDF) of a random variable. This function uses Gaussian kernels and includes automatic bandwidth determination. .. _kernel density estimation: https://en.wikipedia.org/wiki/Kernel_density_estimation Parameters ---------- bw_method : str, scalar or callable, optional The method used to calculate the estimator bandwidth. This can be 'scott', 'silverman', a scalar constant or a callable. If None (default), 'scott' is used. See :class:`scipy.stats.gaussian_kde` for more information. ind : NumPy array or int, optional Evaluation points for the estimated PDF. If None (default), 1000 equally spaced points are used. If `ind` is a NumPy array, the KDE is evaluated at the points passed. If `ind` is an integer, `ind` number of equally spaced points are used. **kwargs Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- matplotlib.axes.Axes or numpy.ndarray of them See Also -------- scipy.stats.gaussian_kde : Representation of a kernel-density estimate using Gaussian kernels. This is the function used internally to estimate the PDF. Examples -------- Given a Series of points randomly sampled from an unknown distribution, estimate its PDF using KDE with automatic bandwidth determination and plot the results, evaluating them at 1000 equally spaced points (default): .. plot:: :context: close-figs >>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5]) >>> ax = s.plot.kde() A scalar bandwidth can be specified. Using a small bandwidth value can lead to over-fitting, while using a large bandwidth value may result in under-fitting: .. 
plot:: :context: close-figs >>> ax = s.plot.kde(bw_method=0.3) .. plot:: :context: close-figs >>> ax = s.plot.kde(bw_method=3) Finally, the `ind` parameter determines the evaluation points for the plot of the estimated PDF: .. plot:: :context: close-figs >>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5]) For DataFrame, it works in the same way: .. plot:: :context: close-figs >>> df = pd.DataFrame({ ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5], ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6], ... }) >>> ax = df.plot.kde() A scalar bandwidth can be specified. Using a small bandwidth value can lead to over-fitting, while using a large bandwidth value may result in under-fitting: .. plot:: :context: close-figs >>> ax = df.plot.kde(bw_method=0.3) .. plot:: :context: close-figs >>> ax = df.plot.kde(bw_method=3) Finally, the `ind` parameter determines the evaluation points for the plot of the estimated PDF: .. plot:: :context: close-figs >>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6]) ",399,13,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def kde(self, bw_method=None, ind=None, **kwargs): return self(kind=""kde"", bw_method=bw_method, ind=ind, **kwargs) density = kde ``` ###Assistant : Generate Kernel Density Estimate plot using Gaussian kernels. In statistics, `kernel density estimation`_ (KDE) is a non-parametric way to estimate the probability density function (PDF) of a random variable. This function uses Gaussian kernels and includes automatic bandwidth determination. .. _kernel density estimation: https://en.wikipedia.org/wiki/Kernel_density_estimation Parameters ---------- bw_method : str, scalar or callable, optional The method used to calculate the estimator bandwidth. This can be 'scott', 'silverman', a scalar constant or a callable. If None (default), 'scott' is used. See :class:`scipy.stats.gaussian_kde` for more information. ind : NumPy array or int, optional Evaluation points for the estimated PDF. If None (default), 1000 equally spaced points are used. If `ind` is a NumPy array, the KDE is evaluated at the points passed. If `ind` is an integer, `ind` number of equally spaced points are used. **kwargs Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- matplotlib.axes.Axes or numpy.ndarray of them See Also -------- scipy.stats.gaussian_kde : Representation of a kernel-density estimate using Gaussian kernels. This is the function used internally to estimate the PDF. Examples -------- Given a Series of points randomly sampled from an unknown distribution, estimate its PDF using KDE with automatic bandwidth determination and plot the results, evaluating them at 1000 equally spaced points (default): .. plot:: :context: close-figs >>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5]) >>> ax = s.plot.kde() A scalar bandwidth can be specified. Using a small bandwidth value can lead to over-fitting, while using a large bandwidth value may result in under-fitting: .. plot:: :context: close-figs >>> ax = s.plot.kde(bw_method=0.3) .. plot:: :context: close-figs >>> ax = s.plot.kde(bw_method=3) Finally, the `ind` parameter determines the evaluation points for the plot of the estimated PDF: .. plot:: :context: close-figs >>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5]) For DataFrame, it works in the same way: .. plot:: :context: close-figs >>> df = pd.DataFrame({ ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5], ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6], ... }) >>> ax = df.plot.kde() A scalar bandwidth can be specified. 
Using a small bandwidth value can lead to over-fitting, while using a large bandwidth value may result in under-fitting: .. plot:: :context: close-figs >>> ax = df.plot.kde(bw_method=0.3) .. plot:: :context: close-figs >>> ax = df.plot.kde(bw_method=3) Finally, the `ind` parameter determines the evaluation points for the plot of the estimated PDF: .. plot:: :context: close-figs >>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6]) " 2875,"def subscription_invoice_requested_webhook(subscription_webhook): return subscription_webhook( INVOICE_REQUESTED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.INVOICE_REQUESTED ) INVOICE_DELETED_SUBSCRIPTION_QUERY = @pytest.fixture"," subscription{ event{ ...on InvoiceDeleted{ invoice{ id } } } } ",10,10,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def subscription_invoice_requested_webhook(subscription_webhook): return subscription_webhook( INVOICE_REQUESTED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.INVOICE_REQUESTED ) INVOICE_DELETED_SUBSCRIPTION_QUERY = @pytest.fixture ``` ###Assistant : subscription{ event{ ...on InvoiceDeleted{ invoice{ id } } } } " 2876,"def test_prompt_from_examples_valid() -> None: template = input_variables = [""question""] example_separator = ""\n\n"" prefix = suffix = examples = [ , , ] prompt_from_examples = PromptTemplate.from_examples( examples, suffix, input_variables, example_separator=example_separator, prefix=prefix, ) prompt_from_template = PromptTemplate( input_variables=input_variables, template=template ) assert prompt_from_examples.template == prompt_from_template.template assert prompt_from_examples.input_variables == prompt_from_template.input_variables ","Test prompt can be successfully constructed from examples.Test Prompt: Question: who are you? Answer: foo Question: what are you? Answer: bar Question: {question} Answer:Test Prompt:Question: {question}\nAnswer:Question: who are you?\nAnswer: fooQuestion: what are you?\nAnswer: bar",34,45,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_prompt_from_examples_valid() -> None: template = input_variables = [""question""] example_separator = ""\n\n"" prefix = suffix = examples = [ , , ] prompt_from_examples = PromptTemplate.from_examples( examples, suffix, input_variables, example_separator=example_separator, prefix=prefix, ) prompt_from_template = PromptTemplate( input_variables=input_variables, template=template ) assert prompt_from_examples.template == prompt_from_template.template assert prompt_from_examples.input_variables == prompt_from_template.input_variables ``` ###Assistant : Test prompt can be successfully constructed from examples.Test Prompt: Question: who are you? Answer: foo Question: what are you? Answer: bar Question: {question} Answer:Test Prompt:Question: {question}\nAnswer:Question: who are you?\nAnswer: fooQuestion: what are you?\nAnswer: bar " 2877,"def compatible_platforms(provided, required): if provided is None or required is None or provided == required: # easy case return True # Mac OS X special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macosx designation. 
provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = ""%s.%s"" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= ""10.3"" or \ dversion == 8 and macosversion >= ""10.4"": return True # egg isn't macosx or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False ","Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. ",33,163,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def compatible_platforms(provided, required): if provided is None or required is None or provided == required: # easy case return True # Mac OS X special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macosx designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = ""%s.%s"" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= ""10.3"" or \ dversion == 8 and macosversion >= ""10.4"": return True # egg isn't macosx or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False ``` ###Assistant : Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. " 2878,"def find_negative_cycle(G, source, weight=""weight""): weight = _weight_function(G, weight) pred = {source: []} v = _inner_bellman_ford(G, [source], weight, pred=pred) if v is None: raise nx.NetworkXError(""No negative cycles detected."") # negative cycle detected... find it neg_cycle = [] stack = [(v, list(pred[v]))] seen = {v} while stack: node, preds = stack[-1] if v in preds: # found the cycle neg_cycle.extend([node, v]) neg_cycle = list(reversed(neg_cycle)) return neg_cycle if preds: nbr = preds.pop() if nbr not in seen: stack.append((nbr, list(pred[nbr]))) neg_cycle.append(node) seen.add(nbr) else: stack.pop() if neg_cycle: neg_cycle.pop() else: if v in G[v] and weight(G, v, v) < 0: return [v, v] # should not reach here raise nx.NetworkXError(""Negative cycle is detected but not found"") # should not get here... msg = ""negative cycle detected but not identified"" raise nx.NetworkXUnbounded(msg) ","Returns a cycle with negative total weight if it exists. Bellman-Ford is used to find shortest_paths. That algorithm stops if there exists a negative cycle. This algorithm picks up from there and returns the found negative cycle. 
The cycle consists of a list of nodes in the cycle order. The last node equals the first to make it a cycle. You can look up the edge weights in the original graph. In the case of multigraphs the relevant edge is the minimal weight edge between the nodes in the 2-tuple. If the graph has no negative cycle, a NetworkXError is raised. Parameters ---------- G : NetworkX graph source: node label The search for the negative cycle will start from this node. weight : string or function If this is a string, then edge weights will be accessed via the edge attribute with this key (that is, the weight of the edge joining `u` to `v` will be ``G.edges[u, v][weight]``). If no such edge attribute exists, the weight of the edge is assumed to be one. If this is a function, the weight of an edge is the value returned by the function. The function must accept exactly three positional arguments: the two endpoints of an edge and the dictionary of edge attributes for that edge. The function must return a number. Examples -------- >>> G = nx.DiGraph() >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 2), (2, 0, 1), (1, 4, 2), (4, 0, -5)]) >>> nx.find_negative_cycle(G, 0) [4, 0, 1, 4] Returns ------- cycle : list A list of nodes in the order of the cycle found. The last node equals the first to indicate a cycle. Raises ------ NetworkXError If no negative cycle is found. ",285,125,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_negative_cycle(G, source, weight=""weight""): weight = _weight_function(G, weight) pred = {source: []} v = _inner_bellman_ford(G, [source], weight, pred=pred) if v is None: raise nx.NetworkXError(""No negative cycles detected."") # negative cycle detected... find it neg_cycle = [] stack = [(v, list(pred[v]))] seen = {v} while stack: node, preds = stack[-1] if v in preds: # found the cycle neg_cycle.extend([node, v]) neg_cycle = list(reversed(neg_cycle)) return neg_cycle if preds: nbr = preds.pop() if nbr not in seen: stack.append((nbr, list(pred[nbr]))) neg_cycle.append(node) seen.add(nbr) else: stack.pop() if neg_cycle: neg_cycle.pop() else: if v in G[v] and weight(G, v, v) < 0: return [v, v] # should not reach here raise nx.NetworkXError(""Negative cycle is detected but not found"") # should not get here... msg = ""negative cycle detected but not identified"" raise nx.NetworkXUnbounded(msg) ``` ###Assistant : Returns a cycle with negative total weight if it exists. Bellman-Ford is used to find shortest_paths. That algorithm stops if there exists a negative cycle. This algorithm picks up from there and returns the found negative cycle. The cycle consists of a list of nodes in the cycle order. The last node equals the first to make it a cycle. You can look up the edge weights in the original graph. In the case of multigraphs the relevant edge is the minimal weight edge between the nodes in the 2-tuple. If the graph has no negative cycle, a NetworkXError is raised. Parameters ---------- G : NetworkX graph source: node label The search for the negative cycle will start from this node. weight : string or function If this is a string, then edge weights will be accessed via the edge attribute with this key (that is, the weight of the edge joining `u` to `v` will be ``G.edges[u, v][weight]``). If no such edge attribute exists, the weight of the edge is assumed to be one. If this is a function, the weight of an edge is the value returned by the function. 
The function must accept exactly three positional arguments: the two endpoints of an edge and the dictionary of edge attributes for that edge. The function must return a number. Examples -------- >>> G = nx.DiGraph() >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 2), (2, 0, 1), (1, 4, 2), (4, 0, -5)]) >>> nx.find_negative_cycle(G, 0) [4, 0, 1, 4] Returns ------- cycle : list A list of nodes in the order of the cycle found. The last node equals the first to indicate a cycle. Raises ------ NetworkXError If no negative cycle is found. " 2879,"def expand(image, border=0, fill=0): left, top, right, bottom = _border(border) width = left + image.size[0] + right height = top + image.size[1] + bottom color = _color(fill, image.mode) if image.mode == ""P"" and image.palette: palette = ImagePalette.ImagePalette(palette=image.getpalette()) if isinstance(color, tuple): color = palette.getcolor(color) else: palette = None out = Image.new(image.mode, (width, height), color) if palette: out.putpalette(palette.palette) out.paste(image, (left, top)) return out "," Add border to the image :param image: The image to expand. :param border: Border width, in pixels. :param fill: Pixel fill value (a color value). Default is 0 (black). :return: An image. ",32,61,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def expand(image, border=0, fill=0): left, top, right, bottom = _border(border) width = left + image.size[0] + right height = top + image.size[1] + bottom color = _color(fill, image.mode) if image.mode == ""P"" and image.palette: palette = ImagePalette.ImagePalette(palette=image.getpalette()) if isinstance(color, tuple): color = palette.getcolor(color) else: palette = None out = Image.new(image.mode, (width, height), color) if palette: out.putpalette(palette.palette) out.paste(image, (left, top)) return out ``` ###Assistant : Add border to the image :param image: The image to expand. :param border: Border width, in pixels. :param fill: Pixel fill value (a color value). Default is 0 (black). :return: An image. " 2880,"def test_digest_enabled(self, digests, mock_func): backend = RedisBackend() digests.digest = backend.digest digests.enabled.return_value = True rule = Rule.objects.create(project=self.project, label=""my rule"") ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True) event = self.store_event( data={""message"": ""Hello world"", ""level"": ""error""}, project_id=self.project.id ) key = f""mail:p:{self.project.id}"" backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0) with self.tasks(): deliver_digest(key) attachment, text = get_attachment() assert attachment[""title""] == ""Hello world"" assert attachment[""text""] == """" "," Test that with digests enabled, but Slack notification settings (and not email settings), we send a Slack notification ",18,54,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_digest_enabled(self, digests, mock_func): backend = RedisBackend() digests.digest = backend.digest digests.enabled.return_value = True rule = Rule.objects.create(project=self.project, label=""my rule"") ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True) event = self.store_event( data={""message"": ""Hello world"", ""level"": ""error""}, project_id=self.project.id ) key = f""mail:p:{self.project.id}"" backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0) with self.tasks(): deliver_digest(key) attachment, text = get_attachment() assert attachment[""title""] == ""Hello world"" assert attachment[""text""] == """" ``` ###Assistant : Test that with digests enabled, but Slack notification settings (and not email settings), we send a Slack notification " 2881,"def pole_zero_numerical_data(system): _check_system(system) system = system.doit() # Get the equivalent TransferFunction object. num_poly = Poly(system.num, system.var).all_coeffs() den_poly = Poly(system.den, system.var).all_coeffs() num_poly = np.array(num_poly, dtype=np.complex128) den_poly = np.array(den_poly, dtype=np.complex128) zeros = np.roots(num_poly) poles = np.roots(den_poly) return zeros, poles "," Returns the numerical data of poles and zeros of the system. It is internally used by ``pole_zero_plot`` to get the data for plotting poles and zeros. Users can use this data to further analyse the dynamics of the system or plot using a different backend/plotting-module. Parameters ========== system : SISOLinearTimeInvariant The system for which the pole-zero data is to be computed. Returns ======= tuple : (zeros, poles) zeros = Zeros of the system. NumPy array of complex numbers. poles = Poles of the system. NumPy array of complex numbers. Raises ====== NotImplementedError When a SISO LTI system is not passed. When time delay terms are present in the system. ValueError When more than one free symbol is present in the system. The only variable in the transfer function should be the variable of the Laplace transform. Examples ======== >>> from sympy.abc import s >>> from sympy.physics.control.lti import TransferFunction >>> from sympy.physics.control.control_plots import pole_zero_numerical_data >>> tf1 = TransferFunction(s**2 + 1, s**4 + 4*s**3 + 6*s**2 + 5*s + 2, s) >>> pole_zero_numerical_data(tf1) # doctest: +SKIP ([-0.+1.j 0.-1.j], [-2. +0.j -0.5+0.8660254j -0.5-0.8660254j -1. +0.j ]) See Also ======== pole_zero_plot ",187,37,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pole_zero_numerical_data(system): _check_system(system) system = system.doit() # Get the equivalent TransferFunction object. num_poly = Poly(system.num, system.var).all_coeffs() den_poly = Poly(system.den, system.var).all_coeffs() num_poly = np.array(num_poly, dtype=np.complex128) den_poly = np.array(den_poly, dtype=np.complex128) zeros = np.roots(num_poly) poles = np.roots(den_poly) return zeros, poles ``` ###Assistant : Returns the numerical data of poles and zeros of the system. It is internally used by ``pole_zero_plot`` to get the data for plotting poles and zeros. Users can use this data to further analyse the dynamics of the system or plot using a different backend/plotting-module. Parameters ========== system : SISOLinearTimeInvariant The system for which the pole-zero data is to be computed. Returns ======= tuple : (zeros, poles) zeros = Zeros of the system. 
NumPy array of complex numbers. poles = Poles of the system. NumPy array of complex numbers. Raises ====== NotImplementedError When a SISO LTI system is not passed. When time delay terms are present in the system. ValueError When more than one free symbol is present in the system. The only variable in the transfer function should be the variable of the Laplace transform. Examples ======== >>> from sympy.abc import s >>> from sympy.physics.control.lti import TransferFunction >>> from sympy.physics.control.control_plots import pole_zero_numerical_data >>> tf1 = TransferFunction(s**2 + 1, s**4 + 4*s**3 + 6*s**2 + 5*s + 2, s) >>> pole_zero_numerical_data(tf1) # doctest: +SKIP ([-0.+1.j 0.-1.j], [-2. +0.j -0.5+0.8660254j -0.5-0.8660254j -1. +0.j ]) See Also ======== pole_zero_plot " 2882,"def get_versions(): commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version'] return tuple([_find_exe_version(cmd) for cmd in commands]) "," Try to find out the versions of gcc, ld and dllwrap. If not possible it returns None for it. ",19,16,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_versions(): commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version'] return tuple([_find_exe_version(cmd) for cmd in commands]) ``` ###Assistant : Try to find out the versions of gcc, ld and dllwrap. If not possible it returns None for it. " 2883,"def __virtual__(): if salt.utils.napalm.virtual(__opts__, __virtualname__, __file__): return __virtualname__ else: err_msg = ""NAPALM is not installed."" log.error(""Unable to load %s beacon: %s"", __virtualname__, err_msg) return False, err_msg "," This beacon can only work when running under a regular or a proxy minion, managed through napalm. ",17,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __virtual__(): if salt.utils.napalm.virtual(__opts__, __virtualname__, __file__): return __virtualname__ else: err_msg = ""NAPALM is not installed."" log.error(""Unable to load %s beacon: %s"", __virtualname__, err_msg) return False, err_msg ``` ###Assistant : This beacon can only work when running under a regular or a proxy minion, managed through napalm. " 2884,"def parse_prompt_attention(text): res = [] round_brackets = [] square_brackets = [] round_bracket_multiplier = 1.1 square_bracket_multiplier = 1 / 1.1 "," Parses a string with attention tokens and returns a list of pairs: text and its assoicated weight. 
Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 [abc] - decreases attention to abc by a multiplier of 1.1 \( - literal character '(' \[ - literal character '[' \) - literal character ')' \] - literal character ']' \\ - literal character '\' anything else - just text >>> parse_prompt_attention('normal text') [['normal text', 1.0]] >>> parse_prompt_attention('an (important) word') [['an ', 1.0], ['important', 1.1], [' word', 1.0]] >>> parse_prompt_attention('(unbalanced') [['unbalanced', 1.1]] >>> parse_prompt_attention('\(literal\]') [['(literal]', 1.0]] >>> parse_prompt_attention('(unnecessary)(parens)') [['unnecessaryparens', 1.1]] >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') [['a ', 1.0], ['house', 1.5730000000000004], [' ', 1.1], ['on', 1.0], [' a ', 1.1], ['hill', 0.55], [', sun, ', 1.1], ['sky', 1.4641000000000006], ['.', 1.1]] ",145,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse_prompt_attention(text): res = [] round_brackets = [] square_brackets = [] round_bracket_multiplier = 1.1 square_bracket_multiplier = 1 / 1.1 ``` ###Assistant : Parses a string with attention tokens and returns a list of pairs: text and its assoicated weight. Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 [abc] - decreases attention to abc by a multiplier of 1.1 \( - literal character '(' \[ - literal character '[' \) - literal character ')' \] - literal character ']' \\ - literal character '\' anything else - just text >>> parse_prompt_attention('normal text') [['normal text', 1.0]] >>> parse_prompt_attention('an (important) word') [['an ', 1.0], ['important', 1.1], [' word', 1.0]] >>> parse_prompt_attention('(unbalanced') [['unbalanced', 1.1]] >>> parse_prompt_attention('\(literal\]') [['(literal]', 1.0]] >>> parse_prompt_attention('(unnecessary)(parens)') [['unnecessaryparens', 1.1]] >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') [['a ', 1.0], ['house', 1.5730000000000004], [' ', 1.1], ['on', 1.0], [' a ', 1.1], ['hill', 0.55], [', sun, ', 1.1], ['sky', 1.4641000000000006], ['.', 1.1]] " 2885,"def get_dependencies(self, candidate): # type: (Candidate) -> list[Candidate] r # FIXME: If there's several galaxy servers set, there may be a # FIXME: situation when the metadata of the same collection # FIXME: differs. So how do we resolve this case? Priority? # FIXME: Taking into account a pinned hash? Exploding on # FIXME: any differences? # NOTE: The underlying implmentation currently uses first found req_map = self._api_proxy.get_collection_dependencies(candidate) # NOTE: This guard expression MUST perform an early exit only # NOTE: after the `get_collection_dependencies()` call because # NOTE: internally it polulates the artifact URL of the candidate, # NOTE: its SHA hash and the Galaxy API token. These are still # NOTE: necessary with `--no-deps` because even with the disabled # NOTE: dependency resolution the outer layer will still need to # NOTE: know how to download and validate the artifact. # # NOTE: Virtual candidates should always return dependencies # NOTE: because they are ephemeral and non-installable. 
if not self._with_deps and not candidate.is_virtual: return [] return [ self._make_req_from_dict({'name': dep_name, 'version': dep_req}) for dep_name, dep_req in req_map.items() ] ","Get direct dependencies of a candidate. :returns: A collection of requirements that `candidate` \ specifies as its dependencies. ",18,178,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_dependencies(self, candidate): # type: (Candidate) -> list[Candidate] r # FIXME: If there's several galaxy servers set, there may be a # FIXME: situation when the metadata of the same collection # FIXME: differs. So how do we resolve this case? Priority? # FIXME: Taking into account a pinned hash? Exploding on # FIXME: any differences? # NOTE: The underlying implmentation currently uses first found req_map = self._api_proxy.get_collection_dependencies(candidate) # NOTE: This guard expression MUST perform an early exit only # NOTE: after the `get_collection_dependencies()` call because # NOTE: internally it polulates the artifact URL of the candidate, # NOTE: its SHA hash and the Galaxy API token. These are still # NOTE: necessary with `--no-deps` because even with the disabled # NOTE: dependency resolution the outer layer will still need to # NOTE: know how to download and validate the artifact. # # NOTE: Virtual candidates should always return dependencies # NOTE: because they are ephemeral and non-installable. if not self._with_deps and not candidate.is_virtual: return [] return [ self._make_req_from_dict({'name': dep_name, 'version': dep_req}) for dep_name, dep_req in req_map.items() ] ``` ###Assistant : Get direct dependencies of a candidate. :returns: A collection of requirements that `candidate` \ specifies as its dependencies. " 2886,"def test_set_displayname(self) -> None: # Modify user channel = self.make_request( ""PUT"", self.url_other_user, access_token=self.admin_user_tok, content={""displayname"": ""foobar""}, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(""@user:test"", channel.json_body[""name""]) self.assertEqual(""foobar"", channel.json_body[""displayname""]) # Get user channel = self.make_request( ""GET"", self.url_other_user, access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(""@user:test"", channel.json_body[""name""]) self.assertEqual(""foobar"", channel.json_body[""displayname""]) "," Test setting the displayname of another user. ",7,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_set_displayname(self) -> None: # Modify user channel = self.make_request( ""PUT"", self.url_other_user, access_token=self.admin_user_tok, content={""displayname"": ""foobar""}, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(""@user:test"", channel.json_body[""name""]) self.assertEqual(""foobar"", channel.json_body[""displayname""]) # Get user channel = self.make_request( ""GET"", self.url_other_user, access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(""@user:test"", channel.json_body[""name""]) self.assertEqual(""foobar"", channel.json_body[""displayname""]) ``` ###Assistant : Test setting the displayname of another user. 
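Illustrative example for the `get_dependencies` entry above (row 2885): a minimal, runnable sketch of its early-exit pattern, where dependency resolution is skipped unless it is enabled or the candidate is virtual. All names here (`Resolver`, `Requirement`, `with_deps`, `is_virtual`) are hypothetical stand-ins, not the real ansible-galaxy API.

```python
# Hypothetical sketch of the "--no-deps" guard documented in row 2885 above;
# class and attribute names are illustrative only.
from dataclasses import dataclass
from typing import Dict, List


@dataclass(frozen=True)
class Requirement:
    name: str
    version: str


class Resolver:
    def __init__(self, with_deps: bool) -> None:
        self.with_deps = with_deps

    def get_dependencies(self, dep_map: Dict[str, str], is_virtual: bool) -> List[Requirement]:
        # Early exit: with dependency resolution disabled, only virtual (meta)
        # candidates still report their declared dependencies.
        if not self.with_deps and not is_virtual:
            return []
        return [Requirement(name, spec) for name, spec in dep_map.items()]


print(Resolver(with_deps=False).get_dependencies({"community.general": ">=5.0.0"}, is_virtual=False))
print(Resolver(with_deps=True).get_dependencies({"community.general": ">=5.0.0"}, is_virtual=False))
```

The first call prints an empty list (dependency resolution disabled); the second prints the single requirement built from the name-to-version map.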
" 2887,"def on_ssl_errors(self, reply, qt_errors): errors = certificateerror.CertificateErrorWrapper(reply, qt_errors) log.network.debug(""Certificate errors: {!r}"".format(errors)) try: host_tpl: Optional[urlutils.HostTupleType] = urlutils.host_tuple( reply.url()) except ValueError: host_tpl = None is_accepted = False is_rejected = False else: assert host_tpl is not None is_accepted = errors in self._accepted_ssl_errors[host_tpl] is_rejected = errors in self._rejected_ssl_errors[host_tpl] log.network.debug(""Already accepted: {} / "" ""rejected {}"".format(is_accepted, is_rejected)) if is_rejected: return elif is_accepted: reply.ignoreSslErrors() return abort_on = self._get_abort_signals(reply) tab = self._get_tab() first_party_url = QUrl() if tab is None else tab.data.last_navigation.url shared.handle_certificate_error( request_url=reply.url(), first_party_url=first_party_url, error=errors, abort_on=abort_on, ) if errors.certificate_was_accepted(): if host_tpl is not None: self._accepted_ssl_errors[host_tpl].add(errors) elif host_tpl is not None: self._rejected_ssl_errors[host_tpl].add(errors) ","Decide if SSL errors should be ignored or not. This slot is called on SSL/TLS errors by the self.sslErrors signal. Args: reply: The QNetworkReply that is encountering the errors. qt_errors: A list of errors. ",34,94,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def on_ssl_errors(self, reply, qt_errors): errors = certificateerror.CertificateErrorWrapper(reply, qt_errors) log.network.debug(""Certificate errors: {!r}"".format(errors)) try: host_tpl: Optional[urlutils.HostTupleType] = urlutils.host_tuple( reply.url()) except ValueError: host_tpl = None is_accepted = False is_rejected = False else: assert host_tpl is not None is_accepted = errors in self._accepted_ssl_errors[host_tpl] is_rejected = errors in self._rejected_ssl_errors[host_tpl] log.network.debug(""Already accepted: {} / "" ""rejected {}"".format(is_accepted, is_rejected)) if is_rejected: return elif is_accepted: reply.ignoreSslErrors() return abort_on = self._get_abort_signals(reply) tab = self._get_tab() first_party_url = QUrl() if tab is None else tab.data.last_navigation.url shared.handle_certificate_error( request_url=reply.url(), first_party_url=first_party_url, error=errors, abort_on=abort_on, ) if errors.certificate_was_accepted(): if host_tpl is not None: self._accepted_ssl_errors[host_tpl].add(errors) elif host_tpl is not None: self._rejected_ssl_errors[host_tpl].add(errors) ``` ###Assistant : Decide if SSL errors should be ignored or not. This slot is called on SSL/TLS errors by the self.sslErrors signal. Args: reply: The QNetworkReply that is encountering the errors. qt_errors: A list of errors. " 2888,"def render_pep440(pieces): if pieces[""closest-tag""]: rendered = pieces[""closest-tag""] if pieces[""distance""] or pieces[""dirty""]: rendered += plus_or_dot(pieces) rendered += f""{pieces['distance']}.g{pieces['short']}"" if pieces[""dirty""]: rendered += "".dirty"" else: # exception #1 rendered = f""0+untagged.{pieces['distance']}.g{pieces['short']}"" if pieces[""dirty""]: rendered += "".dirty"" return rendered ","Build up version string, with post-release ""local version identifier"". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] ",37,36,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def render_pep440(pieces): if pieces[""closest-tag""]: rendered = pieces[""closest-tag""] if pieces[""distance""] or pieces[""dirty""]: rendered += plus_or_dot(pieces) rendered += f""{pieces['distance']}.g{pieces['short']}"" if pieces[""dirty""]: rendered += "".dirty"" else: # exception #1 rendered = f""0+untagged.{pieces['distance']}.g{pieces['short']}"" if pieces[""dirty""]: rendered += "".dirty"" return rendered ``` ###Assistant : Build up version string, with post-release ""local version identifier"". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] " 2889,"async def test_aset_many(self): self.assertEqual(await cache.aset_many({""a"": 1, ""b"": 2}), []) self.assertEqual( await cache.aset_many({""a"": 1, ""b"": 2}, timeout=2, version=""1""), [], ) ",aset_many() does nothing for the dummy cache backend.,8,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_aset_many(self): self.assertEqual(await cache.aset_many({""a"": 1, ""b"": 2}), []) self.assertEqual( await cache.aset_many({""a"": 1, ""b"": 2}, timeout=2, version=""1""), [], ) ``` ###Assistant : aset_many() does nothing for the dummy cache backend. " 2890,"def _user_input(self, input_str): os.environ[""COMP_WORDS""] = input_str idx = len(input_str.split("" "")) - 1 # Index of the last word comp_cword = idx + 1 if input_str.endswith("" "") else idx os.environ[""COMP_CWORD""] = str(comp_cword) sys.argv = input_str.split() "," Set the environment and the list of command line arguments. This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is an array consisting of the individual words in the current command line, the latter is the index of the current cursor position, so in case a word is completed and the cursor is placed after a whitespace, $COMP_CWORD must be incremented by 1: * 'django-admin start' -> COMP_CWORD=1 * 'django-admin startproject' -> COMP_CWORD=1 * 'django-admin startproject ' -> COMP_CWORD=2 ",80,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _user_input(self, input_str): os.environ[""COMP_WORDS""] = input_str idx = len(input_str.split("" "")) - 1 # Index of the last word comp_cword = idx + 1 if input_str.endswith("" "") else idx os.environ[""COMP_CWORD""] = str(comp_cword) sys.argv = input_str.split() ``` ###Assistant : Set the environment and the list of command line arguments. This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is an array consisting of the individual words in the current command line, the latter is the index of the current cursor position, so in case a word is completed and the cursor is placed after a whitespace, $COMP_CWORD must be incremented by 1: * 'django-admin start' -> COMP_CWORD=1 * 'django-admin startproject' -> COMP_CWORD=1 * 'django-admin startproject ' -> COMP_CWORD=2 " 2891,"def _check_conflicts() -> None: # Node-related conflicts # When using the Node server, we must always connect to 8501 (this is # hard-coded in JS). 
Otherwise, the browser would decide what port to # connect to based on window.location.port, which in dev is going to # be (3000) # Import logger locally to prevent circular references from streamlit.logger import get_logger LOGGER = get_logger(__name__) if get_option(""global.developmentMode""): assert _is_unset( ""server.port"" ), ""server.port does not work when global.developmentMode is true."" assert _is_unset(""browser.serverPort""), ( ""browser.serverPort does not work when global.developmentMode is "" ""true."" ) # XSRF conflicts if get_option(""server.enableXsrfProtection""): if not get_option(""server.enableCORS"") or get_option(""global.developmentMode""): LOGGER.warning( ) "," Warning: the config option 'server.enableCORS=false' is not compatible with 'server.enableXsrfProtection=true'. As a result, 'server.enableCORS' is being overridden to 'true'. More information: In order to protect against CSRF attacks, we send a cookie with each request. To do so, we must specify allowable origins, which places a restriction on cross-origin resource sharing. If cross origin resource sharing is required, please disable server.enableXsrfProtection. ",61,102,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_conflicts() -> None: # Node-related conflicts # When using the Node server, we must always connect to 8501 (this is # hard-coded in JS). Otherwise, the browser would decide what port to # connect to based on window.location.port, which in dev is going to # be (3000) # Import logger locally to prevent circular references from streamlit.logger import get_logger LOGGER = get_logger(__name__) if get_option(""global.developmentMode""): assert _is_unset( ""server.port"" ), ""server.port does not work when global.developmentMode is true."" assert _is_unset(""browser.serverPort""), ( ""browser.serverPort does not work when global.developmentMode is "" ""true."" ) # XSRF conflicts if get_option(""server.enableXsrfProtection""): if not get_option(""server.enableCORS"") or get_option(""global.developmentMode""): LOGGER.warning( ) ``` ###Assistant : Warning: the config option 'server.enableCORS=false' is not compatible with 'server.enableXsrfProtection=true'. As a result, 'server.enableCORS' is being overridden to 'true'. More information: In order to protect against CSRF attacks, we send a cookie with each request. To do so, we must specify allowable origins, which places a restriction on cross-origin resource sharing. If cross origin resource sharing is required, please disable server.enableXsrfProtection. " 2892,"def num_columns(self) -> Dict[str, int]: self._check_values_type() return {k: dataset.num_columns for k, dataset in self.items()} ","Number of columns in each split of the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset(""rotten_tomatoes"") >>> ds.num_columns {'test': 2, 'train': 2, 'validation': 2} ``` ",29,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def num_columns(self) -> Dict[str, int]: self._check_values_type() return {k: dataset.num_columns for k, dataset in self.items()} ``` ###Assistant : Number of columns in each split of the dataset. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset(""rotten_tomatoes"") >>> ds.num_columns {'test': 2, 'train': 2, 'validation': 2} ``` " 2893,"def _sample_with_replacement_map_partitions(population, k): stream = iter(population) e = next(stream) reservoir, stream_length = [e for _ in range(k)], 1 w = [rnd.random() for _ in range(k)] nxt = [_geometric(wi) for wi in w] min_nxt = min(nxt) for i, e in enumerate(stream, 1): if i == min_nxt: for j, n in enumerate(nxt): if n == min_nxt: reservoir[j] = e w[j] *= rnd.random() nxt[j] += _geometric(w[j]) min_nxt = min(nxt) stream_length += 1 return reservoir, stream_length "," Reservoir sampling with replacement, the main idea is to use k reservoirs of size 1 See Section Applications in http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf ",20,72,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _sample_with_replacement_map_partitions(population, k): stream = iter(population) e = next(stream) reservoir, stream_length = [e for _ in range(k)], 1 w = [rnd.random() for _ in range(k)] nxt = [_geometric(wi) for wi in w] min_nxt = min(nxt) for i, e in enumerate(stream, 1): if i == min_nxt: for j, n in enumerate(nxt): if n == min_nxt: reservoir[j] = e w[j] *= rnd.random() nxt[j] += _geometric(w[j]) min_nxt = min(nxt) stream_length += 1 return reservoir, stream_length ``` ###Assistant : Reservoir sampling with replacement, the main idea is to use k reservoirs of size 1 See Section Applications in http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf " 2894,"def __setattr__(cls, name, value): member_map = cls.__dict__.get('_member_map_', {}) if name in member_map: raise AttributeError('cannot reassign member %r' % (name, )) super().__setattr__(name, value) "," Block attempts to reassign Enum members. A simple assignment to the class namespace only changes one of the several possible ways to get an Enum member from the Enum class, resulting in an inconsistent Enumeration. ",35,22,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __setattr__(cls, name, value): member_map = cls.__dict__.get('_member_map_', {}) if name in member_map: raise AttributeError('cannot reassign member %r' % (name, )) super().__setattr__(name, value) ``` ###Assistant : Block attempts to reassign Enum members. A simple assignment to the class namespace only changes one of the several possible ways to get an Enum member from the Enum class, resulting in an inconsistent Enumeration. " 2895,"def read_file(self, f, source=None): if source is None: try: source = f.name except AttributeError: source = '' self._read(f, source) ","Like read() but the argument must be a file-like object. The `f' argument must be iterable, returning one line at a time. Optional second argument is the `source' specifying the name of the file being read. If not given, it is taken from f.name. If `f' has no `name' attribute, `' is used. ",53,19,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read_file(self, f, source=None): if source is None: try: source = f.name except AttributeError: source = '' self._read(f, source) ``` ###Assistant : Like read() but the argument must be a file-like object. The `f' argument must be iterable, returning one line at a time. Optional second argument is the `source' specifying the name of the file being read. 
If not given, it is taken from f.name. If `f' has no `name' attribute, `' is used. " 2896,"def get_newsletters() -> pd.DataFrame: urls = [ ""https://defiweekly.substack.com/archive"", ""https://newsletter.thedefiant.io/archive"", ""https://thedailygwei.substack.com/archive"", ""https://todayindefi.substack.com/archive"", ""https://newsletter.banklesshq.com/archive"", ""https://defislate.substack.com/archive"", ] threads = len(urls) newsletters = [] with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor: for newsletter in executor.map(scrape_substack, urls): try: newsletters.append(pd.DataFrame(newsletter)) except KeyError as e: console.print(e, ""\n"") continue df = pd.concat(newsletters, ignore_index=True) df.columns = [""Title"", ""Link"", ""Date""] df[""Title""] = df[""Title""].apply(lambda x: """".join(i for i in x if ord(i) < 128)) df[""Date""] = df[""Date""].apply( lambda x: parser.parse(x).strftime(""%Y-%m-%d %H:%M:%S"") ) df[""Title""] = df[""Title""].apply( lambda x: ""\n"".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x ) return ( df[[""Title"", ""Date"", ""Link""]] .sort_values(by=""Date"", ascending=False) .reset_index(drop=""index"") ) ","Scrape all substack newsletters from url list. [Source: substack.com] Returns ------- pd.DataFrame DataFrame with recent news from most popular DeFi related newsletters. ",22,90,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_newsletters() -> pd.DataFrame: urls = [ ""https://defiweekly.substack.com/archive"", ""https://newsletter.thedefiant.io/archive"", ""https://thedailygwei.substack.com/archive"", ""https://todayindefi.substack.com/archive"", ""https://newsletter.banklesshq.com/archive"", ""https://defislate.substack.com/archive"", ] threads = len(urls) newsletters = [] with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor: for newsletter in executor.map(scrape_substack, urls): try: newsletters.append(pd.DataFrame(newsletter)) except KeyError as e: console.print(e, ""\n"") continue df = pd.concat(newsletters, ignore_index=True) df.columns = [""Title"", ""Link"", ""Date""] df[""Title""] = df[""Title""].apply(lambda x: """".join(i for i in x if ord(i) < 128)) df[""Date""] = df[""Date""].apply( lambda x: parser.parse(x).strftime(""%Y-%m-%d %H:%M:%S"") ) df[""Title""] = df[""Title""].apply( lambda x: ""\n"".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x ) return ( df[[""Title"", ""Date"", ""Link""]] .sort_values(by=""Date"", ascending=False) .reset_index(drop=""index"") ) ``` ###Assistant : Scrape all substack newsletters from url list. [Source: substack.com] Returns ------- pd.DataFrame DataFrame with recent news from most popular DeFi related newsletters. " 2897,"def _object2proto(self) -> GetEnumAttributeAction_PB: return GetEnumAttributeAction_PB( path=self.path, id_at_location=serialize(self.id_at_location), address=serialize(self.address), msg_id=serialize(self.id), ) ","Returns a protobuf serialization of self. As a requirement of all objects which inherit from Serializable, this method transforms the current object into the corresponding Protobuf object so that it can be further serialized. :return: returns a protobuf object :rtype: GetOrSetPropertyAction_PB .. note:: This method is purely an internal method. Please use serialize(object) or one of the other public serialization methods if you wish to serialize an object. 
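Illustrative example for the `get_newsletters` entry above (row 2896): a minimal sketch of the thread-pool fan-out it relies on, mapping a fetch function over several URLs with `concurrent.futures.ThreadPoolExecutor`. The `fetch_title` helper and the example URLs are hypothetical stand-ins for the real substack scraper.

```python
# Minimal thread-pool fan-out sketch; only the standard library is used.
import concurrent.futures
import urllib.request


def fetch_title(url: str) -> str:
    # Toy "scraper": report the URL and the size of the response body.
    with urllib.request.urlopen(url, timeout=10) as resp:
        return f"{url}: {len(resp.read())} bytes"


urls = ["https://example.com", "https://example.org"]

with concurrent.futures.ThreadPoolExecutor(max_workers=len(urls)) as executor:
    # executor.map yields results in input order, like the loop in get_newsletters.
    for result in executor.map(fetch_title, urls):
        print(result)
```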
",68,11,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _object2proto(self) -> GetEnumAttributeAction_PB: return GetEnumAttributeAction_PB( path=self.path, id_at_location=serialize(self.id_at_location), address=serialize(self.address), msg_id=serialize(self.id), ) ``` ###Assistant : Returns a protobuf serialization of self. As a requirement of all objects which inherit from Serializable, this method transforms the current object into the corresponding Protobuf object so that it can be further serialized. :return: returns a protobuf object :rtype: GetOrSetPropertyAction_PB .. note:: This method is purely an internal method. Please use serialize(object) or one of the other public serialization methods if you wish to serialize an object. " 2898,"async def test_becomes_delivered_not_shown_notification(hass): package = Package( tracking_number=""456"", destination_country=206, friendly_name=""friendly name 1"", info_text=""info text 1"", location=""location 1"", timestamp=""2020-08-10 10:32"", origin_country=206, package_type=2, ) ProfileMock.package_list = [package] await _setup_seventeentrack(hass, VALID_CONFIG_FULL_NO_DELIVERED) assert hass.states.get(""sensor.seventeentrack_package_456"") is not None assert len(hass.states.async_entity_ids()) == 1 package_delivered = Package( tracking_number=""456"", destination_country=206, friendly_name=""friendly name 1"", info_text=""info text 1"", location=""location 1"", timestamp=""2020-08-10 10:32"", origin_country=206, package_type=2, status=40, ) ProfileMock.package_list = [package_delivered] with patch( ""homeassistant.components.seventeentrack.sensor.persistent_notification"" ) as persistent_notification_mock: await _goto_future(hass) persistent_notification_mock.create.assert_called() assert not hass.states.async_entity_ids() ",Ensure notification is triggered when package becomes delivered.,8,70,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_becomes_delivered_not_shown_notification(hass): package = Package( tracking_number=""456"", destination_country=206, friendly_name=""friendly name 1"", info_text=""info text 1"", location=""location 1"", timestamp=""2020-08-10 10:32"", origin_country=206, package_type=2, ) ProfileMock.package_list = [package] await _setup_seventeentrack(hass, VALID_CONFIG_FULL_NO_DELIVERED) assert hass.states.get(""sensor.seventeentrack_package_456"") is not None assert len(hass.states.async_entity_ids()) == 1 package_delivered = Package( tracking_number=""456"", destination_country=206, friendly_name=""friendly name 1"", info_text=""info text 1"", location=""location 1"", timestamp=""2020-08-10 10:32"", origin_country=206, package_type=2, status=40, ) ProfileMock.package_list = [package_delivered] with patch( ""homeassistant.components.seventeentrack.sensor.persistent_notification"" ) as persistent_notification_mock: await _goto_future(hass) persistent_notification_mock.create.assert_called() assert not hass.states.async_entity_ids() ``` ###Assistant : Ensure notification is triggered when package becomes delivered. " 2899,"def fit(self, X, y=None, sample_weight=None): X = self._validate_data(X, accept_sparse=[""csc""]) if issparse(X): # Pre-sort indices to avoid that each individual tree of the # ensemble sorts the indices. 
X.sort_indices() rnd = check_random_state(self.random_state) y = rnd.uniform(size=X.shape[0]) # ensure that max_sample is in [1, n_samples]: n_samples = X.shape[0] if self.contamination != ""auto"": if not (0.0 < self.contamination <= 0.5): raise ValueError( ""contamination must be in (0, 0.5], got: %f"" % self.contamination ) if isinstance(self.max_samples, str): if self.max_samples == ""auto"": max_samples = min(256, n_samples) else: raise ValueError( ""max_samples (%s) is not supported."" 'Valid choices are: ""auto"", int or' ""float"" % self.max_samples ) elif isinstance(self.max_samples, numbers.Integral): if self.max_samples > n_samples: warn( ""max_samples (%s) is greater than the "" ""total number of samples (%s). max_samples "" ""will be set to n_samples for estimation."" % (self.max_samples, n_samples) ) max_samples = n_samples else: max_samples = self.max_samples else: # float if not 0.0 < self.max_samples <= 1.0: raise ValueError( ""max_samples must be in (0, 1], got %r"" % self.max_samples ) max_samples = int(self.max_samples * X.shape[0]) self.max_samples_ = max_samples max_depth = int(np.ceil(np.log2(max(max_samples, 2)))) super()._fit( X, y, max_samples, max_depth=max_depth, sample_weight=sample_weight, check_input=False, ) if self.contamination == ""auto"": # 0.5 plays a special role as described in the original paper. # we take the opposite as we consider the opposite of their score. self.offset_ = -0.5 return self # else, define offset_ wrt contamination parameter self.offset_ = np.percentile(self.score_samples(X), 100.0 * self.contamination) return self "," Fit estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. Sparse matrices are also supported, use sparse ``csc_matrix`` for maximum efficiency. y : Ignored Not used, present for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Fitted estimator. ",66,230,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fit(self, X, y=None, sample_weight=None): X = self._validate_data(X, accept_sparse=[""csc""]) if issparse(X): # Pre-sort indices to avoid that each individual tree of the # ensemble sorts the indices. X.sort_indices() rnd = check_random_state(self.random_state) y = rnd.uniform(size=X.shape[0]) # ensure that max_sample is in [1, n_samples]: n_samples = X.shape[0] if self.contamination != ""auto"": if not (0.0 < self.contamination <= 0.5): raise ValueError( ""contamination must be in (0, 0.5], got: %f"" % self.contamination ) if isinstance(self.max_samples, str): if self.max_samples == ""auto"": max_samples = min(256, n_samples) else: raise ValueError( ""max_samples (%s) is not supported."" 'Valid choices are: ""auto"", int or' ""float"" % self.max_samples ) elif isinstance(self.max_samples, numbers.Integral): if self.max_samples > n_samples: warn( ""max_samples (%s) is greater than the "" ""total number of samples (%s). 
max_samples "" ""will be set to n_samples for estimation."" % (self.max_samples, n_samples) ) max_samples = n_samples else: max_samples = self.max_samples else: # float if not 0.0 < self.max_samples <= 1.0: raise ValueError( ""max_samples must be in (0, 1], got %r"" % self.max_samples ) max_samples = int(self.max_samples * X.shape[0]) self.max_samples_ = max_samples max_depth = int(np.ceil(np.log2(max(max_samples, 2)))) super()._fit( X, y, max_samples, max_depth=max_depth, sample_weight=sample_weight, check_input=False, ) if self.contamination == ""auto"": # 0.5 plays a special role as described in the original paper. # we take the opposite as we consider the opposite of their score. self.offset_ = -0.5 return self # else, define offset_ wrt contamination parameter self.offset_ = np.percentile(self.score_samples(X), 100.0 * self.contamination) return self ``` ###Assistant : Fit estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. Sparse matrices are also supported, use sparse ``csc_matrix`` for maximum efficiency. y : Ignored Not used, present for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Fitted estimator. " 2900,"def get_sympy_dir(): this_file = os.path.abspath(__file__) sympy_dir = os.path.join(os.path.dirname(this_file), "".."", "".."") sympy_dir = os.path.normpath(sympy_dir) return os.path.normcase(sympy_dir) "," Returns the root SymPy directory and set the global value indicating whether the system is case sensitive or not. ",19,15,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_sympy_dir(): this_file = os.path.abspath(__file__) sympy_dir = os.path.join(os.path.dirname(this_file), "".."", "".."") sympy_dir = os.path.normpath(sympy_dir) return os.path.normcase(sympy_dir) ``` ###Assistant : Returns the root SymPy directory and set the global value indicating whether the system is case sensitive or not. " 2901,"def CheckCaffeRandom(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] for function in c_random_function_list: ix = line.find(function) # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and line[ix - 1] not in ('_', '.', '>'))): error(filename, linenum, 'caffe/random_fn', 2, 'Use caffe_rng_rand() (or other caffe_rng_* function) instead of ' + function + ') to ensure results are deterministic for a fixed Caffe seed.') threading_list = ( ('asctime(', 'asctime_r('), ('ctime(', 'ctime_r('), ('getgrgid(', 'getgrgid_r('), ('getgrnam(', 'getgrnam_r('), ('getlogin(', 'getlogin_r('), ('getpwnam(', 'getpwnam_r('), ('getpwuid(', 'getpwuid_r('), ('gmtime(', 'gmtime_r('), ('localtime(', 'localtime_r('), ('strtok(', 'strtok_r('), ('ttyname(', 'ttyname_r('), ) ","Checks for calls to C random functions (rand, rand_r, random, ...). Caffe code should (almost) always use the caffe_rng_* functions rather than these, as the internal state of these C functions is independent of the native Caffe RNG system which should produce deterministic results for a fixed Caffe seed set using Caffe::set_random_seed(...). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. 
error: The function to call with any errors found. ",84,99,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def CheckCaffeRandom(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] for function in c_random_function_list: ix = line.find(function) # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and line[ix - 1] not in ('_', '.', '>'))): error(filename, linenum, 'caffe/random_fn', 2, 'Use caffe_rng_rand() (or other caffe_rng_* function) instead of ' + function + ') to ensure results are deterministic for a fixed Caffe seed.') threading_list = ( ('asctime(', 'asctime_r('), ('ctime(', 'ctime_r('), ('getgrgid(', 'getgrgid_r('), ('getgrnam(', 'getgrnam_r('), ('getlogin(', 'getlogin_r('), ('getpwnam(', 'getpwnam_r('), ('getpwuid(', 'getpwuid_r('), ('gmtime(', 'gmtime_r('), ('localtime(', 'localtime_r('), ('strtok(', 'strtok_r('), ('ttyname(', 'ttyname_r('), ) ``` ###Assistant : Checks for calls to C random functions (rand, rand_r, random, ...). Caffe code should (almost) always use the caffe_rng_* functions rather than these, as the internal state of these C functions is independent of the native Caffe RNG system which should produce deterministic results for a fixed Caffe seed set using Caffe::set_random_seed(...). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. " 2902,"def test_mapped_dag(self, dag_id, executor_name, session): # This test needs a real executor to run, so that the `make_list` task can write out the TaskMap from airflow.executors.executor_loader import ExecutorLoader self.dagbag.process_file(str(TEST_DAGS_FOLDER / f'{dag_id}.py')) dag = self.dagbag.get_dag(dag_id) when = datetime.datetime(2022, 1, 1) job = BackfillJob( dag=dag, start_date=when, end_date=when, donot_pickle=True, executor=ExecutorLoader.load_executor(executor_name), ) job.run() dr = DagRun.find(dag_id=dag.dag_id, execution_date=when, session=session)[0] assert dr assert dr.state == DagRunState.SUCCESS # Check that every task has a start and end date for ti in dr.task_instances: assert ti.state == TaskInstanceState.SUCCESS assert ti.start_date is not None assert ti.end_date is not None "," End-to-end test of a simple mapped dag. We test with multiple executors as they have different ""execution environments"" -- for instance DebugExecutor runs a lot more in the same process than other Executors. ",33,89,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def test_mapped_dag(self, dag_id, executor_name, session): # This test needs a real executor to run, so that the `make_list` task can write out the TaskMap from airflow.executors.executor_loader import ExecutorLoader self.dagbag.process_file(str(TEST_DAGS_FOLDER / f'{dag_id}.py')) dag = self.dagbag.get_dag(dag_id) when = datetime.datetime(2022, 1, 1) job = BackfillJob( dag=dag, start_date=when, end_date=when, donot_pickle=True, executor=ExecutorLoader.load_executor(executor_name), ) job.run() dr = DagRun.find(dag_id=dag.dag_id, execution_date=when, session=session)[0] assert dr assert dr.state == DagRunState.SUCCESS # Check that every task has a start and end date for ti in dr.task_instances: assert ti.state == TaskInstanceState.SUCCESS assert ti.start_date is not None assert ti.end_date is not None ``` ###Assistant : End-to-end test of a simple mapped dag. We test with multiple executors as they have different ""execution environments"" -- for instance DebugExecutor runs a lot more in the same process than other Executors. " 2903,"async def test_emergency_ssl_certificate_when_invalid(hass, tmpdir, caplog): cert_path, key_path = await hass.async_add_executor_job( _setup_broken_ssl_pem_files, tmpdir ) hass.config.safe_mode = True assert ( await async_setup_component( hass, ""http"", { ""http"": {""ssl_certificate"": cert_path, ""ssl_key"": key_path}, }, ) is True ) await hass.async_start() await hass.async_block_till_done() assert ( ""Home Assistant is running in safe mode with an emergency self signed ssl certificate because the configured SSL certificate was not usable"" in caplog.text ) assert hass.http.site is not None ",Test http can startup with an emergency self signed cert when the current one is broken.,16,69,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_emergency_ssl_certificate_when_invalid(hass, tmpdir, caplog): cert_path, key_path = await hass.async_add_executor_job( _setup_broken_ssl_pem_files, tmpdir ) hass.config.safe_mode = True assert ( await async_setup_component( hass, ""http"", { ""http"": {""ssl_certificate"": cert_path, ""ssl_key"": key_path}, }, ) is True ) await hass.async_start() await hass.async_block_till_done() assert ( ""Home Assistant is running in safe mode with an emergency self signed ssl certificate because the configured SSL certificate was not usable"" in caplog.text ) assert hass.http.site is not None ``` ###Assistant : Test http can startup with an emergency self signed cert when the current one is broken. " 2904,"def get_safe_request_meta(self, request): if not hasattr(request, ""META""): return {} return {k: self.cleanse_setting(k, v) for k, v in request.META.items()} "," Return a dictionary of request.META with sensitive values redacted. ",9,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_safe_request_meta(self, request): if not hasattr(request, ""META""): return {} return {k: self.cleanse_setting(k, v) for k, v in request.META.items()} ``` ###Assistant : Return a dictionary of request.META with sensitive values redacted. " 2905,"def cuda(self, *args, **kwargs) -> nn.Module: return self.data_preprocessor.cuda(*args, **kwargs) ","Overrides this method to set the :attr:`device` Returns: nn.Module: The model itself. ",12,9,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def cuda(self, *args, **kwargs) -> nn.Module: return self.data_preprocessor.cuda(*args, **kwargs) ``` ###Assistant : Overrides this method to set the :attr:`device` Returns: nn.Module: The model itself. " 2906,"def get_cache_attr_name(cls): return ""_{}.{}"".format(cls._meta.app_label, cls._meta.model_name).lower() "," Returns the name of the attribute that should be used to store a reference to the fetched/created object on a request. ",21,5,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_cache_attr_name(cls): return ""_{}.{}"".format(cls._meta.app_label, cls._meta.model_name).lower() ``` ###Assistant : Returns the name of the attribute that should be used to store a reference to the fetched/created object on a request. " 2907,"def get_instance(cls, info, **data): object_id = data.get(""id"") object_sku = data.get(""sku"") attributes = data.get(""attributes"") if attributes: # Prefetches needed by AttributeAssignmentMixin and # associate_attribute_values_to_instance qs = cls.Meta.model.objects.prefetch_related( ""product__product_type__variant_attributes__values"", ""product__product_type__attributevariant"", ) else: # Use the default queryset. qs = models.ProductVariant.objects.all() if object_id: return cls.get_node_or_error( info, object_id, only_type=""ProductVariant"", qs=qs ) elif object_sku: instance = qs.filter(sku=object_sku).first() if not instance: raise ValidationError( { ""sku"": ValidationError( ""Couldn't resolve to a node: %s"" % object_sku, code=""not_found"", ) } ) return instance else: return cls._meta.model() ","Prefetch related fields that are needed to process the mutation. If we are updating an instance and want to update its attributes, # prefetch them. ",25,77,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_instance(cls, info, **data): object_id = data.get(""id"") object_sku = data.get(""sku"") attributes = data.get(""attributes"") if attributes: # Prefetches needed by AttributeAssignmentMixin and # associate_attribute_values_to_instance qs = cls.Meta.model.objects.prefetch_related( ""product__product_type__variant_attributes__values"", ""product__product_type__attributevariant"", ) else: # Use the default queryset. qs = models.ProductVariant.objects.all() if object_id: return cls.get_node_or_error( info, object_id, only_type=""ProductVariant"", qs=qs ) elif object_sku: instance = qs.filter(sku=object_sku).first() if not instance: raise ValidationError( { ""sku"": ValidationError( ""Couldn't resolve to a node: %s"" % object_sku, code=""not_found"", ) } ) return instance else: return cls._meta.model() ``` ###Assistant : Prefetch related fields that are needed to process the mutation. If we are updating an instance and want to update its attributes, # prefetch them. " 2908,"def _can_use_libjoin(self) -> bool: if type(self) is Index: # excludes EAs return isinstance(self.dtype, np.dtype) return not is_interval_dtype(self.dtype) # -------------------------------------------------------------------- # Uncategorized Methods "," Whether we can use the fastpaths implement in _libs.join ",9,22,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _can_use_libjoin(self) -> bool: if type(self) is Index: # excludes EAs return isinstance(self.dtype, np.dtype) return not is_interval_dtype(self.dtype) # -------------------------------------------------------------------- # Uncategorized Methods ``` ###Assistant : Whether we can use the fastpaths implement in _libs.join " 2909,"def sub_syllables(self, from_i, to_j): if not isinstance(from_i, int) or not isinstance(to_j, int): raise ValueError(""both arguments should be integers"") group = self.group if to_j <= from_i: return group.identity else: r = tuple(self.array_form[from_i: to_j]) return group.dtype(r) "," `sub_syllables` returns the subword of the associative word `self` that consists of syllables from positions `from_to` to `to_j`, where `from_to` and `to_j` must be positive integers and indexing is done with origin 0. Examples ======== >>> from sympy.combinatorics import free_group >>> f, a, b = free_group(""a, b"") >>> w = a**5*b*a**2*b**-4*a >>> w.sub_syllables(1, 2) b >>> w.sub_syllables(3, 3) ",59,34,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sub_syllables(self, from_i, to_j): if not isinstance(from_i, int) or not isinstance(to_j, int): raise ValueError(""both arguments should be integers"") group = self.group if to_j <= from_i: return group.identity else: r = tuple(self.array_form[from_i: to_j]) return group.dtype(r) ``` ###Assistant : `sub_syllables` returns the subword of the associative word `self` that consists of syllables from positions `from_to` to `to_j`, where `from_to` and `to_j` must be positive integers and indexing is done with origin 0. Examples ======== >>> from sympy.combinatorics import free_group >>> f, a, b = free_group(""a, b"") >>> w = a**5*b*a**2*b**-4*a >>> w.sub_syllables(1, 2) b >>> w.sub_syllables(3, 3) " 2910,"def seterr(all=None, divide=None, over=None, under=None, invalid=None): pyvals = umath.geterrobj() old = geterr() if divide is None: divide = all or old['divide'] if over is None: over = all or old['over'] if under is None: under = all or old['under'] if invalid is None: invalid = all or old['invalid'] maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) + (_errdict[over] << SHIFT_OVERFLOW) + (_errdict[under] << SHIFT_UNDERFLOW) + (_errdict[invalid] << SHIFT_INVALID)) pyvals[1] = maskvalue umath.seterrobj(pyvals) return old @set_module('numpy')"," Set how floating-point errors are handled. Note that operations on integer scalar types (such as `int16`) are handled like floating point, and are affected by these settings. Parameters ---------- all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Set treatment for all types of floating-point errors at once: - ignore: Take no action when the exception occurs. - warn: Print a `RuntimeWarning` (via the Python `warnings` module). - raise: Raise a `FloatingPointError`. - call: Call a function specified using the `seterrcall` function. - print: Print a warning directly to ``stdout``. - log: Record error in a Log object specified by `seterrcall`. The default is not to change the current behavior. divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for division by zero. over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for floating-point overflow. under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for floating-point underflow. 
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for invalid floating-point operation. Returns ------- old_settings : dict Dictionary containing the old settings. See also -------- seterrcall : Set a callback function for the 'call' mode. geterr, geterrcall, errstate Notes ----- The floating-point exceptions are defined in the IEEE 754 standard [1]_: - Division by zero: infinite result obtained from finite numbers. - Overflow: result too large to be expressed. - Underflow: result so close to zero that some precision was lost. - Invalid operation: result is not an expressible number, typically indicates that a NaN was produced. .. [1] https://en.wikipedia.org/wiki/IEEE_754 Examples -------- >>> old_settings = np.seterr(all='ignore') #seterr to known value >>> np.seterr(over='raise') {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} >>> np.seterr(**old_settings) # reset to default {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'} >>> np.int16(32000) * np.int16(3) 30464 >>> old_settings = np.seterr(all='warn', over='raise') >>> np.int16(32000) * np.int16(3) Traceback (most recent call last): File """", line 1, in FloatingPointError: overflow encountered in scalar multiply >>> old_settings = np.seterr(all='print') >>> np.geterr() {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} >>> np.int16(32000) * np.int16(3) 30464 ",336,72,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def seterr(all=None, divide=None, over=None, under=None, invalid=None): pyvals = umath.geterrobj() old = geterr() if divide is None: divide = all or old['divide'] if over is None: over = all or old['over'] if under is None: under = all or old['under'] if invalid is None: invalid = all or old['invalid'] maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) + (_errdict[over] << SHIFT_OVERFLOW) + (_errdict[under] << SHIFT_UNDERFLOW) + (_errdict[invalid] << SHIFT_INVALID)) pyvals[1] = maskvalue umath.seterrobj(pyvals) return old @set_module('numpy') ``` ###Assistant : Set how floating-point errors are handled. Note that operations on integer scalar types (such as `int16`) are handled like floating point, and are affected by these settings. Parameters ---------- all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Set treatment for all types of floating-point errors at once: - ignore: Take no action when the exception occurs. - warn: Print a `RuntimeWarning` (via the Python `warnings` module). - raise: Raise a `FloatingPointError`. - call: Call a function specified using the `seterrcall` function. - print: Print a warning directly to ``stdout``. - log: Record error in a Log object specified by `seterrcall`. The default is not to change the current behavior. divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for division by zero. over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for floating-point overflow. under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for floating-point underflow. invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional Treatment for invalid floating-point operation. Returns ------- old_settings : dict Dictionary containing the old settings. See also -------- seterrcall : Set a callback function for the 'call' mode. 
geterr, geterrcall, errstate Notes ----- The floating-point exceptions are defined in the IEEE 754 standard [1]_: - Division by zero: infinite result obtained from finite numbers. - Overflow: result too large to be expressed. - Underflow: result so close to zero that some precision was lost. - Invalid operation: result is not an expressible number, typically indicates that a NaN was produced. .. [1] https://en.wikipedia.org/wiki/IEEE_754 Examples -------- >>> old_settings = np.seterr(all='ignore') #seterr to known value >>> np.seterr(over='raise') {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} >>> np.seterr(**old_settings) # reset to default {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'} >>> np.int16(32000) * np.int16(3) 30464 >>> old_settings = np.seterr(all='warn', over='raise') >>> np.int16(32000) * np.int16(3) Traceback (most recent call last): File """", line 1, in FloatingPointError: overflow encountered in scalar multiply >>> old_settings = np.seterr(all='print') >>> np.geterr() {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} >>> np.int16(32000) * np.int16(3) 30464 " 2911,"def _create_drawables(self, tokensource): lineno = charno = maxcharno = 0 maxlinelength = linelength = 0 for ttype, value in tokensource: while ttype not in self.styles: ttype = ttype.parent style = self.styles[ttype] # TODO: make sure tab expansion happens earlier in the chain. It # really ought to be done on the input, as to do it right here is # quite complex. value = value.expandtabs(4) lines = value.splitlines(True) # print lines for i, line in enumerate(lines): temp = line.rstrip('\n') if temp: self._draw_text( self._get_text_pos(linelength, lineno), temp, font = self._get_style_font(style), text_fg = self._get_text_color(style), text_bg = self._get_text_bg_color(style), ) temp_width, temp_hight = self.fonts.get_text_size(temp) linelength += temp_width maxlinelength = max(maxlinelength, linelength) charno += len(temp) maxcharno = max(maxcharno, charno) if line.endswith('\n'): # add a line for each extra line in the value linelength = 0 charno = 0 lineno += 1 self.maxlinelength = maxlinelength self.maxcharno = maxcharno self.maxlineno = lineno "," Create drawables for the token content. ",6,144,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_drawables(self, tokensource): lineno = charno = maxcharno = 0 maxlinelength = linelength = 0 for ttype, value in tokensource: while ttype not in self.styles: ttype = ttype.parent style = self.styles[ttype] # TODO: make sure tab expansion happens earlier in the chain. It # really ought to be done on the input, as to do it right here is # quite complex. value = value.expandtabs(4) lines = value.splitlines(True) # print lines for i, line in enumerate(lines): temp = line.rstrip('\n') if temp: self._draw_text( self._get_text_pos(linelength, lineno), temp, font = self._get_style_font(style), text_fg = self._get_text_color(style), text_bg = self._get_text_bg_color(style), ) temp_width, temp_hight = self.fonts.get_text_size(temp) linelength += temp_width maxlinelength = max(maxlinelength, linelength) charno += len(temp) maxcharno = max(maxcharno, charno) if line.endswith('\n'): # add a line for each extra line in the value linelength = 0 charno = 0 lineno += 1 self.maxlinelength = maxlinelength self.maxcharno = maxcharno self.maxlineno = lineno ``` ###Assistant : Create drawables for the token content. 
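Illustrative example for the `seterr` entry above (row 2910): its "See also" section points to `errstate`, which applies the same settings only inside a `with` block; a short sketch follows.

```python
# np.errstate scopes the floating-point error handling to the with-block and
# restores the previous settings on exit.
import numpy as np

num = np.array([1.0, 0.0])
den = np.array([0.0, 0.0])

with np.errstate(divide="ignore", invalid="ignore"):
    # 1.0/0.0 -> inf and 0.0/0.0 -> nan, without RuntimeWarnings here.
    print(num / den)  # [inf nan]

# Outside the block the process-wide settings (np.seterr/np.geterr) apply again.
print(np.geterr())
```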
" 2912,"def __monotonic_time_coarse() -> float: return time.clock_gettime(CLOCK_MONOTONIC_COARSE) monotonic_time_coarse = time.monotonic with suppress(Exception): if ( platform.system() == ""Linux"" and abs(time.monotonic() - __monotonic_time_coarse()) < 1 ): monotonic_time_coarse = __monotonic_time_coarse ","Return a monotonic time in seconds. This is the coarse version of time_monotonic, which is faster but less accurate. Since many arm64 and 32-bit platforms don't support VDSO with time.monotonic because of errata, we can't rely on the kernel to provide a fast monotonic time. https://lore.kernel.org/lkml/20170404171826.25030-1-marc.zyngier@arm.com/ ",46,26,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __monotonic_time_coarse() -> float: return time.clock_gettime(CLOCK_MONOTONIC_COARSE) monotonic_time_coarse = time.monotonic with suppress(Exception): if ( platform.system() == ""Linux"" and abs(time.monotonic() - __monotonic_time_coarse()) < 1 ): monotonic_time_coarse = __monotonic_time_coarse ``` ###Assistant : Return a monotonic time in seconds. This is the coarse version of time_monotonic, which is faster but less accurate. Since many arm64 and 32-bit platforms don't support VDSO with time.monotonic because of errata, we can't rely on the kernel to provide a fast monotonic time. https://lore.kernel.org/lkml/20170404171826.25030-1-marc.zyngier@arm.com/ " 2913,"def prepare_image_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False): assert not (numpify and torchify), ""You cannot specify both numpy and PyTorch tensors at the same time"" if equal_resolution: image_inputs = [] for i in range(feature_extract_tester.batch_size): image_inputs.append( np.random.randint( 255, size=( feature_extract_tester.num_channels, feature_extract_tester.max_resolution, feature_extract_tester.max_resolution, ), dtype=np.uint8, ) ) else: image_inputs = [] # To avoid getting image width/height 0 min_resolution = feature_extract_tester.min_resolution if getattr(feature_extract_tester, ""size_divisor"", None): # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor` min_resolution = max(feature_extract_tester.size_divisor, min_resolution) for i in range(feature_extract_tester.batch_size): width, height = np.random.choice(np.arange(min_resolution, feature_extract_tester.max_resolution), 2) image_inputs.append( np.random.randint(255, size=(feature_extract_tester.num_channels, width, height), dtype=np.uint8) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] if torchify: image_inputs = [torch.from_numpy(x) for x in image_inputs] return image_inputs ","This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. ",28,129,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def prepare_image_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False): assert not (numpify and torchify), ""You cannot specify both numpy and PyTorch tensors at the same time"" if equal_resolution: image_inputs = [] for i in range(feature_extract_tester.batch_size): image_inputs.append( np.random.randint( 255, size=( feature_extract_tester.num_channels, feature_extract_tester.max_resolution, feature_extract_tester.max_resolution, ), dtype=np.uint8, ) ) else: image_inputs = [] # To avoid getting image width/height 0 min_resolution = feature_extract_tester.min_resolution if getattr(feature_extract_tester, ""size_divisor"", None): # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor` min_resolution = max(feature_extract_tester.size_divisor, min_resolution) for i in range(feature_extract_tester.batch_size): width, height = np.random.choice(np.arange(min_resolution, feature_extract_tester.max_resolution), 2) image_inputs.append( np.random.randint(255, size=(feature_extract_tester.num_channels, width, height), dtype=np.uint8) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] if torchify: image_inputs = [torch.from_numpy(x) for x in image_inputs] return image_inputs ``` ###Assistant : This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. " 2914,"def _update_step_xla(self, gradient, variable, key): return self._update_step(gradient, variable) ","A wrapper of `update_step` to enable XLA acceleration. Due to `tf.function` tracing mechanism, for (gradient, variable) pairs of the same shape and dtype, the execution graph always invokes the first pair it has seen. Thus, we need a `key` argument to make each (gradient, variable) pair unique. In addition, XLA cannot understand string input, so the key is an integer. Args: gradient: backpropagated gradient of the given variable. variable: variable whose value needs to be updated. key (int): a unique key that identifies the variable. Returns: An `Operation` that applies the specified gradients. ",93,8,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _update_step_xla(self, gradient, variable, key): return self._update_step(gradient, variable) ``` ###Assistant : A wrapper of `update_step` to enable XLA acceleration. Due to `tf.function` tracing mechanism, for (gradient, variable) pairs of the same shape and dtype, the execution graph always invokes the first pair it has seen. Thus, we need a `key` argument to make each (gradient, variable) pair unique. In addition, XLA cannot understand string input, so the key is an integer. Args: gradient: backpropagated gradient of the given variable. variable: variable whose value needs to be updated. key (int): a unique key that identifies the variable. Returns: An `Operation` that applies the specified gradients. 
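A minimal, self-contained sketch of the keyed-update idea, not the real Keras optimizer: the `ToySGD` class, learning rate, and variables below are made up for illustration. Compiling the update with `jit_compile=True` and passing a distinct Python integer key per (gradient, variable) pair forces a separate `tf.function` trace for each pair, even when shapes and dtypes coincide.

```Python
import tensorflow as tf

class ToySGD:
    """Hypothetical optimizer illustrating a keyed, XLA-compiled update step."""

    def __init__(self, learning_rate=0.1):
        self.learning_rate = learning_rate
        # jit_compile=True asks TensorFlow to compile the update step with XLA.
        self._compiled_update = tf.function(self._update_step, jit_compile=True)

    def _update_step(self, gradient, variable, key):
        del key  # Only present so each (gradient, variable) pair gets its own trace.
        variable.assign_sub(self.learning_rate * gradient)

    def apply_gradients(self, grads_and_vars):
        # Each distinct Python int `key` triggers a separate tf.function trace.
        for key, (gradient, variable) in enumerate(grads_and_vars):
            self._compiled_update(gradient, variable, key)

v1 = tf.Variable([1.0, 2.0])
v2 = tf.Variable([3.0, 4.0])  # Same shape/dtype as v1, but keyed separately.
ToySGD().apply_gradients([(tf.constant([0.5, 0.5]), v1), (tf.constant([1.0, 1.0]), v2)])
print(v1.numpy(), v2.numpy())  # [0.95 1.95] [2.9 3.9]
```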
" 2915,"async def log_in(self, request): fingerprint = request[""fingerprint""] if self.service.logged_in_fingerprint == fingerprint: return {""fingerprint"": fingerprint} await self._stop_wallet() started = await self.service._start(fingerprint) if started is True: return {""fingerprint"": fingerprint} return {""success"": False, ""error"": ""Unknown Error""} "," Logs in the wallet with a specific key. ",8,33,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def log_in(self, request): fingerprint = request[""fingerprint""] if self.service.logged_in_fingerprint == fingerprint: return {""fingerprint"": fingerprint} await self._stop_wallet() started = await self.service._start(fingerprint) if started is True: return {""fingerprint"": fingerprint} return {""success"": False, ""error"": ""Unknown Error""} ``` ###Assistant : Logs in the wallet with a specific key. " 2916,"def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None: commit_info = ""* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n"" expected_message = f self.check_webhook( ""push_multiple_committers_with_others"", TOPIC_BRANCH_EVENTS, expected_message ) ","Tomasz [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by Tomasz (4), James (3), Brendon (2) and others (1).\n\n{commit_info*9}* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))",21,18,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None: commit_info = ""* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n"" expected_message = f self.check_webhook( ""push_multiple_committers_with_others"", TOPIC_BRANCH_EVENTS, expected_message ) ``` ###Assistant : Tomasz [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by Tomasz (4), James (3), Brendon (2) and others (1).\n\n{commit_info*9}* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed)) " 2917,"def drawControl(self, element, opt, p, widget=None): if element not in [QStyle.ControlElement.CE_TabBarTab, QStyle.ControlElement.CE_TabBarTabShape, QStyle.ControlElement.CE_TabBarTabLabel]: # Let the real style draw it. self._style.drawControl(element, opt, p, widget) return layouts = self._tab_layout(opt) if layouts is None: log.misc.warning(""Could not get layouts for tab!"") return if element == QStyle.ControlElement.CE_TabBarTab: # We override this so we can control TabBarTabShape/TabBarTabLabel. self.drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget) self.drawControl(QStyle.ControlElement.CE_TabBarTabLabel, opt, p, widget) elif element == QStyle.ControlElement.CE_TabBarTabShape: p.fillRect(opt.rect, opt.palette.window()) self._draw_indicator(layouts, opt, p) # We use super() rather than self._style here because we don't want # any sophisticated drawing. 
super().drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget) elif element == QStyle.ControlElement.CE_TabBarTabLabel: if not opt.icon.isNull() and layouts.icon.isValid(): self._draw_icon(layouts, opt, p) alignment = (config.cache['tabs.title.alignment'] | Qt.AlignmentFlag.AlignVCenter | Qt.TextFlag.TextHideMnemonic) self._style.drawItemText(p, layouts.text, int(alignment), opt.palette, bool(opt.state & QStyle.StateFlag.State_Enabled), opt.text, QPalette.ColorRole.WindowText) else: raise ValueError(""Invalid element {!r}"".format(element)) ","Override drawControl to draw odd tabs in a different color. Draws the given element with the provided painter with the style options specified by option. Args: element: ControlElement opt: QStyleOption p: QPainter widget: QWidget ",34,122,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def drawControl(self, element, opt, p, widget=None): if element not in [QStyle.ControlElement.CE_TabBarTab, QStyle.ControlElement.CE_TabBarTabShape, QStyle.ControlElement.CE_TabBarTabLabel]: # Let the real style draw it. self._style.drawControl(element, opt, p, widget) return layouts = self._tab_layout(opt) if layouts is None: log.misc.warning(""Could not get layouts for tab!"") return if element == QStyle.ControlElement.CE_TabBarTab: # We override this so we can control TabBarTabShape/TabBarTabLabel. self.drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget) self.drawControl(QStyle.ControlElement.CE_TabBarTabLabel, opt, p, widget) elif element == QStyle.ControlElement.CE_TabBarTabShape: p.fillRect(opt.rect, opt.palette.window()) self._draw_indicator(layouts, opt, p) # We use super() rather than self._style here because we don't want # any sophisticated drawing. super().drawControl(QStyle.ControlElement.CE_TabBarTabShape, opt, p, widget) elif element == QStyle.ControlElement.CE_TabBarTabLabel: if not opt.icon.isNull() and layouts.icon.isValid(): self._draw_icon(layouts, opt, p) alignment = (config.cache['tabs.title.alignment'] | Qt.AlignmentFlag.AlignVCenter | Qt.TextFlag.TextHideMnemonic) self._style.drawItemText(p, layouts.text, int(alignment), opt.palette, bool(opt.state & QStyle.StateFlag.State_Enabled), opt.text, QPalette.ColorRole.WindowText) else: raise ValueError(""Invalid element {!r}"".format(element)) ``` ###Assistant : Override drawControl to draw odd tabs in a different color. Draws the given element with the provided painter with the style options specified by option. Args: element: ControlElement opt: QStyleOption p: QPainter widget: QWidget " 2918,"def _get_mask(length, max_length): length = length.unsqueeze(-1) B = paddle.shape(length)[0] grid = paddle.arange(0, max_length).unsqueeze(0).tile([B, 1]) zero_mask = paddle.zeros([B, max_length], dtype='float32') inf_mask = paddle.full([B, max_length], '-inf', dtype='float32') diag_mask = paddle.diag( paddle.full( [max_length], '-inf', dtype=paddle.float32), offset=0, name=None) mask = paddle.where(grid >= length, inf_mask, zero_mask) mask = mask.unsqueeze(1) + diag_mask return mask.unsqueeze(1) ","Generate a square mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). ",20,48,Python," ###User : Below is a Python method which does a task. 
Create a documentation for the below code : ```Python def _get_mask(length, max_length): length = length.unsqueeze(-1) B = paddle.shape(length)[0] grid = paddle.arange(0, max_length).unsqueeze(0).tile([B, 1]) zero_mask = paddle.zeros([B, max_length], dtype='float32') inf_mask = paddle.full([B, max_length], '-inf', dtype='float32') diag_mask = paddle.diag( paddle.full( [max_length], '-inf', dtype=paddle.float32), offset=0, name=None) mask = paddle.where(grid >= length, inf_mask, zero_mask) mask = mask.unsqueeze(1) + diag_mask return mask.unsqueeze(1) ``` ###Assistant : Generate a square mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). " 2919,"def test_config_options_removed_on_reparse(self): global_config_path = ""/mock/home/folder/.streamlit/config.toml"" makedirs_patch = patch(""streamlit.config.os.makedirs"") makedirs_patch.return_value = True pathexists_patch = patch(""streamlit.config.os.path.exists"") pathexists_patch.side_effect = lambda path: path == global_config_path global_config = open_patch = patch(""streamlit.config.open"", mock_open(read_data=global_config)) with open_patch, makedirs_patch, pathexists_patch: config.get_config_options() self.assertEqual(""dark"", config.get_option(""theme.base"")) self.assertEqual(""sans serif"", config.get_option(""theme.font"")) global_config = open_patch = patch(""streamlit.config.open"", mock_open(read_data=global_config)) with open_patch, makedirs_patch, pathexists_patch: config.get_config_options(force_reparse=True) self.assertEqual(""dark"", config.get_option(""theme.base"")) self.assertEqual(None, config.get_option(""theme.font"")) ","Test that config options that are removed in a file are also removed from our _config_options dict. [theme] base = ""dark"" font = ""sans serif"" [theme] base = ""dark"" ",29,52,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_config_options_removed_on_reparse(self): global_config_path = ""/mock/home/folder/.streamlit/config.toml"" makedirs_patch = patch(""streamlit.config.os.makedirs"") makedirs_patch.return_value = True pathexists_patch = patch(""streamlit.config.os.path.exists"") pathexists_patch.side_effect = lambda path: path == global_config_path global_config = open_patch = patch(""streamlit.config.open"", mock_open(read_data=global_config)) with open_patch, makedirs_patch, pathexists_patch: config.get_config_options() self.assertEqual(""dark"", config.get_option(""theme.base"")) self.assertEqual(""sans serif"", config.get_option(""theme.font"")) global_config = open_patch = patch(""streamlit.config.open"", mock_open(read_data=global_config)) with open_patch, makedirs_patch, pathexists_patch: config.get_config_options(force_reparse=True) self.assertEqual(""dark"", config.get_option(""theme.base"")) self.assertEqual(None, config.get_option(""theme.font"")) ``` ###Assistant : Test that config options that are removed in a file are also removed from our _config_options dict. [theme] base = ""dark"" font = ""sans serif"" [theme] base = ""dark"" " 2920,"def __getitem__(self, key): getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key, warn_float=True) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. 
result = getitem(key) # Going through simple_new for performance. return type(self)._simple_new(result, name=self._name) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: deprecate_ndim_indexing(result) if hasattr(result, ""_ndarray""): # error: Item ""ndarray[Any, Any]"" of ""Union[ExtensionArray, # ndarray[Any, Any]]"" has no attribute ""_ndarray"" [union-attr] # i.e. NDArrayBackedExtensionArray # Unpack to ndarray for MPL compat return result._ndarray # type: ignore[union-attr] return result # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) "," Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. ",38,178,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __getitem__(self, key): getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key, warn_float=True) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. return type(self)._simple_new(result, name=self._name) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: deprecate_ndim_indexing(result) if hasattr(result, ""_ndarray""): # error: Item ""ndarray[Any, Any]"" of ""Union[ExtensionArray, # ndarray[Any, Any]]"" has no attribute ""_ndarray"" [union-attr] # i.e. NDArrayBackedExtensionArray # Unpack to ndarray for MPL compat return result._ndarray # type: ignore[union-attr] return result # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) ``` ###Assistant : Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. 
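A brief, hedged usage sketch of the indexing behaviour described above (scalar, slice, and boolean-list keys); the printed representations are indicative only and vary with the pandas version.

```Python
import pandas as pd

idx = pd.Index([10, 20, 30, 40], name="vals")

print(idx[1])                           # 20 -> a scalar element, not an Index
print(idx[1:3])                         # Index([20, 30], ...) -> same Index subclass
print(idx[[True, False, True, False]])  # Index([10, 30], ...) -> a plain list works as a boolean mask
```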
" 2921,"def get_data(conditions, filters): data = frappe.db.sql( .format( conditions=conditions ), filters, as_dict=1, ) return data "," SELECT so.transaction_date as date, soi.delivery_date as delivery_date, so.name as sales_order, so.status, so.customer, soi.item_code, DATEDIFF(CURDATE(), soi.delivery_date) as delay_days, IF(so.status in ('Completed','To Bill'), 0, (SELECT delay_days)) as delay, soi.qty, soi.delivered_qty, (soi.qty - soi.delivered_qty) AS pending_qty, IF((SELECT pending_qty) = 0, (TO_SECONDS(Max(dn.posting_date))-TO_SECONDS(so.transaction_date)), 0) as time_taken_to_deliver, IFNULL(SUM(sii.qty), 0) as billed_qty, soi.base_amount as amount, (soi.delivered_qty * soi.base_rate) as delivered_qty_amount, (soi.billed_amt * IFNULL(so.conversion_rate, 1)) as billed_amount, (soi.base_amount - (soi.billed_amt * IFNULL(so.conversion_rate, 1))) as pending_amount, soi.warehouse as warehouse, so.company, soi.name, soi.description as description FROM `tabSales Order` so, (`tabSales Order Item` soi LEFT JOIN `tabSales Invoice Item` sii ON sii.so_detail = soi.name and sii.docstatus = 1) LEFT JOIN `tabDelivery Note Item` dni on dni.so_detail = soi.name RIGHT JOIN `tabDelivery Note` dn on dni.parent = dn.name and dn.docstatus = 1 WHERE soi.parent = so.name and so.status not in ('Stopped', 'Closed', 'On Hold') and so.docstatus = 1 {conditions} GROUP BY soi.name ORDER BY so.transaction_date ASC, soi.item_code ASC ",146,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_data(conditions, filters): data = frappe.db.sql( .format( conditions=conditions ), filters, as_dict=1, ) return data ``` ###Assistant : SELECT so.transaction_date as date, soi.delivery_date as delivery_date, so.name as sales_order, so.status, so.customer, soi.item_code, DATEDIFF(CURDATE(), soi.delivery_date) as delay_days, IF(so.status in ('Completed','To Bill'), 0, (SELECT delay_days)) as delay, soi.qty, soi.delivered_qty, (soi.qty - soi.delivered_qty) AS pending_qty, IF((SELECT pending_qty) = 0, (TO_SECONDS(Max(dn.posting_date))-TO_SECONDS(so.transaction_date)), 0) as time_taken_to_deliver, IFNULL(SUM(sii.qty), 0) as billed_qty, soi.base_amount as amount, (soi.delivered_qty * soi.base_rate) as delivered_qty_amount, (soi.billed_amt * IFNULL(so.conversion_rate, 1)) as billed_amount, (soi.base_amount - (soi.billed_amt * IFNULL(so.conversion_rate, 1))) as pending_amount, soi.warehouse as warehouse, so.company, soi.name, soi.description as description FROM `tabSales Order` so, (`tabSales Order Item` soi LEFT JOIN `tabSales Invoice Item` sii ON sii.so_detail = soi.name and sii.docstatus = 1) LEFT JOIN `tabDelivery Note Item` dni on dni.so_detail = soi.name RIGHT JOIN `tabDelivery Note` dn on dni.parent = dn.name and dn.docstatus = 1 WHERE soi.parent = so.name and so.status not in ('Stopped', 'Closed', 'On Hold') and so.docstatus = 1 {conditions} GROUP BY soi.name ORDER BY so.transaction_date ASC, soi.item_code ASC " 2922,"def getcoroutinelocals(coroutine): frame = getattr(coroutine, ""cr_frame"", None) if frame is not None: return frame.f_locals else: return {} ############################################################################### ### Function Signature Object (PEP 362) ############################################################################### _WrapperDescriptor = type(type.__call__) _MethodWrapper = type(all.__call__) _ClassMethodWrapper = type(int.__dict__['from_bytes']) _NonUserDefinedCallables = (_WrapperDescriptor, _MethodWrapper, 
_ClassMethodWrapper, types.BuiltinFunctionType) "," Get the mapping of coroutine local variables to their current values. A dict is returned, with the keys the local variable names and values the bound values.",27,40,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getcoroutinelocals(coroutine): frame = getattr(coroutine, ""cr_frame"", None) if frame is not None: return frame.f_locals else: return {} ############################################################################### ### Function Signature Object (PEP 362) ############################################################################### _WrapperDescriptor = type(type.__call__) _MethodWrapper = type(all.__call__) _ClassMethodWrapper = type(int.__dict__['from_bytes']) _NonUserDefinedCallables = (_WrapperDescriptor, _MethodWrapper, _ClassMethodWrapper, types.BuiltinFunctionType) ``` ###Assistant : Get the mapping of coroutine local variables to their current values. A dict is returned, with the keys the local variable names and values the bound values. " 2923,"def _forward_over_back_hessian(f, params, use_pfor, dtype=None): return _vectorize_parameters( functools.partial(_hvp, f, params), params, use_pfor=use_pfor, dtype=dtype, ) ","Computes the full Hessian matrix for the scalar-valued f(*params). Args: f: A function taking `params` and returning a scalar. params: A possibly nested structure of tensors. use_pfor: If true, uses `tf.vectorized_map` calls instead of looping. dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes (e.g. `tf.float32`) matching the structure of `f`'s returns. Returns: A possibly nested structure of matrix slices corresponding to `params`. Each slice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`) in the corresponding element of `params` and `P` is the total number of parameters (`sum_s(p_s)`). The full matrix can be obtained by concatenating along the second axis. ",105,14,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _forward_over_back_hessian(f, params, use_pfor, dtype=None): return _vectorize_parameters( functools.partial(_hvp, f, params), params, use_pfor=use_pfor, dtype=dtype, ) ``` ###Assistant : Computes the full Hessian matrix for the scalar-valued f(*params). Args: f: A function taking `params` and returning a scalar. params: A possibly nested structure of tensors. use_pfor: If true, uses `tf.vectorized_map` calls instead of looping. dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes (e.g. `tf.float32`) matching the structure of `f`'s returns. Returns: A possibly nested structure of matrix slices corresponding to `params`. Each slice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`) in the corresponding element of `params` and `P` is the total number of parameters (`sum_s(p_s)`). The full matrix can be obtained by concatenating along the second axis. 
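The same forward-over-backward idea can be sketched with public TensorFlow APIs instead of the internal helpers referenced above; the toy function and three-parameter example below are purely illustrative assumptions. One Hessian-vector product per basis vector yields one column, and stacking the columns gives the full matrix.

```Python
import tensorflow as tf

def hvp(f, primals, tangents):
    # Forward-over-backward Hessian-vector product for a scalar-valued f.
    with tf.autodiff.ForwardAccumulator(primals, tangents) as acc:
        with tf.GradientTape() as tape:
            tape.watch(primals)
            loss = f(primals)
        grads = tape.gradient(loss, primals)  # backward pass, also traced forward-mode
    return acc.jvp(grads)

x = tf.constant([1.0, 2.0, 3.0])
f = lambda v: tf.reduce_sum(v ** 3)  # Hessian of f is diag(6 * v)

columns = [hvp(f, x, tf.one_hot(i, 3)) for i in range(3)]
hessian = tf.stack(columns, axis=1)
print(hessian.numpy())  # diagonal matrix with entries [6., 12., 18.]
```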
" 2924,"def predict_proba(self, X): check_is_fitted(self) # TODO(1.3): Remove ""log"" if self.loss in (""log_loss"", ""log""): return self._predict_proba_lr(X) elif self.loss == ""modified_huber"": binary = len(self.classes_) == 2 scores = self.decision_function(X) if binary: prob2 = np.ones((scores.shape[0], 2)) prob = prob2[:, 1] else: prob = scores np.clip(scores, -1, 1, prob) prob += 1.0 prob /= 2.0 if binary: prob2[:, 0] -= prob prob = prob2 else: # the above might assign zero to all classes, which doesn't # normalize neatly; work around this to produce uniform # probabilities prob_sum = prob.sum(axis=1) all_zero = prob_sum == 0 if np.any(all_zero): prob[all_zero, :] = 1 prob_sum[all_zero] = len(self.classes_) # normalize prob /= prob_sum.reshape((prob.shape[0], -1)) return prob else: raise NotImplementedError( ""predict_(log_)proba only supported when"" "" loss='log_loss' or loss='modified_huber' "" ""(%r given)"" % self.loss ) ","Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. Binary probability estimates for loss=""modified_huber"" are given by (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions it is necessary to perform proper probability calibration by wrapping the classifier with :class:`~sklearn.calibration.CalibratedClassifierCV` instead. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data for prediction. Returns ------- ndarray of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. References ---------- Zadrozny and Elkan, ""Transforming classifier scores into multiclass probability estimates"", SIGKDD'02, https://dl.acm.org/doi/pdf/10.1145/775047.775151 The justification for the formula in the loss=""modified_huber"" case is in the appendix B in: http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf ",138,125,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def predict_proba(self, X): check_is_fitted(self) # TODO(1.3): Remove ""log"" if self.loss in (""log_loss"", ""log""): return self._predict_proba_lr(X) elif self.loss == ""modified_huber"": binary = len(self.classes_) == 2 scores = self.decision_function(X) if binary: prob2 = np.ones((scores.shape[0], 2)) prob = prob2[:, 1] else: prob = scores np.clip(scores, -1, 1, prob) prob += 1.0 prob /= 2.0 if binary: prob2[:, 0] -= prob prob = prob2 else: # the above might assign zero to all classes, which doesn't # normalize neatly; work around this to produce uniform # probabilities prob_sum = prob.sum(axis=1) all_zero = prob_sum == 0 if np.any(all_zero): prob[all_zero, :] = 1 prob_sum[all_zero] = len(self.classes_) # normalize prob /= prob_sum.reshape((prob.shape[0], -1)) return prob else: raise NotImplementedError( ""predict_(log_)proba only supported when"" "" loss='log_loss' or loss='modified_huber' "" ""(%r given)"" % self.loss ) ``` ###Assistant : Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. Binary probability estimates for loss=""modified_huber"" are given by (clip(decision_function(X), -1, 1) + 1) / 2. 
For other loss functions it is necessary to perform proper probability calibration by wrapping the classifier with :class:`~sklearn.calibration.CalibratedClassifierCV` instead. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data for prediction. Returns ------- ndarray of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. References ---------- Zadrozny and Elkan, ""Transforming classifier scores into multiclass probability estimates"", SIGKDD'02, https://dl.acm.org/doi/pdf/10.1145/775047.775151 The justification for the formula in the loss=""modified_huber"" case is in the appendix B in: http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf " 2925,"def fetch_jwks(jwks_url) -> Optional[dict]: response = None try: response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT) response.raise_for_status() jwks = response.json() except requests.exceptions.RequestException: logger.exception(""Unable to fetch jwks from %s"", jwks_url) raise AuthenticationError(""Unable to finalize the authentication process."") except json.JSONDecodeError: content = response.content if response else ""Unable to find the response"" logger.exception( ""Unable to decode the response from auth service with jwks. "" ""Response: %s"", content, ) raise AuthenticationError(""Unable to finalize the authentication process."") keys = jwks.get(""keys"", []) if not keys: logger.warning(""List of JWKS keys is empty"") cache.set(JWKS_KEY, keys, JWKS_CACHE_TIME) return keys ","Fetch JSON Web Key Sets from a provider. Fetched keys will be stored in the cache to reduce the number of possible requests. :raises AuthenticationError ",25,86,Python," ###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def fetch_jwks(jwks_url) -> Optional[dict]: response = None try: response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT) response.raise_for_status() jwks = response.json() except requests.exceptions.RequestException: logger.exception(""Unable to fetch jwks from %s"", jwks_url) raise AuthenticationError(""Unable to finalize the authentication process."") except json.JSONDecodeError: content = response.content if response else ""Unable to find the response"" logger.exception( ""Unable to decode the response from auth service with jwks. "" ""Response: %s"", content, ) raise AuthenticationError(""Unable to finalize the authentication process."") keys = jwks.get(""keys"", []) if not keys: logger.warning(""List of JWKS keys is empty"") cache.set(JWKS_KEY, keys, JWKS_CACHE_TIME) return keys ``` ###Assistant : Fetch JSON Web Key Sets from a provider. Fetched keys will be stored in the cache to reduce the number of possible requests. :raises AuthenticationError "
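A standalone, hedged sketch of the fetch-and-cache pattern described above: it substitutes a plain in-process dictionary for the framework cache used in the original, the function and constant names are made up, and the JWKS URL in the trailing comment is a placeholder.

```Python
import time
import requests

_JWKS_CACHE: dict = {}      # {jwks_url: (expiry_timestamp, keys)}
JWKS_CACHE_TIME = 60 * 60   # keep keys for one hour
REQUEST_TIMEOUT = 5

def get_jwks_keys(jwks_url: str) -> list:
    """Return the cached JWKS keys for a provider, refetching when stale."""
    cached = _JWKS_CACHE.get(jwks_url)
    if cached and cached[0] > time.monotonic():
        return cached[1]
    response = requests.get(jwks_url, timeout=REQUEST_TIMEOUT)
    response.raise_for_status()
    keys = response.json().get("keys", [])
    _JWKS_CACHE[jwks_url] = (time.monotonic() + JWKS_CACHE_TIME, keys)
    return keys

# Example (placeholder URL):
# keys = get_jwks_keys("https://example.com/.well-known/jwks.json")
# kid_to_key = {k.get("kid"): k for k in keys}
```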