Column summary (name, type, and min/max value or string length):

    Unnamed: 0   int64             0 .. 2.93k
    code         string (lengths)  101 .. 62.2k
    docs         string (lengths)  51 .. 10.7k
    doc_len      int64             4 .. 1.74k
    words        int64             4 .. 4.82k
    lang         string (classes)  1 value (Python)
    prompt       string (lengths)  320 .. 71.2k

The prompt field combines each row's code and docs into a single instruction prompt of the form "###User : Below is a Python method which does a task. Create a documentation for the below code : <code> ###Assistant : <docs>".
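Before the rows, a quick note on working with the table itself: the sketch below loads a dump with these columns into pandas and checks it against the summary above. It is a minimal, hypothetical example; the file name "code_docs.csv" is a placeholder, and only the column names and ranges are taken from the summary.

```python
# Hedged sketch: load a CSV with the columns summarized above and sanity-check it.
# "code_docs.csv" is a placeholder path, not a real file name from this dump.
import pandas as pd

df = pd.read_csv("code_docs.csv")

# Expected columns: Unnamed: 0, code, docs, doc_len, words, lang, prompt
print(df.dtypes)

# The summary reports doc_len in the range 4 .. 1.74k and a single lang class.
print(df["doc_len"].min(), df["doc_len"].max())
print(df["lang"].unique())

# Inspect one record: the raw code, its docstring, and the combined prompt.
row = df.iloc[0]
print(row["code"][:200])
print(row["docs"][:200])
print(row["prompt"][:200])
```

Loaded this way, it is also easy to re-derive doc_len and words from the docs and code columns and compare them with the stored values.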
500
def set_aspect(self, aspect, adjustable=None, anchor=None, share=False): if cbook._str_equal(aspect, 'equal'): aspect = 1 if not cbook._str_equal(aspect, 'auto'): aspect = float(aspect) # raise ValueError if necessary if aspect<0: raise ValueError("aspect must be positive") if share: axes = {sibling for name in self._axis_names for sibling in self._shared_axes[name].get_siblings(self)} else: axes = [self] for ax in axes: ax._aspect = aspect if adjustable is None: adjustable = self._adjustable self.set_adjustable(adjustable, share=share) # Handle sharing. if anchor is not None: self.set_anchor(anchor, share=share) self.stale = True
Set the aspect ratio of the axes scaling, i.e. y/x-scale. Parameters ---------- aspect : {'auto', 'equal'} or float Possible values: - 'auto': fill the position rectangle with data. - 'equal': same as ``aspect=1``, i.e. same scaling for x and y. - *float*: The displayed size of 1 unit in y-data coordinates will be *aspect* times the displayed size of 1 unit in x-data coordinates; e.g. for ``aspect=2`` a square in data coordinates will be rendered with a height of twice its width. adjustable : None or {'box', 'datalim'}, optional If not ``None``, this defines which parameter will be adjusted to meet the required aspect. See `.set_adjustable` for further details. anchor : None or str or (float, float), optional If not ``None``, this defines where the Axes will be drawn if there is extra space due to aspect constraints. The most common way to to specify the anchor are abbreviations of cardinal directions: ===== ===================== value description ===== ===================== 'C' centered 'SW' lower left corner 'S' middle of bottom edge 'SE' lower right corner etc. ===== ===================== See `~.Axes.set_anchor` for further details. share : bool, default: False If ``True``, apply the settings to all shared Axes. See Also -------- matplotlib.axes.Axes.set_adjustable Set how the Axes adjusts to achieve the required aspect ratio. matplotlib.axes.Axes.set_anchor Set the position in case of extra space.
doc_len: 219 | words: 77 | lang: Python
501
def test_pk_attributes(self): # pk can be used as a substitute for the primary key. # The primary key can be accessed via the pk property on the model. e = Employee.objects.get(pk=123) self.assertEqual(e.pk, 123) # Or we can use the real attribute name for the primary key: self.assertEqual(e.employee_code, 123) with self.assertRaisesMessage( AttributeError, "'Employee' object has no attribute 'id'" ): e.id
pk and attribute name are available on the model. No default id attribute is added.
doc_len: 15 | words: 59 | lang: Python
502
async def __aenter__(self): if self._closed: # httpx.AsyncClient does not allow reuse so we will not either. raise RuntimeError( "The client cannot be started again after closing. " "Retrieve a new client with `get_client()` instead." ) if self._started: # httpx.AsyncClient does not allow reentrancy so we will not either. raise RuntimeError("The client cannot be started more than once.") await self._exit_stack.__aenter__() # Enter a lifespan context if using an ephemeral application. # See https://github.com/encode/httpx/issues/350 if self._ephemeral_app and self.manage_lifespan: self._ephemeral_lifespan = await self._exit_stack.enter_async_context( app_lifespan_context(self._ephemeral_app) ) # Enter the httpx client's context await self._exit_stack.enter_async_context(self._client) self._started = True return self
Start the client. If the client is already started, this will raise an exception. If the client is already closed, this will raise an exception. Use a new client instance instead.
doc_len: 31 | words: 95 | lang: Python
503
def training_iteration(self) -> ResultDict: # Sample n batches from n workers. new_sample_batches = synchronous_parallel_sample( worker_set=self.workers, concat=False ) for batch in new_sample_batches: # Update counters. self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps() self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps() # Store new samples in the replay buffer. self.local_replay_buffer.add(batch) # Sample n batches from replay buffer until the total number of timesteps # reaches `train_batch_size`. train_batch = sample_min_n_steps_from_buffer( replay_buffer=self.local_replay_buffer, min_steps=self.config["train_batch_size"], count_by_agent_steps=self._by_agent_steps, ) if train_batch is None: return {} # Learn on the training batch. # Use simple optimizer (only for multi-agent or tf-eager; all other # cases should use the multi-GPU optimizer, even if only using 1 GPU) if self.config.get("simple_optimizer") is True: train_results = train_one_step(self, train_batch) else: train_results = multi_gpu_train_one_step(self, train_batch) # TODO: Move training steps counter update outside of `train_one_step()` method. # # Update train step counters. # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps() # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps() # Update target network every `target_network_update_freq` steps. cur_ts = self._counters[NUM_ENV_STEPS_SAMPLED] last_update = self._counters[LAST_TARGET_UPDATE_TS] if cur_ts - last_update >= self.config["target_network_update_freq"]: to_update = self.workers.local_worker().get_policies_to_train() self.workers.local_worker().foreach_policy_to_train( lambda p, pid: pid in to_update and p.update_target() ) self._counters[NUM_TARGET_UPDATES] += 1 self._counters[LAST_TARGET_UPDATE_TS] = cur_ts # Update weights and global_vars - after learning on the local worker - on all # remote workers. global_vars = { "timestep": self._counters[NUM_ENV_STEPS_SAMPLED], } # Update remote workers' weights and global vars after learning on local worker. with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: self.workers.sync_weights(global_vars=global_vars) # Return all collected metrics for the iteration. return train_results
QMIX training iteration function. - Sample n MultiAgentBatches from n workers synchronously. - Store new samples in the replay buffer. - Sample one training MultiAgentBatch from the replay buffer. - Learn on the training batch. - Update the target network every `target_network_update_freq` steps. - Return all collected training metrics for the iteration. Returns: The results dict from executing the training iteration.
doc_len: 61 | words: 224 | lang: Python
504
def _announce() -> None: current_version = get_package_version() tag_name = f"v{current_version}" click.echo( f ) if "rc" in tag_name: click.echo( ) else: click.echo( ) @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
Generate markdown to announce the release. Hi everyone. Synapse {current_version} has just been released. [notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) | \ [docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \ [debs](https://packages.matrix.org/debian/) | \ [pypi](https://pypi.org/project/matrix-synapse/{current_version}/) Announce the RC in - #homeowners:matrix.org (Synapse Announcements) - #synapse-dev:matrix.org Announce the release in - #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic - #synapse:matrix.org (Synapse Admins), bumping the version in the topic - #synapse-dev:matrix.org - #synapse-package-maintainers:matrix.org Ask the designated people to do the blog and tweets.
doc_len: 72 | words: 27 | lang: Python
505
def update(self, value=None, visible=None): if not self._widget_was_created(): # if widget hasn't been created yet, then don't allow return if value is not None: self._TKOut.output.delete('1.0', tk.END) self._TKOut.output.insert(tk.END, value) if visible is False: self._pack_forget_save_settings(self._TKOut.frame) elif visible is True: self._pack_restore_settings(self._TKOut.frame) if visible is not None: self._visible = visible
Changes some of the settings for the Output Element. Must call `Window.Read` or `Window.Finalize` prior Changes will not be visible in your window until you call window.read or window.refresh. If you change visibility, your element may MOVE. If you want it to remain stationary, use the "layout helper" function "pin" to ensure your element is "pinned" to that location in your layout so that it returns there when made visible. :param value: string that will replace current contents of the output area :type value: (str) :param visible: control visibility of element :type visible: (bool)
doc_len: 94 | words: 45 | lang: Python
506
def example(self): result = getattr(self, "_example", None) if result is None: # No example batch was found, so get one from the `.train` dataset result = next(iter(self.train)) # And cache it for next time self._example = result return result
Get and cache an example batch of `inputs, labels` for plotting.
doc_len: 11 | words: 39 | lang: Python
507
def unit_attname(cls, unit_str): lower = unit_str.lower() if unit_str in cls.UNITS: return unit_str elif lower in cls.UNITS: return lower elif lower in cls.LALIAS: return cls.LALIAS[lower] else: raise Exception( 'Could not find a unit keyword associated with "%s"' % unit_str )
Retrieve the unit attribute name for the given unit string. For example, if the given unit string is 'metre', return 'm'. Raise an exception if an attribute cannot be found.
doc_len: 30 | words: 39 | lang: Python
508
def test_dont_import_tf_error(): # Do not import tf for testing purposes. os.environ["RLLIB_TEST_NO_TF_IMPORT"] = "1" config = ppo.PPOConfig().environment("CartPole-v1") for _ in framework_iterator(config, frameworks=("tf", "tf2")): with pytest.raises(ImportError, match="However, no installation was found"): config.build()
Check error being thrown, if tf not installed but configured.
doc_len: 10 | words: 30 | lang: Python
509
def _current(self): if self._hmac: return self._hmac else: h = self._outer.copy() h.update(self._inner.digest()) return h
Return a hash object for the current state. To be used only internally with digest() and hexdigest().
doc_len: 17 | words: 13 | lang: Python
510
def delete_links_from_desktop_icons(report): desktop_icons = frappe.db.get_values("Desktop Icon", {"_report": report}, ["name"]) for desktop_icon in desktop_icons: frappe.delete_doc("Desktop Icon", desktop_icon[0])
Check for one or multiple Desktop Icons and delete
doc_len: 9 | words: 16 | lang: Python
511
def get_bboxes_list(end2end_result, structure_master_result): # end2end end2end_xyxy_list = [] end2end_xywh_list = [] for end2end_item in end2end_result: src_bbox = end2end_item['bbox'] end2end_xyxy_list.append(src_bbox) xywh_bbox = xyxy2xywh(src_bbox) end2end_xywh_list.append(xywh_bbox) end2end_xyxy_bboxes = np.array(end2end_xyxy_list) end2end_xywh_bboxes = np.array(end2end_xywh_list) # structure master src_bboxes = structure_master_result['bbox'] src_bboxes = remove_empty_bboxes(src_bboxes) # structure_master_xywh_bboxes = src_bboxes # xyxy_bboxes = xywh2xyxy(src_bboxes) # structure_master_xyxy_bboxes = xyxy_bboxes structure_master_xyxy_bboxes = src_bboxes xywh_bbox = xyxy2xywh(src_bboxes) structure_master_xywh_bboxes = xywh_bbox return end2end_xyxy_bboxes, end2end_xywh_bboxes, structure_master_xywh_bboxes, structure_master_xyxy_bboxes
This function is used to convert end2end results and structure master results to List of xyxy bbox format and List of xywh bbox format :param end2end_result: bbox's format is xyxy :param structure_master_result: bbox's format is xywh :return: 4 kinds of bbox lists
doc_len: 43 | words: 64 | lang: Python
512
def autoname_elements() -> None: for name, var in sys._getframe().f_back.f_locals.items(): if isinstance(var, ParserElement) and not var.customName: var.set_name(name) dbl_quoted_string = Combine( Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' ).set_name("string enclosed in double quotes") sgl_quoted_string = Combine( Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" ).set_name("string enclosed in single quotes") quoted_string = Combine( Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" ).set_name("quotedString using single or double quotes") unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") # build list of built-in expressions, for future reference if a global default value # gets updated _builtin_exprs = [v for v in vars().values() if isinstance(v, ParserElement)] # backward compatibility names tokenMap = token_map conditionAsParseAction = condition_as_parse_action nullDebugAction = null_debug_action sglQuotedString = sgl_quoted_string dblQuotedString = dbl_quoted_string quotedString = quoted_string unicodeString = unicode_string lineStart = line_start lineEnd = line_end stringStart = string_start stringEnd = string_end traceParseAction = trace_parse_action
Utility to simplify mass-naming of parser elements, for generating railroad diagram with named subdiagrams.
doc_len: 14 | words: 134 | lang: Python
513
def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self) -> None: yaml_str = # External codegen on a yaml file with no operators is effectively a no-op, # so there's no reason to parse the backend self.assert_success_from_gen_backend_stubs(yaml_str)
\ backend: BAD_XLA cpp_namespace: torch_xla supported:
doc_len: 6 | words: 30 | lang: Python
514
async def test_pseudo_remote_peas_topologies(gateway, head, worker): worker_port = random_port() head_port = random_port() port_expose = random_port() graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}' if head == 'remote': pods_addresses = f'{{"pod0": ["{HOST}:{head_port}"]}}' else: pods_addresses = f'{{"pod0": ["0.0.0.0:{head_port}"]}}' # create a single head pea head_pea = _create_head_pea(head, head_port) # create a single worker pea worker_pea = _create_worker_pea(worker, worker_port) # create a single gateway pea gateway_pea = _create_gateway_pea( gateway, graph_description, pods_addresses, port_expose ) with gateway_pea, worker_pea, head_pea: await asyncio.sleep(1.0) # this would be done by the Pod, its adding the worker to the head activate_msg = ControlRequest(command='ACTIVATE') worker_host, worker_port = worker_pea.runtime_ctrl_address.split(':') if head == 'remote': worker_host = __docker_host__ activate_msg.add_related_entity('worker', worker_host, int(worker_port)) assert GrpcConnectionPool.send_request_sync( activate_msg, head_pea.runtime_ctrl_address ) # send requests to the gateway c = Client(host='127.0.0.1', port=port_expose, asyncio=True) responses = c.post( '/', inputs=async_inputs, request_size=1, return_results=True ) response_list = []
g(l)-h(l)-w(l) - works g(l)-h(l)-w(r) - works - head connects to worker via localhost g(l)-h(r)-w(r) - works - head (inside docker) connects to worker via dockerhost g(l)-h(r)-w(l) - doesn't work remote head need remote worker g(r)-... - doesn't work, as distributed parser not enabled for gateway After any 1 failure, segfault
doc_len: 50 | words: 132 | lang: Python
515
async def async_wait_start_success(self): import asyncio _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 timeout_ns = 1e9 * _timeout if _timeout else None now = time.time_ns() while timeout_ns is None or time.time_ns() - now < timeout_ns: if self.ready_or_shutdown.event.is_set(): self._check_failed_to_start() self.logger.debug(__ready_msg__) return else: await asyncio.sleep(0.1) self._fail_start_timeout(_timeout)
Wait for the `Pea` to start successfully in a non-blocking manner
doc_len: 11 | words: 50 | lang: Python
516
def set_fontsize(self, s=None): if s is None: s = mpl.rcParams["legend.fontsize"] self.prop = FontProperties(size=s) self.stale = True
Set the fontsize in points. If *s* is not given, reset to :rc:`legend.fontsize`.
doc_len: 13 | words: 16 | lang: Python
517
def get_next(self, timeout=None): if not self.has_next(): raise StopIteration("No more results to get") if self._next_return_index >= self._next_task_index: raise ValueError( "It is not allowed to call get_next() after " "get_next_unordered()." ) future = self._index_to_future[self._next_return_index] if timeout is not None: res, _ = ray.wait([future], timeout=timeout) if not res: raise TimeoutError("Timed out waiting for result") del self._index_to_future[self._next_return_index] self._next_return_index += 1 future_key = tuple(future) if isinstance(future, list) else future i, a = self._future_to_actor.pop(future_key) self._return_actor(a) return ray.get(future)
Returns the next pending result in order. This returns the next result produced by submit(), blocking for up to the specified timeout until it is available. Returns: The next result. Raises: TimeoutError if the timeout is reached. Examples: >>> pool = ActorPool(...) >>> pool.submit(lambda a, v: a.double.remote(v), 1) >>> print(pool.get_next()) 2
doc_len: 51 | words: 71 | lang: Python
518
def expand_egg_links(self) -> None: prefixes = [ Path(prefix) for prefix in self.base_paths["libdirs"].split(os.pathsep) if vistir.path.is_in_path(prefix, self.prefix.as_posix()) ] for loc in prefixes: if not loc.exists(): continue for pth in loc.iterdir(): if not pth.suffix == ".egg-link": continue contents = [ vistir.path.normalize_path(line.strip()) for line in pth.read_text().splitlines() ] pth.write_text("\n".join(contents))
Expand paths specified in egg-link files to prevent pip errors during reinstall
doc_len: 12 | words: 44 | lang: Python
519
def test_page_with_og(self) -> None: html = b parser = OpenGraphParser(html, "text/html; charset=UTF-8") result = parser.extract_data() self.assertEqual(result.title, "The Rock") self.assertEqual(result.description, "The Rock film")
<html> <head> <meta property="og:title" content="The Rock" /> <meta property="og:type" content="video.movie" /> <meta property="og:url" content="http://www.imdb.com/title/tt0117500/" /> <meta property="og:image" content="http://ia.media-imdb.com/images/rock.jpg" /> <meta property="og:description" content="The Rock film" /> </head> </html>
doc_len: 27 | words: 22 | lang: Python
520
def is_rational_function(self, *syms): if self in _illegal: return False if syms: syms = set(map(sympify, syms)) else: syms = self.free_symbols if not syms: return True return self._eval_is_rational_function(syms)
Test whether function is a ratio of two polynomials in the given symbols, syms. When syms is not given, all free symbols will be used. The rational function does not have to be in expanded or in any kind of canonical form. This function returns False for expressions that are "rational functions" with symbolic exponents. Thus, you should be able to call .as_numer_denom() and apply polynomial algorithms to the result for expressions for which this returns True. This is not part of the assumptions system. You cannot do Symbol('z', rational_function=True). Examples ======== >>> from sympy import Symbol, sin >>> from sympy.abc import x, y >>> (x/y).is_rational_function() True >>> (x**2).is_rational_function() True >>> (x/sin(y)).is_rational_function(y) False >>> n = Symbol('n', integer=True) >>> (x**n + 1).is_rational_function(x) False This function does not attempt any nontrivial simplifications that may result in an expression that does not appear to be a rational function to become one. >>> from sympy import sqrt, factor >>> y = Symbol('y', positive=True) >>> a = sqrt(y**2 + 2*y + 1)/y >>> a.is_rational_function(y) False >>> factor(a) (y + 1)/y >>> factor(a).is_rational_function(y) True See also is_algebraic_expr().
doc_len: 182 | words: 26 | lang: Python
521
def reduce_alg_num(self, a): elt = self.ZK.parent.element_from_alg_num(a) red = self.reduce_element(elt) return a.field_element(list(reversed(red.QQ_col.flat())))
Reduce an :py:class:`~.AlgebraicNumber` to a "small representative" modulo this prime ideal. Parameters ========== elt : :py:class:`~.AlgebraicNumber` The element to be reduced. Returns ======= :py:class:`~.AlgebraicNumber` The reduced element. See Also ======== reduce_element reduce_ANP .Submodule.reduce_element
doc_len: 33 | words: 11 | lang: Python
522
def _auto_joiner(self, short_results, input_mapping, is_dict=False): concat_results = [] elem_type = {} if is_dict else [] for k, vs in input_mapping.items(): single_results = elem_type for v in vs: if len(single_results) == 0: single_results = short_results[v] elif isinstance(elem_type, list): single_results.extend(short_results[v]) elif isinstance(elem_type, dict): for sk in single_results.keys(): if isinstance(single_results[sk], str): single_results[sk] += short_results[v][sk] else: single_results[sk].extend(short_results[v][sk]) else: raise ValueError( "Invalid element type, the type of results " "for each element should be list of dict, " "but {} received.".format(type(single_results))) concat_results.append(single_results) return concat_results
Join the short results automatically and generate the final results to match with the user inputs. Args: short_results (List[dict] / List[List[str]] / List[str]): input raw texts. input_mapping (dict): cutting length. is_dict (bool): whether the element type is dict, default to False. return: short_input_texts (List[str]): the short input texts for model inference.
doc_len: 51 | words: 79 | lang: Python
523
def get_variable_names_from_ckpt(path_ckpt, use_ema=True): v_all = tf.train.list_variables(path_ckpt) # keep name only v_name_all = [x[0] for x in v_all] if use_ema: v_name_all = [x for x in v_name_all if "ExponentialMovingAverage" in x] else: v_name_all = [ x for x in v_name_all if "ExponentialMovingAverage" not in x ] # remove util variables used for RMSprop v_name_all = [x for x in v_name_all if "RMS" not in x] return v_name_all
Get list of tensor names from checkpoint. Args: path_ckpt: str, path to the ckpt files use_ema: Bool, whether to use ExponentialMovingAverage result or not. Returns: List of variable names from checkpoint.
doc_len: 31 | words: 66 | lang: Python
524
def ode_order(expr, func): a = Wild('a', exclude=[func]) if expr.match(a): return 0 if isinstance(expr, Derivative): if expr.args[0] == func: return len(expr.variables) else: return max(ode_order(arg, func) for arg in expr.args[0].args) + len(expr.variables) else: return max(ode_order(arg, func) for arg in expr.args)
Returns the order of a given differential equation with respect to func. This function is implemented recursively. Examples ======== >>> from sympy import Function >>> from sympy.solvers.deutils import ode_order >>> from sympy.abc import x >>> f, g = map(Function, ['f', 'g']) >>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 + ... f(x).diff(x), f(x)) 2 >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x)) 2 >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x)) 3
doc_len: 67 | words: 38 | lang: Python
525
async def async_send_message(self, message, **kwargs): nextcord.VoiceClient.warn_nacl = False discord_bot = nextcord.Client() images = None embedding = None if ATTR_TARGET not in kwargs: _LOGGER.error("No target specified") return None data = kwargs.get(ATTR_DATA) or {} embeds: list[nextcord.Embed] = [] if ATTR_EMBED in data: embedding = data[ATTR_EMBED] fields = embedding.get(ATTR_EMBED_FIELDS) or [] if embedding: embed = nextcord.Embed(**embedding) for field in fields: embed.add_field(**field) if ATTR_EMBED_FOOTER in embedding: embed.set_footer(**embedding[ATTR_EMBED_FOOTER]) if ATTR_EMBED_AUTHOR in embedding: embed.set_author(**embedding[ATTR_EMBED_AUTHOR]) if ATTR_EMBED_THUMBNAIL in embedding: embed.set_thumbnail(**embedding[ATTR_EMBED_THUMBNAIL]) embeds.append(embed) if ATTR_IMAGES in data: images = [] for image in data.get(ATTR_IMAGES, []): image_exists = await self.hass.async_add_executor_job( self.file_exists, image ) if image_exists: images.append(image) else: _LOGGER.warning("Image not found: %s", image) await discord_bot.login(self.token) try: for channelid in kwargs[ATTR_TARGET]: channelid = int(channelid) try: channel = await discord_bot.fetch_channel(channelid) except nextcord.NotFound: try: channel = await discord_bot.fetch_user(channelid) except nextcord.NotFound: _LOGGER.warning("Channel not found for ID: %s", channelid) continue # Must create new instances of File for each channel. files = [nextcord.File(image) for image in images] if images else [] await channel.send(message, files=files, embeds=embeds) except (nextcord.HTTPException, nextcord.NotFound) as error: _LOGGER.warning("Communication error: %s", error) await discord_bot.close()
Login to Discord, send message to channel(s) and log out.
doc_len: 10 | words: 170 | lang: Python
526
def rand_series_with_duplicate_datetimeindex() -> Series: dates = [ datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 5), ] return Series(np.random.randn(len(dates)), index=dates) # ---------------------------------------------------------------- # Scalars # ---------------------------------------------------------------- @pytest.fixture( params=[ ( Interval(left=0, right=5, inclusive="right"), IntervalDtype("int64", inclusive="right"), ), ( Interval(left=0.1, right=0.5, inclusive="right"), IntervalDtype("float64", inclusive="right"), ), (Period("2012-01", freq="M"), "period[M]"), (Period("2012-02-01", freq="D"), "period[D]"), ( Timestamp("2011-01-01", tz="US/Eastern"), DatetimeTZDtype(tz="US/Eastern"), ), (Timedelta(seconds=500), "timedelta64[ns]"), ] )
Fixture for Series with a DatetimeIndex that has duplicates.
doc_len: 9 | words: 78 | lang: Python
527
def get_major_minor_version(): # type: () -> str return "{}.{}".format(*sys.version_info)
Return the major-minor version of the current Python as a string, e.g. "3.7" or "3.10".
doc_len: 15 | words: 9 | lang: Python
528
def test_ssd_head_loss(self): s = 300 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, }] cfg = Config( dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), sampler=dict(type='PseudoSampler'), smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False)) ssd_head = SSDHead( num_classes=4, in_channels=(1, 1, 1, 1, 1, 1), stacked_convs=1, feat_channels=1, use_depthwise=True, anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=s, basesize_ratio_range=(0.15, 0.9), strides=[8, 16, 32, 64, 100, 300], ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), train_cfg=cfg) # SSD head expects a multiple levels of features per image feats = ( torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0])) for stride in ssd_head.prior_generator.strides) cls_scores, bbox_preds = ssd_head.forward(feats) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) # When there is no truth, cls_loss and box_loss should all be zero. empty_cls_loss = sum(empty_gt_losses['loss_cls']) empty_box_loss = sum(empty_gt_losses['loss_bbox']) self.assertEqual( empty_cls_loss.item(), 0, 'there should be no cls loss when there are no true boxes') self.assertEqual( empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes') # When truth is non-empty then both cls and box loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) one_gt_losses = ssd_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas) onegt_cls_loss = sum(one_gt_losses['loss_cls']) onegt_box_loss = sum(one_gt_losses['loss_bbox']) self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero') self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero')
Tests ssd head loss when truth is empty and non-empty.
doc_len: 10 | words: 232 | lang: Python
529
def _legacy_check(self) -> None: if self._min_size > 0 or self._arguments.extract_every_n != 1: logger.warning("This alignments file was generated with the legacy extraction method.") logger.warning("You should run this extraction job, but with 'min_size' set to 0 and " "'extract-every-n' set to 1 to update the alignments file.") logger.warning("You can then re-run this extraction job with your chosen options.") sys.exit(0) maskers = ["components", "extended"] nn_masks = [mask for mask in list(self._alignments.mask_summary) if mask not in maskers] logtype = logger.warning if nn_masks else logger.info logtype("This alignments file was created with the legacy extraction method and will be " "updated.") logtype("Faces will be extracted using the new method and landmarks based masks will be " "regenerated.") if nn_masks: logtype("However, the NN based masks '%s' will be cropped to the legacy extraction " "method, so you may want to run the mask tool to regenerate these " "masks.", "', '".join(nn_masks)) self._mask_pipeline = Extractor(None, None, maskers, multiprocess=True) self._mask_pipeline.launch() # Update alignments versioning self._alignments._version = _VERSION # pylint:disable=protected-access
Check whether the alignments file was created with the legacy extraction method. If so, force user to re-extract all faces if any options have been specified, otherwise raise the appropriate warnings and set the legacy options.
36
160
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _legacy_check(self) -> None: if self._min_size > 0 or self._arguments.extract_every_n != 1: logger.warning("This alignments file was generated with the legacy extraction method.") logger.warning("You should run this extraction job, but with 'min_size' set to 0 and " "'extract-every-n' set to 1 to update the alignments file.") logger.warning("You can then re-run this extraction job with your chosen options.") sys.exit(0) maskers = ["components", "extended"] nn_masks = [mask for mask in list(self._alignments.mask_summary) if mask not in maskers] logtype = logger.warning if nn_masks else logger.info logtype("This alignments file was created with the legacy extraction method and will be " "updated.") logtype("Faces will be extracted using the new method and landmarks based masks will be " "regenerated.") if nn_masks: logtype("However, the NN based masks '%s' will be cropped to the legacy extraction " "method, so you may want to run the mask tool to regenerate these " "masks.", "', '".join(nn_masks)) self._mask_pipeline = Extractor(None, None, maskers, multiprocess=True) self._mask_pipeline.launch() # Update alignments versioning self._alignments._version = _VERSION # pylint:disable=protected-access ``` ###Assistant : Check whether the alignments file was created with the legacy extraction method. If so, force user to re-extract all faces if any options have been specified, otherwise raise the appropriate warnings and set the legacy options.
530
def FisherZ(name, d1, d2): r return rv(name, FisherZDistribution, (d1, d2)) #------------------------------------------------------------------------------- # Frechet distribution ---------------------------------------------------------
Create a Continuous Random Variable with an Fisher's Z distribution. Explanation =========== The density of the Fisher's Z distribution is given by .. math:: f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)} \frac{e^{d_1z}}{\left(d_1e^{2z}+d_2\right)^{\left(d_1+d_2\right)/2}} .. TODO - What is the difference between these degrees of freedom? Parameters ========== d1 : `d_1 > 0` Degree of freedom. d2 : `d_2 > 0` Degree of freedom. Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import FisherZ, density >>> from sympy import Symbol, pprint >>> d1 = Symbol("d1", positive=True) >>> d2 = Symbol("d2", positive=True) >>> z = Symbol("z") >>> X = FisherZ("x", d1, d2) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) d1 d2 d1 d2 - -- - -- -- -- 2 2 2 2 / 2*z \ d1*z 2*d1 *d2 *\d1*e + d2/ *e ----------------------------------------- /d1 d2\ B|--, --| \2 2 / References ========== .. [1] https://en.wikipedia.org/wiki/Fisher%27s_z-distribution .. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html
145
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def FisherZ(name, d1, d2): r return rv(name, FisherZDistribution, (d1, d2)) #------------------------------------------------------------------------------- # Frechet distribution --------------------------------------------------------- ``` ###Assistant : Create a Continuous Random Variable with an Fisher's Z distribution. Explanation =========== The density of the Fisher's Z distribution is given by .. math:: f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)} \frac{e^{d_1z}}{\left(d_1e^{2z}+d_2\right)^{\left(d_1+d_2\right)/2}} .. TODO - What is the difference between these degrees of freedom? Parameters ========== d1 : `d_1 > 0` Degree of freedom. d2 : `d_2 > 0` Degree of freedom. Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import FisherZ, density >>> from sympy import Symbol, pprint >>> d1 = Symbol("d1", positive=True) >>> d2 = Symbol("d2", positive=True) >>> z = Symbol("z") >>> X = FisherZ("x", d1, d2) >>> D = density(X)(z) >>> pprint(D, use_unicode=False) d1 d2 d1 d2 - -- - -- -- -- 2 2 2 2 / 2*z \ d1*z 2*d1 *d2 *\d1*e + d2/ *e ----------------------------------------- /d1 d2\ B|--, --| \2 2 / References ========== .. [1] https://en.wikipedia.org/wiki/Fisher%27s_z-distribution .. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html
531
def find_batch_size(tensors): if isinstance(tensors, (list, tuple)): for t in tensors: result = find_batch_size(t) if result is not None: return result elif isinstance(tensors, Mapping): for key, value in tensors.items(): result = find_batch_size(value) if result is not None: return result elif isinstance(tensors, torch.Tensor): return tensors.shape[0] if len(tensors.shape) >= 1 else None elif isinstance(tensors, np.ndarray): return tensors.shape[0] if len(tensors.shape) >= 1 else None
Find the first dimension of a tensor in a nested list/tuple/dict of tensors.
13
60
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_batch_size(tensors): if isinstance(tensors, (list, tuple)): for t in tensors: result = find_batch_size(t) if result is not None: return result elif isinstance(tensors, Mapping): for key, value in tensors.items(): result = find_batch_size(value) if result is not None: return result elif isinstance(tensors, torch.Tensor): return tensors.shape[0] if len(tensors.shape) >= 1 else None elif isinstance(tensors, np.ndarray): return tensors.shape[0] if len(tensors.shape) >= 1 else None ``` ###Assistant : Find the first dimension of a tensor in a nested list/tuple/dict of tensors.
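A minimal standalone sketch of the recursive batch-size lookup documented above, assuming only NumPy is available (the `torch.Tensor` branch is omitted); `find_batch_size_sketch` is an illustrative name rather than the library's own helper:
```Python
from collections.abc import Mapping

import numpy as np


def find_batch_size_sketch(tensors):
    """Return the first leading dimension found in a nested list/tuple/dict of arrays."""
    if isinstance(tensors, (list, tuple)):
        for t in tensors:
            result = find_batch_size_sketch(t)
            if result is not None:
                return result
    elif isinstance(tensors, Mapping):
        for value in tensors.values():
            result = find_batch_size_sketch(value)
            if result is not None:
                return result
    elif isinstance(tensors, np.ndarray):
        return tensors.shape[0] if tensors.ndim >= 1 else None
    return None


batch = {"input_ids": np.zeros((8, 16)), "labels": [np.zeros((8,))]}
print(find_batch_size_sketch(batch))  # 8
```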
532
def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = []
Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch. Parameters: outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. threshold (`float`, *optional*, defaults to 0.9): Threshold to use to filter out queries. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model.
101
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = [] ``` ###Assistant : Converts the output of [`DetrForSegmentation`] into image segmentation predictions. Only supports PyTorch. Parameters: outputs ([`DetrSegmentationOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. threshold (`float`, *optional*, defaults to 0.9): Threshold to use to filter out queries. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model.
533
def iterate_graycode(self, k): unranked_code = GrayCode.unrank(self.superset_size, (self.rank_gray + k) % self.cardinality) return Subset.subset_from_bitlist(self.superset, unranked_code)
Helper function used for prev_gray and next_gray. It performs ``k`` step overs to get the respective Gray codes. Examples ======== >>> from sympy.combinatorics import Subset >>> a = Subset([1, 2, 3], [1, 2, 3, 4]) >>> a.iterate_graycode(3).subset [1, 4] >>> a.iterate_graycode(-2).subset [1, 2, 4] See Also ======== next_gray, prev_gray
49
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def iterate_graycode(self, k): unranked_code = GrayCode.unrank(self.superset_size, (self.rank_gray + k) % self.cardinality) return Subset.subset_from_bitlist(self.superset, unranked_code) ``` ###Assistant : Helper function used for prev_gray and next_gray. It performs ``k`` step overs to get the respective Gray codes. Examples ======== >>> from sympy.combinatorics import Subset >>> a = Subset([1, 2, 3], [1, 2, 3, 4]) >>> a.iterate_graycode(3).subset [1, 4] >>> a.iterate_graycode(-2).subset [1, 2, 4] See Also ======== next_gray, prev_gray
534
def is_sequence_right_padded(mask): max_seq_length = tf.shape(mask)[1] count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1) right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length) return tf.reduce_all(tf.equal(mask, right_padded_mask))
Check the mask tensor and see if it is right padded. The cuDNN kernel uses the sequence length param to skip the trailing timesteps. If the data is left padded, or not strictly right padded (has masked values in the middle of the sequence), then the cuDNN kernel won't work properly in those cases. Left padded data: [[False, False, True, True, True]]. Right padded data: [[True, True, True, False, False]]. Mixture of masked/unmasked data: [[True, False, True, False, False]]. Note that for the mixed data example above, the actual data the RNN should see are those 2 Trues (index 0 and 2); the index 1 False should be ignored and must not pollute the internal states. Args: mask: the Boolean tensor with shape [batch, timestep] Returns: boolean scalar tensor, whether the mask is strictly right padded.
135
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_sequence_right_padded(mask): max_seq_length = tf.shape(mask)[1] count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1) right_padded_mask = tf.sequence_mask(count_of_true, maxlen=max_seq_length) return tf.reduce_all(tf.equal(mask, right_padded_mask)) ``` ###Assistant : Check the mask tensor and see if it right padded. For cuDNN kernel, it uses the sequence length param to skip the tailing timestep. If the data is left padded, or not a strict right padding (has masked value in the middle of the sequence), then cuDNN kernel won't be work properly in those cases. Left padded data: [[False, False, True, True, True]]. Right padded data: [[True, True, True, False, False]]. Mixture of mask/unmasked data: [[True, False, True, False, False]]. Note that for the mixed data example above, the actually data RNN should see are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not pollute the internal states. Args: mask: the Boolean tensor with shape [batch, timestep] Returns: boolean scalar tensor, whether the mask is strictly right padded.
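The right-padding check above can be restated without TensorFlow; this is a NumPy sketch of the same idea (build the reference right-padded mask from the per-row count of `True` values and compare), not the Keras implementation itself:
```Python
import numpy as np


def is_right_padded(mask):
    # A mask is strictly right padded iff each row equals a reference mask of
    # `count_of_true` leading Trues followed by Falses.
    mask = np.asarray(mask, dtype=bool)
    counts = mask.sum(axis=1)                    # True count per row
    positions = np.arange(mask.shape[1])[None, :]
    right_padded = positions < counts[:, None]   # reference right-padded mask
    return bool(np.all(mask == right_padded))


print(is_right_padded([[True, True, True, False, False]]))   # True
print(is_right_padded([[False, False, True, True, True]]))   # False (left padded)
print(is_right_padded([[True, False, True, False, False]]))  # False (mixed)
```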
535
def _send_event_over_federation(self) -> None: body = { "pdus": [ { "sender": self.user_id, "type": EventTypes.Message, "state_key": "", "content": {"body": "hello world", "msgtype": "m.text"}, "room_id": self.room_id, "depth": 0, "origin_server_ts": self.clock.time_msec(), "prev_events": [], "auth_events": [], "signatures": {}, "unsigned": {}, } ], } channel = self.make_signed_federation_request( method="PUT", path="/_matrix/federation/v1/send/1", content=body, ) self.assertEqual(channel.code, 200, channel.result)
Send a dummy event over federation and check that the request succeeds.
12
49
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _send_event_over_federation(self) -> None: body = { "pdus": [ { "sender": self.user_id, "type": EventTypes.Message, "state_key": "", "content": {"body": "hello world", "msgtype": "m.text"}, "room_id": self.room_id, "depth": 0, "origin_server_ts": self.clock.time_msec(), "prev_events": [], "auth_events": [], "signatures": {}, "unsigned": {}, } ], } channel = self.make_signed_federation_request( method="PUT", path="/_matrix/federation/v1/send/1", content=body, ) self.assertEqual(channel.code, 200, channel.result) ``` ###Assistant : Send a dummy event over federation and check that the request succeeds.
536
def _print_loss(self, loss): output = ", ".join([f"Loss {side}: {side_loss:.5f}" for side, side_loss in zip(("A", "B"), loss)]) timestamp = time.strftime("%H:%M:%S") output = f"[{timestamp}] [#{self._model.iterations:05d}] {output}" print(f"\r{output}", end="")
Outputs the loss for the current iteration to the console. Parameters ---------- loss: list The loss for each side. List should contain 2 ``floats``: side "a" in position 0 and side "b" in position 1.
35
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _print_loss(self, loss): output = ", ".join([f"Loss {side}: {side_loss:.5f}" for side, side_loss in zip(("A", "B"), loss)]) timestamp = time.strftime("%H:%M:%S") output = f"[{timestamp}] [#{self._model.iterations:05d}] {output}" print(f"\r{output}", end="") ``` ###Assistant : Outputs the loss for the current iteration to the console. Parameters ---------- loss: list The loss for each side. List should contain 2 ``floats`` side "a" in position 0 and side "b" in position `.
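A stdlib-only sketch of the console line format described above; the iteration counter and loss values below are placeholders chosen for illustration:
```Python
import time

loss = [0.023456, 0.031234]   # hypothetical losses for sides "A" and "B"
iterations = 12               # hypothetical iteration counter

output = ", ".join(f"Loss {side}: {side_loss:.5f}"
                   for side, side_loss in zip(("A", "B"), loss))
timestamp = time.strftime("%H:%M:%S")
print(f"\r[{timestamp}] [#{iterations:05d}] {output}", end="")
# e.g. [14:02:11] [#00012] Loss A: 0.02346, Loss B: 0.03123
```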
537
def itermonthdays2(self, year, month): for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): yield d, i % 7
Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0.
21
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def itermonthdays2(self, year, month): for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): yield d, i % 7 ``` ###Assistant : Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0.
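The method above matches the standard-library `calendar.Calendar.itermonthdays2`, so a quick usage check can be run directly against `calendar`:
```Python
import calendar

cal = calendar.Calendar(firstweekday=0)          # weeks start on Monday
for day, weekday in cal.itermonthdays2(2024, 2):
    if day:                                      # day == 0 marks days outside February
        print(day, weekday)                      # 1 3 -> 1 Feb 2024 is a Thursday
        break
```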
538
def test_distance_function_return_none_if_invalid_state(hass): hass.states.async_set("test.object_2", "happy", {"latitude": 10}) tpl = template.Template("{{ distance(states.test.object_2) | round }}", hass) with pytest.raises(TemplateError): tpl.async_render()
Test distance function return None if invalid state.
8
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_distance_function_return_none_if_invalid_state(hass): hass.states.async_set("test.object_2", "happy", {"latitude": 10}) tpl = template.Template("{{ distance(states.test.object_2) | round }}", hass) with pytest.raises(TemplateError): tpl.async_render() ``` ###Assistant : Test distance function return None if invalid state.
539
def Concatenate(self, parameters): return _concatenate_getitem(self, parameters) # 3.7-8 elif sys.version_info[:2] >= (3, 7):
Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a higher order function which adds, removes or transforms parameters of a callable. For example:: Callable[Concatenate[int, P], int] See PEP 612 for detailed information.
33
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def Concatenate(self, parameters): return _concatenate_getitem(self, parameters) # 3.7-8 elif sys.version_info[:2] >= (3, 7): ``` ###Assistant : Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a higher order function which adds, removes or transforms parameters of a callable. For example:: Callable[Concatenate[int, P], int] See PEP 612 for detailed information.
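An illustrative use of `Concatenate` (available from the stdlib `typing` module on Python 3.10+; the snippet above is the `typing_extensions` backport for older interpreters). The decorator below, `with_retries`, is a made-up example that consumes the leading `int` parameter of the wrapped callable:
```Python
from typing import Callable, Concatenate, ParamSpec

P = ParamSpec("P")


def with_retries(func: Callable[Concatenate[int, P], int]) -> Callable[P, int]:
    """Wrap `func`, supplying its leading `int` argument (an attempt counter)."""
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> int:
        return func(1, *args, **kwargs)
    return wrapper


@with_retries
def job(attempt: int, name: str) -> int:
    return attempt + len(name)


print(job("task"))  # 5
```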
540
def compute_recall(result_neighbors, ground_truth_neighbors) -> float: assert len( result_neighbors.shape) == 2, "shape = [num_queries, neighbors_per_query]" assert len(ground_truth_neighbors.shape ) == 2, "shape = [num_queries, ground_truth_neighbors_per_query]" assert result_neighbors.shape[0] == ground_truth_neighbors.shape[0] gt_sets = [set(np.asarray(x)) for x in ground_truth_neighbors] hits = sum( len(list(x for x in nn_per_q if x.item() in gt_sets[q])) for q, nn_per_q in enumerate(result_neighbors)) return hits / ground_truth_neighbors.size
Computes the recall of an approximate nearest neighbor search. Args: result_neighbors: int32 numpy array of shape [num_queries, neighbors_per_query] where the values are the indices of the dataset. ground_truth_neighbors: int32 numpy array of shape [num_queries, ground_truth_neighbors_per_query] where the values are the indices of the dataset. Returns: The recall.
49
55
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def compute_recall(result_neighbors, ground_truth_neighbors) -> float: assert len( result_neighbors.shape) == 2, "shape = [num_queries, neighbors_per_query]" assert len(ground_truth_neighbors.shape ) == 2, "shape = [num_queries, ground_truth_neighbors_per_query]" assert result_neighbors.shape[0] == ground_truth_neighbors.shape[0] gt_sets = [set(np.asarray(x)) for x in ground_truth_neighbors] hits = sum( len(list(x for x in nn_per_q if x.item() in gt_sets[q])) for q, nn_per_q in enumerate(result_neighbors)) return hits / ground_truth_neighbors.size ``` ###Assistant : Computes the recall of an approximate nearest neighbor search. Args: result_neighbors: int32 numpy array of the shape [num_queries, neighbors_per_query] where the values are the indices of the dataset. ground_truth_neighbors: int32 numpy array of with shape [num_queries, ground_truth_neighbors_per_query] where the values are the indices of the dataset. Returns: The recall.
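A tiny worked example of the recall computation above, using made-up neighbor indices and plain NumPy:
```Python
import numpy as np

result_neighbors = np.array([[0, 1], [2, 3]])        # [num_queries, neighbors_per_query]
ground_truth_neighbors = np.array([[0, 9], [3, 4]])  # [num_queries, gt_per_query]

gt_sets = [set(row.tolist()) for row in ground_truth_neighbors]
hits = sum(int(idx in gt_sets[q])
           for q, row in enumerate(result_neighbors)
           for idx in row)
print(hits / ground_truth_neighbors.size)  # 0.5 -> 2 hits out of 4 ground-truth entries
```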
541
def isocalendar(self) -> DataFrame: from pandas import DataFrame values = self._local_timestamps() sarray = fields.build_isocalendar_sarray(values) iso_calendar_df = DataFrame( sarray, columns=["year", "week", "day"], dtype="UInt32" ) if self._hasna: iso_calendar_df.iloc[self._isnan] = None return iso_calendar_df
Returns a DataFrame with the year, week, and day calculated according to the ISO 8601 standard. .. versionadded:: 1.1.0 Returns ------- DataFrame with columns year, week and day See Also -------- Timestamp.isocalendar : Function return a 3-tuple containing ISO year, week number, and weekday for the given Timestamp object. datetime.date.isocalendar : Return a named tuple object with three components: year, week and weekday. Examples -------- >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4) >>> idx.isocalendar() year week day 2019-12-29 2019 52 7 2019-12-30 2020 1 1 2019-12-31 2020 1 2 2020-01-01 2020 1 3 >>> idx.isocalendar().week 2019-12-29 52 2019-12-30 1 2019-12-31 1 2020-01-01 1 Freq: D, Name: week, dtype: UInt32
108
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def isocalendar(self) -> DataFrame: from pandas import DataFrame values = self._local_timestamps() sarray = fields.build_isocalendar_sarray(values) iso_calendar_df = DataFrame( sarray, columns=["year", "week", "day"], dtype="UInt32" ) if self._hasna: iso_calendar_df.iloc[self._isnan] = None return iso_calendar_df ``` ###Assistant : Returns a DataFrame with the year, week, and day calculated according to the ISO 8601 standard. .. versionadded:: 1.1.0 Returns ------- DataFrame with columns year, week and day See Also -------- Timestamp.isocalendar : Function return a 3-tuple containing ISO year, week number, and weekday for the given Timestamp object. datetime.date.isocalendar : Return a named tuple object with three components: year, week and weekday. Examples -------- >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4) >>> idx.isocalendar() year week day 2019-12-29 2019 52 7 2019-12-30 2020 1 1 2019-12-31 2020 1 2 2020-01-01 2020 1 3 >>> idx.isocalendar().week 2019-12-29 52 2019-12-30 1 2019-12-31 1 2020-01-01 1 Freq: D, Name: week, dtype: UInt32
542
def putpixel(self, xy, value): if self.readonly: self._copy() self.load() if self.pyaccess: return self.pyaccess.putpixel(xy, value) if ( self.mode in ("P", "PA") and isinstance(value, (list, tuple)) and len(value) in [3, 4] ): # RGB or RGBA value for a P or PA image if self.mode == "PA": alpha = value[3] if len(value) == 4 else 255 value = value[:3] value = self.palette.getcolor(value, self) if self.mode == "PA": value = (value, alpha) return self.im.putpixel(xy, value)
Modifies the pixel at the given position. The color is given as a single numerical value for single-band images, and a tuple for multi-band images. In addition to this, RGB and RGBA tuples are accepted for P and PA images. Note that this method is relatively slow. For more extensive changes, use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw` module instead. See: * :py:meth:`~PIL.Image.Image.paste` * :py:meth:`~PIL.Image.Image.putdata` * :py:mod:`~PIL.ImageDraw` :param xy: The pixel coordinate, given as (x, y). See :ref:`coordinate-system`. :param value: The pixel value.
81
71
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def putpixel(self, xy, value): if self.readonly: self._copy() self.load() if self.pyaccess: return self.pyaccess.putpixel(xy, value) if ( self.mode in ("P", "PA") and isinstance(value, (list, tuple)) and len(value) in [3, 4] ): # RGB or RGBA value for a P or PA image if self.mode == "PA": alpha = value[3] if len(value) == 4 else 255 value = value[:3] value = self.palette.getcolor(value, self) if self.mode == "PA": value = (value, alpha) return self.im.putpixel(xy, value) ``` ###Assistant : Modifies the pixel at the given position. The color is given as a single numerical value for single-band images, and a tuple for multi-band images. In addition to this, RGB and RGBA tuples are accepted for P and PA images. Note that this method is relatively slow. For more extensive changes, use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw` module instead. See: * :py:meth:`~PIL.Image.Image.paste` * :py:meth:`~PIL.Image.Image.putdata` * :py:mod:`~PIL.ImageDraw` :param xy: The pixel coordinate, given as (x, y). See :ref:`coordinate-system`. :param value: The pixel value.
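A quick usage check of `putpixel` on an RGB image (assumes Pillow is installed):
```Python
from PIL import Image

im = Image.new("RGB", (4, 4), color=(0, 0, 0))
im.putpixel((1, 2), (255, 0, 0))   # (x, y) coordinate, RGB tuple value
print(im.getpixel((1, 2)))         # (255, 0, 0)
```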
543
async def test_available_template_with_entities(hass): await setup.async_setup_component( hass, "switch", { "switch": { "platform": "template", "switches": { "test_template_switch": { **OPTIMISTIC_SWITCH_CONFIG, "value_template": "{{ 1 == 1 }}", "availability_template": "{{ is_state('availability_state.state', 'on') }}", } }, } }, ) await hass.async_block_till_done() await hass.async_start() await hass.async_block_till_done() hass.states.async_set("availability_state.state", STATE_ON) await hass.async_block_till_done() assert hass.states.get("switch.test_template_switch").state != STATE_UNAVAILABLE hass.states.async_set("availability_state.state", STATE_OFF) await hass.async_block_till_done() assert hass.states.get("switch.test_template_switch").state == STATE_UNAVAILABLE
Test availability templates with values from other entities.
8
55
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_available_template_with_entities(hass): await setup.async_setup_component( hass, "switch", { "switch": { "platform": "template", "switches": { "test_template_switch": { **OPTIMISTIC_SWITCH_CONFIG, "value_template": "{{ 1 == 1 }}", "availability_template": "{{ is_state('availability_state.state', 'on') }}", } }, } }, ) await hass.async_block_till_done() await hass.async_start() await hass.async_block_till_done() hass.states.async_set("availability_state.state", STATE_ON) await hass.async_block_till_done() assert hass.states.get("switch.test_template_switch").state != STATE_UNAVAILABLE hass.states.async_set("availability_state.state", STATE_OFF) await hass.async_block_till_done() assert hass.states.get("switch.test_template_switch").state == STATE_UNAVAILABLE ``` ###Assistant : Test availability templates with values from other entities.
544
def etfs_disc_command(sort=""): # Debug if cfg.DEBUG: logger.debug("etfs") df_etfs = wsj_model.etf_movers(sort, export=True) if df_etfs.empty: raise Exception("No available data found") df_etfs.set_index(" ", inplace=True) prfx = "Top" if sort == "active": prfx = "Most" title = f"ETF Movers ({prfx} {sort.capitalize()})" dindex = len(df_etfs.index) if dindex > 15: embeds: list = [] # Output i, i2, end = 0, 0, 15 df_pg, embeds_img, images_list = [], [], [] while i < dindex: df_pg = df_etfs.iloc[i:end] df_pg.append(df_pg) fig = df2img.plot_dataframe( df_pg, fig_size=(1200, (40 + (40 * dindex))), col_width=[1, 9, 1.5, 1.5, 1.5, 1.5], tbl_header=cfg.PLT_TBL_HEADER, tbl_cells=cfg.PLT_TBL_CELLS, font=cfg.PLT_TBL_FONT, row_fill_color=cfg.PLT_TBL_ROW_COLORS, paper_bgcolor="rgba(0, 0, 0, 0)", ) fig.update_traces(cells=(dict(align=["left"]))) imagefile = "disc-etfs.png" imagefile = helpers.save_image(imagefile, fig) if cfg.IMAGES_URL or cfg.IMGUR_CLIENT_ID != "REPLACE_ME": image_link = cfg.IMAGES_URL + imagefile images_list.append(imagefile) else: imagefile_save = cfg.IMG_DIR / imagefile uploaded_image = gst_imgur.upload_image( imagefile_save, title="something" ) image_link = uploaded_image.link os.remove(imagefile_save) embeds_img.append( f"{image_link}", ) embeds.append( disnake.Embed( title=title, colour=cfg.COLOR, ), ) i2 += 1 i += 15 end += 15 # Author/Footer for i in range(0, i2): embeds[i].set_author( name=cfg.AUTHOR_NAME, url=cfg.AUTHOR_URL, icon_url=cfg.AUTHOR_ICON_URL, ) embeds[i].set_footer( text=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) i = 0 for i in range(0, i2): embeds[i].set_image(url=embeds_img[i]) i += 1 embeds[0].set_footer(text=f"Page 1 of {len(embeds)}") choices = [ disnake.SelectOption(label="Home", value="0", emoji="🟢"), ] output = { "view": Menu, "title": title, "embed": embeds, "choices": choices, "embeds_img": embeds_img, "images_list": images_list, } else: fig = df2img.plot_dataframe( df_etfs, fig_size=(1200, (40 + (40 * dindex))), col_width=[1, 9, 1.5, 1.5, 1.5, 1.5], tbl_header=cfg.PLT_TBL_HEADER, tbl_cells=cfg.PLT_TBL_CELLS, font=cfg.PLT_TBL_FONT, row_fill_color=cfg.PLT_TBL_ROW_COLORS, paper_bgcolor="rgba(0, 0, 0, 0)", ) fig.update_traces(cells=(dict(align=["left"]))) imagefile = helpers.save_image("disc-etfs.png", fig) output = { "title": title, "imagefile": imagefile, } return output
Displays ETF's Top Gainers/Decliners, Most Active [Wall Street Journal]
9
247
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def etfs_disc_command(sort=""): # Debug if cfg.DEBUG: logger.debug("etfs") df_etfs = wsj_model.etf_movers(sort, export=True) if df_etfs.empty: raise Exception("No available data found") df_etfs.set_index(" ", inplace=True) prfx = "Top" if sort == "active": prfx = "Most" title = f"ETF Movers ({prfx} {sort.capitalize()})" dindex = len(df_etfs.index) if dindex > 15: embeds: list = [] # Output i, i2, end = 0, 0, 15 df_pg, embeds_img, images_list = [], [], [] while i < dindex: df_pg = df_etfs.iloc[i:end] df_pg.append(df_pg) fig = df2img.plot_dataframe( df_pg, fig_size=(1200, (40 + (40 * dindex))), col_width=[1, 9, 1.5, 1.5, 1.5, 1.5], tbl_header=cfg.PLT_TBL_HEADER, tbl_cells=cfg.PLT_TBL_CELLS, font=cfg.PLT_TBL_FONT, row_fill_color=cfg.PLT_TBL_ROW_COLORS, paper_bgcolor="rgba(0, 0, 0, 0)", ) fig.update_traces(cells=(dict(align=["left"]))) imagefile = "disc-etfs.png" imagefile = helpers.save_image(imagefile, fig) if cfg.IMAGES_URL or cfg.IMGUR_CLIENT_ID != "REPLACE_ME": image_link = cfg.IMAGES_URL + imagefile images_list.append(imagefile) else: imagefile_save = cfg.IMG_DIR / imagefile uploaded_image = gst_imgur.upload_image( imagefile_save, title="something" ) image_link = uploaded_image.link os.remove(imagefile_save) embeds_img.append( f"{image_link}", ) embeds.append( disnake.Embed( title=title, colour=cfg.COLOR, ), ) i2 += 1 i += 15 end += 15 # Author/Footer for i in range(0, i2): embeds[i].set_author( name=cfg.AUTHOR_NAME, url=cfg.AUTHOR_URL, icon_url=cfg.AUTHOR_ICON_URL, ) embeds[i].set_footer( text=cfg.AUTHOR_NAME, icon_url=cfg.AUTHOR_ICON_URL, ) i = 0 for i in range(0, i2): embeds[i].set_image(url=embeds_img[i]) i += 1 embeds[0].set_footer(text=f"Page 1 of {len(embeds)}") choices = [ disnake.SelectOption(label="Home", value="0", emoji="🟢"), ] output = { "view": Menu, "title": title, "embed": embeds, "choices": choices, "embeds_img": embeds_img, "images_list": images_list, } else: fig = df2img.plot_dataframe( df_etfs, fig_size=(1200, (40 + (40 * dindex))), col_width=[1, 9, 1.5, 1.5, 1.5, 1.5], tbl_header=cfg.PLT_TBL_HEADER, tbl_cells=cfg.PLT_TBL_CELLS, font=cfg.PLT_TBL_FONT, row_fill_color=cfg.PLT_TBL_ROW_COLORS, paper_bgcolor="rgba(0, 0, 0, 0)", ) fig.update_traces(cells=(dict(align=["left"]))) imagefile = helpers.save_image("disc-etfs.png", fig) output = { "title": title, "imagefile": imagefile, } return output ``` ###Assistant : Displays ETF's Top Gainers/Decliners, Most Active [Wall Street Journal]
545
def get(self): logger = get_logger() logger.debug(f"ENTER::Partition.get::{self._identity}") if len(self.call_queue): self.drain_call_queue() result = ray.get(self.oid) logger.debug(f"EXIT::Partition.get::{self._identity}") return result
Get the object wrapped by this partition out of the Plasma store. Returns ------- pandas.DataFrame The object from the Plasma store.
21
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get(self): logger = get_logger() logger.debug(f"ENTER::Partition.get::{self._identity}") if len(self.call_queue): self.drain_call_queue() result = ray.get(self.oid) logger.debug(f"EXIT::Partition.get::{self._identity}") return result ``` ###Assistant : Get the object wrapped by this partition out of the Plasma store. Returns ------- pandas.DataFrame The object from the Plasma store.
546
def dag_bag_ext(): clear_db_runs() dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False) dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None) task_a_0 = EmptyOperator(task_id="task_a_0", dag=dag_0) task_b_0 = ExternalTaskMarker( task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3, dag=dag_0 ) task_a_0 >> task_b_0 dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None) task_a_1 = ExternalTaskSensor( task_id="task_a_1", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1 ) task_b_1 = ExternalTaskMarker( task_id="task_b_1", external_dag_id="dag_2", external_task_id="task_a_2", recursion_depth=2, dag=dag_1 ) task_a_1 >> task_b_1 dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule_interval=None) task_a_2 = ExternalTaskSensor( task_id="task_a_2", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2 ) task_b_2 = ExternalTaskMarker( task_id="task_b_2", external_dag_id="dag_3", external_task_id="task_a_3", recursion_depth=1, dag=dag_2 ) task_a_2 >> task_b_2 dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule_interval=None) task_a_3 = ExternalTaskSensor( task_id="task_a_3", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3 ) task_b_3 = EmptyOperator(task_id="task_b_3", dag=dag_3) task_a_3 >> task_b_3 for dag in [dag_0, dag_1, dag_2, dag_3]: dag_bag.bag_dag(dag=dag, root_dag=dag) yield dag_bag clear_db_runs() @pytest.fixture
Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies set up using ExternalTaskMarker and ExternalTaskSensor. dag_0: task_a_0 >> task_b_0 | | dag_1: ---> task_a_1 >> task_b_1 | | dag_2: ---> task_a_2 >> task_b_2 | | dag_3: ---> task_a_3 >> task_b_3
45
111
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dag_bag_ext(): clear_db_runs() dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False) dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None) task_a_0 = EmptyOperator(task_id="task_a_0", dag=dag_0) task_b_0 = ExternalTaskMarker( task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3, dag=dag_0 ) task_a_0 >> task_b_0 dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None) task_a_1 = ExternalTaskSensor( task_id="task_a_1", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1 ) task_b_1 = ExternalTaskMarker( task_id="task_b_1", external_dag_id="dag_2", external_task_id="task_a_2", recursion_depth=2, dag=dag_1 ) task_a_1 >> task_b_1 dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule_interval=None) task_a_2 = ExternalTaskSensor( task_id="task_a_2", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2 ) task_b_2 = ExternalTaskMarker( task_id="task_b_2", external_dag_id="dag_3", external_task_id="task_a_3", recursion_depth=1, dag=dag_2 ) task_a_2 >> task_b_2 dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule_interval=None) task_a_3 = ExternalTaskSensor( task_id="task_a_3", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3 ) task_b_3 = EmptyOperator(task_id="task_b_3", dag=dag_3) task_a_3 >> task_b_3 for dag in [dag_0, dag_1, dag_2, dag_3]: dag_bag.bag_dag(dag=dag, root_dag=dag) yield dag_bag clear_db_runs() @pytest.fixture ``` ###Assistant : Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies set up using ExternalTaskMarker and ExternalTaskSensor. dag_0: task_a_0 >> task_b_0 | | dag_1: ---> task_a_1 >> task_b_1 | | dag_2: ---> task_a_2 >> task_b_2 | | dag_3: ---> task_a_3 >> task_b_3
547
def test_constrained_layout3(): fig, axs = plt.subplots(2, 2, layout="constrained") for nn, ax in enumerate(axs.flat): pcm = example_pcolor(ax, fontsize=24) if nn == 3: pad = 0.08 else: pad = 0.02 # default fig.colorbar(pcm, ax=ax, pad=pad) @image_comparison(['constrained_layout4.png'])
Test constrained_layout for colorbars with subplots
6
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_constrained_layout3(): fig, axs = plt.subplots(2, 2, layout="constrained") for nn, ax in enumerate(axs.flat): pcm = example_pcolor(ax, fontsize=24) if nn == 3: pad = 0.08 else: pad = 0.02 # default fig.colorbar(pcm, ax=ax, pad=pad) @image_comparison(['constrained_layout4.png']) ``` ###Assistant : Test constrained_layout for colorbars with subplots
548
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: return self.grouper.indices
Dict {group name -> group indices}.
6
7
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: return self.grouper.indices ``` ###Assistant : Dict {group name -> group indices}.
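The `indices` property above maps each group name to the positional indices of its rows; a short pandas check:
```Python
import pandas as pd

df = pd.DataFrame({"key": ["a", "b", "a", "b"], "val": [1, 2, 3, 4]})
print(df.groupby("key").indices)
# {'a': array([0, 2]), 'b': array([1, 3])}
```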
549
def get_hash(self, data, hasher=None): if hasher is None: hasher = self.hasher if hasher is None: hasher = hashlib.md5 prefix = '' else: hasher = getattr(hashlib, hasher) prefix = '%s=' % self.hasher digest = hasher(data).digest() digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') return '%s%s' % (prefix, digest)
Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. :rtype: str
104
42
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_hash(self, data, hasher=None): if hasher is None: hasher = self.hasher if hasher is None: hasher = hashlib.md5 prefix = '' else: hasher = getattr(hashlib, hasher) prefix = '%s=' % self.hasher digest = hasher(data).digest() digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') return '%s%s' % (prefix, digest) ``` ###Assistant : Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. :rtype: str
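A simplified standalone sketch of the hash format described above (urlsafe base64 of the digest with `'='` padding stripped, optionally prefixed with the hasher name); `get_hash_sketch` is an illustrative helper, not the distlib method, and it omits the instance-level `hasher` fallback:
```Python
import base64
import hashlib


def get_hash_sketch(data, hasher=None):
    if hasher is None:
        prefix, h = "", hashlib.md5
    else:
        prefix, h = f"{hasher}=", getattr(hashlib, hasher)
    digest = base64.urlsafe_b64encode(h(data).digest()).rstrip(b"=").decode("ascii")
    return prefix + digest


print(get_hash_sketch(b"hello"))             # unprefixed MD5-based hash
print(get_hash_sketch(b"hello", "sha256"))   # prefixed form, e.g. 'sha256=<digest>'
```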
550
def test_async_call_same_actor_multiple_times(self): actors = [Actor.remote(i, maybe_crash=False) for i in range(4)] manager = FaultTolerantActorManager(actors=actors) # 2 asynchronous call to actor 0. num_of_calls = manager.foreach_actor_async( lambda w: w.call(), healthy_only=False, remote_actor_indices=[0, 0], ) self.assertEqual(num_of_calls, 2) # Now, let's actually fetch the results. results = manager.fetch_ready_async_reqs(timeout_seconds=None) # Returns 1 and 2, representing the first and second calls to actor 0. self.assertEqual([r.get() for r in results.ignore_errors()], [1, 2])
Test multiple asynchronous remote calls to the same actor.
9
63
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_async_call_same_actor_multiple_times(self): actors = [Actor.remote(i, maybe_crash=False) for i in range(4)] manager = FaultTolerantActorManager(actors=actors) # 2 asynchronous call to actor 0. num_of_calls = manager.foreach_actor_async( lambda w: w.call(), healthy_only=False, remote_actor_indices=[0, 0], ) self.assertEqual(num_of_calls, 2) # Now, let's actually fetch the results. results = manager.fetch_ready_async_reqs(timeout_seconds=None) # Returns 1 and 2, representing the first and second calls to actor 0. self.assertEqual([r.get() for r in results.ignore_errors()], [1, 2]) ``` ###Assistant : Test multiple asynchronous remote calls to the same actor.
551
def _load(self): data = {} if not self._is_extract: if not self.have_alignments_file: return data data = super()._load() return data skip_existing = hasattr(self._args, 'skip_existing') and self._args.skip_existing skip_faces = hasattr(self._args, 'skip_faces') and self._args.skip_faces if not skip_existing and not skip_faces: logger.debug("No skipping selected. Returning empty dictionary") return data if not self.have_alignments_file and (skip_existing or skip_faces): logger.warning("Skip Existing/Skip Faces selected, but no alignments file found!") return data data = super()._load() if skip_faces: # Remove items from alignments that have no faces so they will # be re-detected del_keys = [key for key, val in data.items() if not val["faces"]] logger.debug("Frames with no faces selected for redetection: %s", len(del_keys)) for key in del_keys: if key in data: logger.trace("Selected for redetection: '%s'", key) del data[key] return data
Override the parent :func:`~lib.align.Alignments._load` to handle skip existing frames and faces on extract. If skip existing has been selected, existing alignments are loaded and returned to the calling script. Returns ------- dict Any alignments that have already been extracted if skip existing has been selected otherwise an empty dictionary
49
119
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _load(self): data = {} if not self._is_extract: if not self.have_alignments_file: return data data = super()._load() return data skip_existing = hasattr(self._args, 'skip_existing') and self._args.skip_existing skip_faces = hasattr(self._args, 'skip_faces') and self._args.skip_faces if not skip_existing and not skip_faces: logger.debug("No skipping selected. Returning empty dictionary") return data if not self.have_alignments_file and (skip_existing or skip_faces): logger.warning("Skip Existing/Skip Faces selected, but no alignments file found!") return data data = super()._load() if skip_faces: # Remove items from alignments that have no faces so they will # be re-detected del_keys = [key for key, val in data.items() if not val["faces"]] logger.debug("Frames with no faces selected for redetection: %s", len(del_keys)) for key in del_keys: if key in data: logger.trace("Selected for redetection: '%s'", key) del data[key] return data ``` ###Assistant : Override the parent :func:`~lib.align.Alignments._load` to handle skip existing frames and faces on extract. If skip existing has been selected, existing alignments are loaded and returned to the calling script. Returns ------- dict Any alignments that have already been extracted if skip existing has been selected otherwise an empty dictionary
552
def get_gtech() -> pd.DataFrame: return get_df( "https://finance.yahoo.com/screener/predefined/growth_technology_stocks" ) @log_start_end(log=logger)
Get technology stocks with revenue and earnings growth in excess of 25%. [Source: Yahoo Finance] Returns ------- pd.DataFrame Growth technology stocks
21
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_gtech() -> pd.DataFrame: return get_df( "https://finance.yahoo.com/screener/predefined/growth_technology_stocks" ) @log_start_end(log=logger) ``` ###Assistant : Get technology stocks with revenue and earnings growth in excess of 25%. [Source: Yahoo Finance] Returns ------- pd.DataFrame Growth technology stocks
553
def test_bert2gpt2_summarization(self): model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16") model.to(torch_device) tokenizer_in = AutoTokenizer.from_pretrained("bert-base-cased") tokenizer_out = AutoTokenizer.from_pretrained("../gpt2") ARTICLE_STUDENTS = EXPECTED_SUMMARY_STUDENTS = input_dict = tokenizer_in(ARTICLE_STUDENTS, return_tensors="pt") output_ids = model.generate(input_dict["input_ids"].to(torch_device)) summary = tokenizer_out.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) @require_torch
(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said. Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.SAS Alpha Epsilon suspended the students, but university president says it's permanent.\nThe fraternity has had to deal with a string of student deaths since 2010.\nSAS has more than 200,000 members, many of whom are students.\nA student died while being forced into excessive alcohol consumption.
403
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_bert2gpt2_summarization(self): model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16") model.to(torch_device) tokenizer_in = AutoTokenizer.from_pretrained("bert-base-cased") tokenizer_out = AutoTokenizer.from_pretrained("../gpt2") ARTICLE_STUDENTS = EXPECTED_SUMMARY_STUDENTS = input_dict = tokenizer_in(ARTICLE_STUDENTS, return_tensors="pt") output_ids = model.generate(input_dict["input_ids"].to(torch_device)) summary = tokenizer_out.batch_decode(output_ids, skip_special_tokens=True) self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS]) @require_torch ``` ###Assistant : (CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said. Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.SAS Alpha Epsilon suspended the students, but university president says it's permanent.\nThe fraternity has had to deal with a string of student deaths since 2010.\nSAS has more than 200,000 members, many of whom are students.\nA student died while being forced into excessive alcohol consumption.
554
def _get_builtin_permissions(opts): # noqa: D205, D212 perms = [] for action in opts.default_permissions: perms.append( ( get_permission_codename(action, opts), "Can %s %s" % (action, opts.verbose_name_raw), ) ) return perms
Return (codename, name) for all autogenerated permissions. By default, this is ('add', 'change', 'delete', 'view')
15
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_builtin_permissions(opts): # noqa: D205, D212 perms = [] for action in opts.default_permissions: perms.append( ( get_permission_codename(action, opts), "Can %s %s" % (action, opts.verbose_name_raw), ) ) return perms ``` ###Assistant : Return (codename, name) for all autogenerated permissions. By default, this is ('add', 'change', 'delete', 'view')
555
def accumulate(self, model): self._do_sync() if self.sync_gradients: context = contextlib.nullcontext else: context = self.no_sync with context(model): yield
A context manager that will lightly wrap around and perform gradient accumulation automatically Args: model (`torch.nn.Module`): PyTorch Module that was prepared with `Accelerator.prepare`
23
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def accumulate(self, model): self._do_sync() if self.sync_gradients: context = contextlib.nullcontext else: context = self.no_sync with context(model): yield ``` ###Assistant : A context manager that will lightly wrap around and perform gradient accumulation automatically Args: model (`torch.nn.Module`): PyTorch Module that was prepared with `Accelerator.prepare`
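A stdlib-only sketch of the control flow behind `accumulate`: pick `contextlib.nullcontext` when gradients should sync and a `no_sync`-style context otherwise. `ToyAccelerator` and its fields are hypothetical stand-ins, not the `accelerate` API:
```Python
import contextlib


class ToyAccelerator:
    def __init__(self, accumulation_steps=4):
        self.accumulation_steps = accumulation_steps
        self.step = 0
        self.sync_gradients = True

    @contextlib.contextmanager
    def no_sync(self, model):
        # Stand-in for DDP's no_sync(): gradient all-reduce would be skipped here.
        yield

    def _do_sync(self):
        self.step += 1
        self.sync_gradients = self.step % self.accumulation_steps == 0

    @contextlib.contextmanager
    def accumulate(self, model):
        self._do_sync()
        context = contextlib.nullcontext if self.sync_gradients else self.no_sync
        with context(model):
            yield


acc = ToyAccelerator(accumulation_steps=2)
for step in range(4):
    with acc.accumulate("model"):
        pass                         # forward/backward would run here
    print(step, acc.sync_gradients)  # gradients sync on every second step
```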
556
def get_attribute(value): attribute = Attribute() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) if value and value[0] in ATTRIBUTE_ENDS: raise errors.HeaderParseError( "expected token but found '{}'".format(value)) token, value = get_attrtext(value) attribute.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) return attribute, value
[CFWS] 1*attrtext [CFWS] This version of the BNF makes the CFWS explicit, and as usual we use a value terminal for the actual run of characters. The RFC equivalent of attrtext is the token characters, with the subtraction of '*', "'", and '%'. We include tab in the excluded set just as we do for token.
56
48
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_attribute(value): attribute = Attribute() if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) if value and value[0] in ATTRIBUTE_ENDS: raise errors.HeaderParseError( "expected token but found '{}'".format(value)) token, value = get_attrtext(value) attribute.append(token) if value and value[0] in CFWS_LEADER: token, value = get_cfws(value) attribute.append(token) return attribute, value ``` ###Assistant : [CFWS] 1*attrtext [CFWS] This version of the BNF makes the CFWS explicit, and as usual we use a value terminal for the actual run of characters. The RFC equivalent of attrtext is the token characters, with the subtraction of '*', "'", and '%'. We include tab in the excluded set just as we do for token.
557
def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs): with ensure_clean_dir() as dirname: unique_filename_modin = get_unique_filename( extension=extension, data_dir=dirname ) unique_filename_pandas = get_unique_filename( extension=extension, data_dir=dirname ) # parameter `max_retries=0` is set for `to_csv` function on Ray engine, # in order to increase the stability of tests, we repeat the call of # the entire function manually last_exception = None for _ in range(3): try: getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs) except EXCEPTIONS as exc: last_exception = exc continue break else: raise last_exception getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs) assert assert_files_eq(unique_filename_modin, unique_filename_pandas) @pytest.fixture
Helper function to test `to_<extension>` methods. Args: modin_obj: Modin DataFrame or Series to test `to_<extension>` method. pandas_obj: pandas DataFrame or Series to test `to_<extension>` method. fn: name of the method that should be tested. extension: Extension of the test file.
40
82
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs): with ensure_clean_dir() as dirname: unique_filename_modin = get_unique_filename( extension=extension, data_dir=dirname ) unique_filename_pandas = get_unique_filename( extension=extension, data_dir=dirname ) # parameter `max_retries=0` is set for `to_csv` function on Ray engine, # in order to increase the stability of tests, we repeat the call of # the entire function manually last_exception = None for _ in range(3): try: getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs) except EXCEPTIONS as exc: last_exception = exc continue break else: raise last_exception getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs) assert assert_files_eq(unique_filename_modin, unique_filename_pandas) @pytest.fixture ``` ###Assistant : Helper function to test `to_<extension>` methods. Args: modin_obj: Modin DataFrame or Series to test `to_<extension>` method. pandas_obj: Pandas DataFrame or Series to test `to_<extension>` method. fn: name of the method, that should be tested. extension: Extension of the test file.
558
def _is_matching_generic_foreign_key(self, field): return ( isinstance(field, GenericForeignKey) and field.ct_field == self.content_type_field_name and field.fk_field == self.object_id_field_name )
Return True if field is a GenericForeignKey whose content type and object id fields correspond to the equivalent attributes on this GenericRelation.
22
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _is_matching_generic_foreign_key(self, field): return ( isinstance(field, GenericForeignKey) and field.ct_field == self.content_type_field_name and field.fk_field == self.object_id_field_name ) ``` ###Assistant : Return True if field is a GenericForeignKey whose content type and object id fields correspond to the equivalent attributes on this GenericRelation.
559
def hashes_to_frame(self): if not self._hashes_to_frame: logger.debug("Generating hashes to frame") for frame_name, val in self._data.items(): for idx, face in enumerate(val["faces"]): self._hashes_to_frame.setdefault(face["hash"], {})[frame_name] = idx return self._hashes_to_frame
dict: The SHA1 hash of the face mapped to the frame(s) and face index within the frame that the hash corresponds to. The structure of the dictionary is: {**SHA1_hash** (`str`): {**filename** (`str`): **face_index** (`int`)}}. Notes ----- This method is deprecated and exists purely for updating legacy hash based alignments to new png header storage in :class:`lib.align.update_legacy_png_header`. The first time this property is referenced, the dictionary will be created and cached. Subsequent references will be made to this cached dictionary.
79
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def hashes_to_frame(self): if not self._hashes_to_frame: logger.debug("Generating hashes to frame") for frame_name, val in self._data.items(): for idx, face in enumerate(val["faces"]): self._hashes_to_frame.setdefault(face["hash"], {})[frame_name] = idx return self._hashes_to_frame ``` ###Assistant : dict: The SHA1 hash of the face mapped to the frame(s) and face index within the frame that the hash corresponds to. The structure of the dictionary is: {**SHA1_hash** (`str`): {**filename** (`str`): **face_index** (`int`)}}. Notes ----- This method is deprecated and exists purely for updating legacy hash-based alignments to new png header storage in :class:`lib.align.update_legacy_png_header`. The first time this property is referenced, the dictionary will be created and cached. Subsequent references will be made to this cached dictionary.
560
def _get_textdoc(self, index): assert self._opt is not None # FIXME we probably should do eliding here. See # qcommonstyle.cpp:viewItemDrawText # https://github.com/qutebrowser/qutebrowser/issues/118 text_option = QTextOption() if self._opt.features & QStyleOptionViewItem.WrapText: text_option.setWrapMode(QTextOption.WordWrap) else: text_option.setWrapMode(QTextOption.ManualWrap) text_option.setTextDirection(self._opt.direction) text_option.setAlignment(QStyle.visualAlignment( self._opt.direction, self._opt.displayAlignment)) if self._doc is not None: self._doc.deleteLater() self._doc = QTextDocument(self) self._doc.setDefaultFont(self._opt.font) self._doc.setDefaultTextOption(text_option) self._doc.setDocumentMargin(2) if index.parent().isValid(): view = self.parent() assert isinstance(view, completionwidget.CompletionView), view pattern = view.pattern columns_to_filter = index.model().columns_to_filter(index) if index.column() in columns_to_filter and pattern: if self._opt.state & QStyle.State_Selected: color = config.val.colors.completion.item.selected.match.fg else: color = config.val.colors.completion.match.fg _Highlighter(self._doc, pattern, color) self._doc.setPlainText(self._opt.text) else: self._doc.setHtml( '<span style="font: {};">{}</span>'.format( html.escape(config.val.fonts.completion.category), html.escape(self._opt.text)))
Create the QTextDocument of an item. Args: index: The QModelIndex of the item to draw.
15
90
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_textdoc(self, index): assert self._opt is not None # FIXME we probably should do eliding here. See # qcommonstyle.cpp:viewItemDrawText # https://github.com/qutebrowser/qutebrowser/issues/118 text_option = QTextOption() if self._opt.features & QStyleOptionViewItem.WrapText: text_option.setWrapMode(QTextOption.WordWrap) else: text_option.setWrapMode(QTextOption.ManualWrap) text_option.setTextDirection(self._opt.direction) text_option.setAlignment(QStyle.visualAlignment( self._opt.direction, self._opt.displayAlignment)) if self._doc is not None: self._doc.deleteLater() self._doc = QTextDocument(self) self._doc.setDefaultFont(self._opt.font) self._doc.setDefaultTextOption(text_option) self._doc.setDocumentMargin(2) if index.parent().isValid(): view = self.parent() assert isinstance(view, completionwidget.CompletionView), view pattern = view.pattern columns_to_filter = index.model().columns_to_filter(index) if index.column() in columns_to_filter and pattern: if self._opt.state & QStyle.State_Selected: color = config.val.colors.completion.item.selected.match.fg else: color = config.val.colors.completion.match.fg _Highlighter(self._doc, pattern, color) self._doc.setPlainText(self._opt.text) else: self._doc.setHtml( '<span style="font: {};">{}</span>'.format( html.escape(config.val.fonts.completion.category), html.escape(self._opt.text))) ``` ###Assistant : Create the QTextDocument of an item. Args: index: The QModelIndex of the item to draw.
561
def feed(self, *args): assert len(args) != 0 self.terminals = [] for fed_layer in args: if isinstance(fed_layer, str): try: fed_layer = self.layers[fed_layer] except KeyError: raise KeyError('Unknown layer name fed: %s' % fed_layer) self.terminals.append(fed_layer) return self
Set the input(s) for the next operation by replacing the terminal nodes. The arguments can be either layer names or the actual layers.
23
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def feed(self, *args): assert len(args) != 0 self.terminals = [] for fed_layer in args: if isinstance(fed_layer, str): try: fed_layer = self.layers[fed_layer] except KeyError: raise KeyError('Unknown layer name fed: %s' % fed_layer) self.terminals.append(fed_layer) return self ``` ###Assistant : Set the input(s) for the next operation by replacing the terminal nodes. The arguments can be either layer names or the actual layers.
562
def unpack_iterator_input(iterator): try: next_element = iterator.get_next() except tf.errors.OutOfRangeError: raise RuntimeError( "Your dataset iterator ran out of data; " "Make sure that your dataset can generate " "required number of samples." ) if isinstance(next_element, (list, tuple)): if len(next_element) not in [2, 3]: raise ValueError( "Please provide model inputs as a list or tuple of 2 or 3 " "elements: (input, target) or (input, target, sample_weights) " "Received %s" % next_element ) if len(next_element) == 2: x, y = next_element weights = None else: x, y, weights = next_element else: x = next_element y = None weights = None return x, y, weights
Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`. Args: iterator: Instance of a dataset iterator. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
33
101
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unpack_iterator_input(iterator): try: next_element = iterator.get_next() except tf.errors.OutOfRangeError: raise RuntimeError( "Your dataset iterator ran out of data; " "Make sure that your dataset can generate " "required number of samples." ) if isinstance(next_element, (list, tuple)): if len(next_element) not in [2, 3]: raise ValueError( "Please provide model inputs as a list or tuple of 2 or 3 " "elements: (input, target) or (input, target, sample_weights) " "Received %s" % next_element ) if len(next_element) == 2: x, y = next_element weights = None else: x, y, weights = next_element else: x = next_element y = None weights = None return x, y, weights ``` ###Assistant : Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`. Args: iterator: Instance of a dataset iterator. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
563
def mock_2x2x4_devices(one_device_per_chip): return mock_devices(2, 2, 4, 'TPU v4', one_device_per_chip)
Hard-coded reproduction of jax.devices() output on 2x2x4.
7
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mock_2x2x4_devices(one_device_per_chip): return mock_devices(2, 2, 4, 'TPU v4', one_device_per_chip) ``` ###Assistant : Hard-coded reproduction of jax.devices() output on 2x2x4.
564
def export_triton(model_path, output_path="model_repository", model_name="ludwig_model", model_version=1, **kwargs): logger.info(f"Model path: {model_path}") logger.info(f"Output path: {output_path}") logger.info(f"Model name: {model_name}") logger.info(f"Model version: {model_version}") logger.info("\n") model = LudwigModel.load(model_path) os.makedirs(output_path, exist_ok=True) utils_export_triton(model, output_path, model_name, model_version) logger.info(f"Saved to: {output_path}")
Exports a model in torchscript format with config for Triton serving. # Inputs :param model_path: (str) filepath to pre-trained model. :param output_path: (str, default: `'model_repository'`) directory to store the triton models. :param model_name: (str, default: `'ludwig_model'`) save the Triton model under this name. :param model_version: (int, default: `1`) save the Triton model under this version. # Return :returns: (`None`)
55
31
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def export_triton(model_path, output_path="model_repository", model_name="ludwig_model", model_version=1, **kwargs): logger.info(f"Model path: {model_path}") logger.info(f"Output path: {output_path}") logger.info(f"Model name: {model_name}") logger.info(f"Model version: {model_version}") logger.info("\n") model = LudwigModel.load(model_path) os.makedirs(output_path, exist_ok=True) utils_export_triton(model, output_path, model_name, model_version) logger.info(f"Saved to: {output_path}") ``` ###Assistant : Exports a model in torchscript format with config for Triton serving. # Inputs :param model_path: (str) filepath to pre-trained model. :param output_path: (str, default: `'model_repository'`) directory to store the triton models. :param model_name: (str, default: `'ludwig_model'`) save the Triton model under this name. :param model_version: (int, default: `1`) save the Triton model under this version. # Return :returns: (`None`)
565
def tags(self) -> Sequence[Tuple[str, str]]: tags_key_column = self._get_column_name(Columns.TAGS_KEY) tags_value_column = self._get_column_name(Columns.TAGS_VALUE) if tags_key_column in self._snuba_data and tags_value_column in self._snuba_data: keys = self._snuba_data[tags_key_column] values = self._snuba_data[tags_value_column] if keys and values and len(keys) == len(values): return sorted(zip(keys, values)) else: return [] # Nodestore implementation try: rv = sorted( (t, v) for t, v in get_path(self.data, "tags", filter=True) or () if t is not None and v is not None ) return rv except ValueError: # at one point Sentry allowed invalid tag sets such as (foo, bar) # vs ((tag, foo), (tag, bar)) return []
Tags property uses tags from snuba if loaded otherwise falls back to nodestore.
13
93
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tags(self) -> Sequence[Tuple[str, str]]: tags_key_column = self._get_column_name(Columns.TAGS_KEY) tags_value_column = self._get_column_name(Columns.TAGS_VALUE) if tags_key_column in self._snuba_data and tags_value_column in self._snuba_data: keys = self._snuba_data[tags_key_column] values = self._snuba_data[tags_value_column] if keys and values and len(keys) == len(values): return sorted(zip(keys, values)) else: return [] # Nodestore implementation try: rv = sorted( (t, v) for t, v in get_path(self.data, "tags", filter=True) or () if t is not None and v is not None ) return rv except ValueError: # at one point Sentry allowed invalid tag sets such as (foo, bar) # vs ((tag, foo), (tag, bar)) return [] ``` ###Assistant : Tags property uses tags from snuba if loaded otherwise falls back to nodestore.
566
def test_cable_validates_compatible_types(self): # An interface cannot be connected to a power port cable = Cable(a_terminations=[self.interface1, self.interface2], b_terminations=[self.interface3]) with self.assertRaises(ValidationError): cable.clean() # TODO: Remove this? # def test_cable_front_port_cannot_connect_to_corresponding_rear_port(self): # # cable = Cable(a_terminations=[self.front_port1], b_terminations=[self.rear_port1]) # with self.assertRaises(ValidationError): # cable.clean()
The clean method should have a check to ensure only compatible port types can be connected by a cable. A cable cannot connect a front port to its corresponding rear port.
33
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_cable_validates_compatible_types(self): # An interface cannot be connected to a power port cable = Cable(a_terminations=[self.interface1, self.interface2], b_terminations=[self.interface3]) with self.assertRaises(ValidationError): cable.clean() # TODO: Remove this? # def test_cable_front_port_cannot_connect_to_corresponding_rear_port(self): # # cable = Cable(a_terminations=[self.front_port1], b_terminations=[self.rear_port1]) # with self.assertRaises(ValidationError): # cable.clean() ``` ###Assistant : The clean method should have a check to ensure only compatible port types can be connected by a cable. A cable cannot connect a front port to its corresponding rear port.
567
def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): if h5py is None: raise ImportError( "`save_model()` using h5 format requires h5py. Could not " "import h5py." ) # TODO(psv) Add warning when we save models that contain non-serializable # entities like metrics added using `add_metric` and losses added using # `add_loss.` if len(model.weights) != len(model._undeduplicated_weights): logging.warning( "Found duplicated `Variable`s in Model's `weights`. " "This is usually caused by `Variable`s being shared by " "Layers in the Model. These `Variable`s will be treated " "as separate `Variable`s when the Model is restored. To " 'avoid this, please save with `save_format="tf"`.' ) if not isinstance(filepath, h5py.File): # If file exists and should not be overwritten. if not overwrite and os.path.isfile(filepath): proceed = ask_to_proceed_with_overwrite(filepath) if not proceed: return # Try creating dir if not exist dirpath = os.path.dirname(filepath) if not os.path.exists(dirpath): tf.io.gfile.makedirs(dirpath) f = h5py.File(filepath, mode="w") opened_new_file = True else: f = filepath opened_new_file = False try: model_metadata = saving_utils.model_metadata(model, include_optimizer) for k, v in model_metadata.items(): if isinstance(v, (dict, list, tuple)): f.attrs[k] = json.dumps( v, default=json_utils.get_json_type ).encode("utf8") else: f.attrs[k] = v model_weights_group = f.create_group("model_weights") save_weights_to_hdf5_group(model_weights_group, model) # TODO(b/128683857): Add integration tests between tf.keras and external # Keras, to avoid breaking TF.js users. if isinstance(model.optimizer, optimizer_experimental.Optimizer): logging.warning( "HDF5 format does not save weights of" " `optimizer_experimental.Optimizer`, your optimizer will" " be recompiled at loading time." ) elif ( include_optimizer and model.optimizer and not isinstance(model.optimizer, optimizer_v1.TFOptimizer) ): save_optimizer_weights_to_hdf5_group(f, model.optimizer) f.flush() finally: if opened_new_file: f.close()
Saves a model to a HDF5 file. The saved model contains: - the model's configuration (topology) - the model's weights - the model's optimizer's state (if any) Thus the saved model can be reinstantiated in the exact same state, without any of the code used for model definition or training. Args: model: Keras model instance to be saved. filepath: One of the following: - String, path where to save the model - `h5py.File` object where to save the model overwrite: Whether we should overwrite any existing model at the target location, or instead ask the user with a manual prompt. include_optimizer: If True, save optimizer's state together. Raises: ImportError: if h5py is not available.
114
235
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True): if h5py is None: raise ImportError( "`save_model()` using h5 format requires h5py. Could not " "import h5py." ) # TODO(psv) Add warning when we save models that contain non-serializable # entities like metrics added using `add_metric` and losses added using # `add_loss.` if len(model.weights) != len(model._undeduplicated_weights): logging.warning( "Found duplicated `Variable`s in Model's `weights`. " "This is usually caused by `Variable`s being shared by " "Layers in the Model. These `Variable`s will be treated " "as separate `Variable`s when the Model is restored. To " 'avoid this, please save with `save_format="tf"`.' ) if not isinstance(filepath, h5py.File): # If file exists and should not be overwritten. if not overwrite and os.path.isfile(filepath): proceed = ask_to_proceed_with_overwrite(filepath) if not proceed: return # Try creating dir if not exist dirpath = os.path.dirname(filepath) if not os.path.exists(dirpath): tf.io.gfile.makedirs(dirpath) f = h5py.File(filepath, mode="w") opened_new_file = True else: f = filepath opened_new_file = False try: model_metadata = saving_utils.model_metadata(model, include_optimizer) for k, v in model_metadata.items(): if isinstance(v, (dict, list, tuple)): f.attrs[k] = json.dumps( v, default=json_utils.get_json_type ).encode("utf8") else: f.attrs[k] = v model_weights_group = f.create_group("model_weights") save_weights_to_hdf5_group(model_weights_group, model) # TODO(b/128683857): Add integration tests between tf.keras and external # Keras, to avoid breaking TF.js users. if isinstance(model.optimizer, optimizer_experimental.Optimizer): logging.warning( "HDF5 format does not save weights of" " `optimizer_experimental.Optimizer`, your optimizer will" " be recompiled at loading time." ) elif ( include_optimizer and model.optimizer and not isinstance(model.optimizer, optimizer_v1.TFOptimizer) ): save_optimizer_weights_to_hdf5_group(f, model.optimizer) f.flush() finally: if opened_new_file: f.close() ``` ###Assistant : Saves a model to a HDF5 file. The saved model contains: - the model's configuration (topology) - the model's weights - the model's optimizer's state (if any) Thus the saved model can be reinstantiated in the exact same state, without any of the code used for model definition or training. Args: model: Keras model instance to be saved. filepath: One of the following: - String, path where to save the model - `h5py.File` object where to save the model overwrite: Whether we should overwrite any existing model at the target location, or instead ask the user with a manual prompt. include_optimizer: If True, save optimizer's state together. Raises: ImportError: if h5py is not available.
568
def set(self, **kwargs) -> None: # nosec attributes = {} user_id = kwargs["user_id"] user = self.first(id_int=int(user_id)) if not user: raise UserNotFoundError for k, v in kwargs.items(): if k in user.__attr_searchable__: attributes[k] = v if kwargs.get("email", None): user.email = kwargs["email"] elif kwargs.get("role", None): user.role = kwargs["role"] elif kwargs.get("name", None): user.name = kwargs["name"] elif kwargs.get("budget", None): user.budget = kwargs["budget"] elif kwargs.get("website", None): user.website = kwargs["website"] elif kwargs.get("institution", None): user.institution = kwargs["institution"] else: raise Exception attributes["__blob__"] = user.to_bytes() self.update_one({"id_int": int(user_id)}, {"$set": attributes})
Updates the information for the given user id. Args: user_id (str): unique id of the user in the database. email (str, optional): email of the user. Defaults to "". password (str, optional): password of the user. Defaults to "". role (int, optional): role of the user. Defaults to 0. name (str, optional): name of the user. Defaults to "". website (str, optional): website of the institution of the user. Defaults to "". institution (str, optional): name of the institution of the user. Defaults to "". budget (float, optional): privacy budget allocated to the user. Defaults to 0.0. Raises: UserNotFoundError: Raised when a user does not exist for the given user id. Exception: Raised when an invalid argument/property is passed.
119
79
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set(self, **kwargs) -> None: # nosec attributes = {} user_id = kwargs["user_id"] user = self.first(id_int=int(user_id)) if not user: raise UserNotFoundError for k, v in kwargs.items(): if k in user.__attr_searchable__: attributes[k] = v if kwargs.get("email", None): user.email = kwargs["email"] elif kwargs.get("role", None): user.role = kwargs["role"] elif kwargs.get("name", None): user.name = kwargs["name"] elif kwargs.get("budget", None): user.budget = kwargs["budget"] elif kwargs.get("website", None): user.website = kwargs["website"] elif kwargs.get("institution", None): user.institution = kwargs["institution"] else: raise Exception attributes["__blob__"] = user.to_bytes() self.update_one({"id_int": int(user_id)}, {"$set": attributes}) ``` ###Assistant : Updates the information for the given user id. Args: user_id (str): unique id of the user in the database. email (str, optional): email of the user. Defaults to "". password (str, optional): password of the user. Defaults to "". role (int, optional): role of the user. Defaults to 0. name (str, optional): name of the user. Defaults to "". website (str, optional): website of the institution of the user. Defaults to "". institution (str, optional): name of the institution of the user. Defaults to "". budget (float, optional): privacy budget allocated to the user. Defaults to 0.0. Raises: UserNotFoundError: Raised when a user does not exist for the given user id. Exception: Raised when an invalid argument/property is passed.
569
def test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metric(self): use_case_id = UseCaseKey.RELEASE_HEALTH expected_derived_metrics_entities = { SessionMRI.ALL.value: "metrics_counters", SessionMRI.ALL_USER.value: "metrics_sets", SessionMRI.CRASHED.value: "metrics_counters", SessionMRI.CRASHED_USER.value: "metrics_sets", SessionMRI.ABNORMAL.value: "metrics_counters", SessionMRI.ABNORMAL_USER.value: "metrics_sets", SessionMRI.CRASH_FREE_RATE.value: "metrics_counters", SessionMRI.CRASH_FREE_USER_RATE.value: "metrics_sets", SessionMRI.ERRORED_PREAGGREGATED.value: "metrics_counters", SessionMRI.ERRORED_SET.value: "metrics_sets", SessionMRI.ERRORED_USER_ALL.value: "metrics_sets", SessionMRI.CRASHED_AND_ABNORMAL_USER.value: "metrics_sets", SessionMRI.ERRORED_USER.value: "metrics_sets", } for key, value in expected_derived_metrics_entities.items(): assert ( MOCKED_DERIVED_METRICS[key].get_entity( projects=[self.project], use_case_id=use_case_id ) ) == value # Incorrectly setup SingularEntityDerivedMetric with metrics spanning multiple entities with pytest.raises(DerivedMetricParseException): self.crash_free_fake.get_entity(projects=[self.project], use_case_id=use_case_id)
Test that ensures that the get_entity method works as expected in the sense that: - Since it is the first function that is called by the query_builder, validation is applied there to ensure that if it is an instance of a SingleEntityDerivedMetric, then it is composed of only other SingleEntityDerivedMetric or RawMetric that belong to the same entity - Returns the entity of that derived metric
64
62
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_entity_and_validate_dependency_tree_of_a_single_entity_derived_metric(self): use_case_id = UseCaseKey.RELEASE_HEALTH expected_derived_metrics_entities = { SessionMRI.ALL.value: "metrics_counters", SessionMRI.ALL_USER.value: "metrics_sets", SessionMRI.CRASHED.value: "metrics_counters", SessionMRI.CRASHED_USER.value: "metrics_sets", SessionMRI.ABNORMAL.value: "metrics_counters", SessionMRI.ABNORMAL_USER.value: "metrics_sets", SessionMRI.CRASH_FREE_RATE.value: "metrics_counters", SessionMRI.CRASH_FREE_USER_RATE.value: "metrics_sets", SessionMRI.ERRORED_PREAGGREGATED.value: "metrics_counters", SessionMRI.ERRORED_SET.value: "metrics_sets", SessionMRI.ERRORED_USER_ALL.value: "metrics_sets", SessionMRI.CRASHED_AND_ABNORMAL_USER.value: "metrics_sets", SessionMRI.ERRORED_USER.value: "metrics_sets", } for key, value in expected_derived_metrics_entities.items(): assert ( MOCKED_DERIVED_METRICS[key].get_entity( projects=[self.project], use_case_id=use_case_id ) ) == value # Incorrectly setup SingularEntityDerivedMetric with metrics spanning multiple entities with pytest.raises(DerivedMetricParseException): self.crash_free_fake.get_entity(projects=[self.project], use_case_id=use_case_id) ``` ###Assistant : Test that ensures that the get_entity method works as expected in the sense that: - Since it is the first function that is called by the query_builder, validation is applied there to ensure that if it is an instance of a SingleEntityDerivedMetric, then it is composed of only other SingleEntityDerivedMetric or RawMetric that belong to the same entity - Returns the entity of that derived metric
570
def transform(self, X): check_is_fitted(self) X = self._validate_data(X, reset=False) X = X - self.mean_ U = ridge_regression( self.components_.T, X.T, self.ridge_alpha, solver="cholesky" ) return U
Least Squares projection of the data onto the sparse components. To avoid instability issues in case the system is under-determined, regularization can be applied (Ridge regression) via the `ridge_alpha` parameter. Note that Sparse PCA components orthogonality is not enforced as in PCA hence one cannot use a simple linear projection. Parameters ---------- X : ndarray of shape (n_samples, n_features) Test data to be transformed, must have the same number of features as the data used to train the model. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed data.
90
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transform(self, X): check_is_fitted(self) X = self._validate_data(X, reset=False) X = X - self.mean_ U = ridge_regression( self.components_.T, X.T, self.ridge_alpha, solver="cholesky" ) return U ``` ###Assistant : Least Squares projection of the data onto the sparse components. To avoid instability issues in case the system is under-determined, regularization can be applied (Ridge regression) via the `ridge_alpha` parameter. Note that Sparse PCA components orthogonality is not enforced as in PCA hence one cannot use a simple linear projection. Parameters ---------- X : ndarray of shape (n_samples, n_features) Test data to be transformed, must have the same number of features as the data used to train the model. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed data.
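A minimal usage sketch of the `transform` method documented above, assuming scikit-learn's public `SparsePCA` estimator; the random data and parameter values below are illustrative placeholders, not taken from the dataset row:

```python
import numpy as np
from sklearn.decomposition import SparsePCA

# Fit SparsePCA on random data, then project it with transform()
# (the method documented in the row above); ridge_alpha controls the
# regularization used in the least-squares projection.
rng = np.random.RandomState(0)
X = rng.randn(50, 10)

spca = SparsePCA(n_components=3, ridge_alpha=0.01, random_state=0)
spca.fit(X)
X_new = spca.transform(X)
print(X_new.shape)  # (50, 3)
```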
571
def serialize_object(obj, extra=None): json_str = serialize('json', [obj]) print(json_str) data = json.loads(json_str)[0]['fields'] # Exclude any MPTTModel fields if issubclass(obj.__class__, MPTTModel): for field in ['level', 'lft', 'rght', 'tree_id']: data.pop(field) # Include custom_field_data as "custom_fields" if hasattr(obj, 'custom_field_data'): data['custom_fields'] = data.pop('custom_field_data') # Include any tags. Check for tags cached on the instance; fall back to using the manager. if is_taggable(obj): tags = getattr(obj, '_tags', None) or obj.tags.all() data['tags'] = [tag.name for tag in tags] # Append any extra data if extra is not None: data.update(extra) # Copy keys to list to avoid 'dictionary changed size during iteration' exception for key in list(data): # Private fields shouldn't be logged in the object change if isinstance(key, str) and key.startswith('_'): data.pop(key) return data
Return a generic JSON representation of an object using Django's built-in serializer. (This is used for things like change logging, not the REST API.) Optionally include a dictionary to supplement the object data. A list of keys can be provided to exclude them from the returned dictionary. Private fields (prefaced with an underscore) are implicitly excluded.
56
117
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serialize_object(obj, extra=None): json_str = serialize('json', [obj]) print(json_str) data = json.loads(json_str)[0]['fields'] # Exclude any MPTTModel fields if issubclass(obj.__class__, MPTTModel): for field in ['level', 'lft', 'rght', 'tree_id']: data.pop(field) # Include custom_field_data as "custom_fields" if hasattr(obj, 'custom_field_data'): data['custom_fields'] = data.pop('custom_field_data') # Include any tags. Check for tags cached on the instance; fall back to using the manager. if is_taggable(obj): tags = getattr(obj, '_tags', None) or obj.tags.all() data['tags'] = [tag.name for tag in tags] # Append any extra data if extra is not None: data.update(extra) # Copy keys to list to avoid 'dictionary changed size during iteration' exception for key in list(data): # Private fields shouldn't be logged in the object change if isinstance(key, str) and key.startswith('_'): data.pop(key) return data ``` ###Assistant : Return a generic JSON representation of an object using Django's built-in serializer. (This is used for things like change logging, not the REST API.) Optionally include a dictionary to supplement the object data. A list of keys can be provided to exclude them from the returned dictionary. Private fields (prefaced with an underscore) are implicitly excluded.
572
async def test_statistics_during_period(recorder_mock, hass, hass_ws_client, caplog): now = dt_util.utcnow() await async_setup_component(hass, "history", {}) client = await hass_ws_client() # Test the WS API works and issues a warning await client.send_json( { "id": 1, "type": "history/statistics_during_period", "start_time": now.isoformat(), "end_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "hour", } ) response = await client.receive_json() assert response["success"] assert response["result"] == {} assert ( "WS API 'history/statistics_during_period' is deprecated and will be removed in " "Home Assistant Core 2022.12. Use 'recorder/statistics_during_period' instead" ) in caplog.text # Test the WS API forwards to recorder with patch( "homeassistant.components.history.recorder_ws.ws_handle_get_statistics_during_period", wraps=ws_handle_get_statistics_during_period, ) as ws_mock: await client.send_json( { "id": 2, "type": "history/statistics_during_period", "start_time": now.isoformat(), "end_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "hour", } ) await client.receive_json() ws_mock.assert_awaited_once()
Test history/statistics_during_period forwards to recorder.
5
112
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_statistics_during_period(recorder_mock, hass, hass_ws_client, caplog): now = dt_util.utcnow() await async_setup_component(hass, "history", {}) client = await hass_ws_client() # Test the WS API works and issues a warning await client.send_json( { "id": 1, "type": "history/statistics_during_period", "start_time": now.isoformat(), "end_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "hour", } ) response = await client.receive_json() assert response["success"] assert response["result"] == {} assert ( "WS API 'history/statistics_during_period' is deprecated and will be removed in " "Home Assistant Core 2022.12. Use 'recorder/statistics_during_period' instead" ) in caplog.text # Test the WS API forwards to recorder with patch( "homeassistant.components.history.recorder_ws.ws_handle_get_statistics_during_period", wraps=ws_handle_get_statistics_during_period, ) as ws_mock: await client.send_json( { "id": 2, "type": "history/statistics_during_period", "start_time": now.isoformat(), "end_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "hour", } ) await client.receive_json() ws_mock.assert_awaited_once() ``` ###Assistant : Test history/statistics_during_period forwards to recorder.
573
def queryset_chunks(self, qs, chunk_size=DEFAULT_CHUNK_SIZE): i = 0 while True: items = list(qs[i * chunk_size :][:chunk_size]) if not items: break yield items i += 1
Yield a queryset in chunks of at most ``chunk_size``. The chunk yielded will be a list, not a queryset. Iterating over the chunks is done in a transaction so that the order and count of items in the queryset remains stable.
41
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def queryset_chunks(self, qs, chunk_size=DEFAULT_CHUNK_SIZE): i = 0 while True: items = list(qs[i * chunk_size :][:chunk_size]) if not items: break yield items i += 1 ``` ###Assistant : Yield a queryset in chunks of at most ``chunk_size``. The chunk yielded will be a list, not a queryset. Iterating over the chunks is done in a transaction so that the order and count of items in the queryset remains stable.
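A small, hedged sketch of how the chunking generator documented above might be consumed; the `User` model and chunk size are placeholders for illustration, not part of the dataset row:

```python
from django.contrib.auth.models import User

def queryset_chunks(qs, chunk_size=1000):
    # Standalone version of the documented helper: slice the queryset into
    # successive chunks and yield each chunk as a plain list.
    i = 0
    while True:
        items = list(qs[i * chunk_size:][:chunk_size])
        if not items:
            break
        yield items
        i += 1

for chunk in queryset_chunks(User.objects.order_by("pk"), chunk_size=500):
    print(f"processing {len(chunk)} users")
```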
574
def flattened_having(self) -> List[Condition]: flattened: List[Condition] = [] boolean_conditions: List[BooleanCondition] = [] for condition in self.having: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) while len(boolean_conditions) > 0: boolean_condition = boolean_conditions.pop() for condition in boolean_condition.conditions: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) return flattened
Return self.having as a flattened list ignoring boolean operators. This is because self.having can have a mix of BooleanConditions and Conditions. And each BooleanCondition can in turn be a mix of either type.
33
45
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def flattened_having(self) -> List[Condition]: flattened: List[Condition] = [] boolean_conditions: List[BooleanCondition] = [] for condition in self.having: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) while len(boolean_conditions) > 0: boolean_condition = boolean_conditions.pop() for condition in boolean_condition.conditions: if isinstance(condition, Condition): flattened.append(condition) elif isinstance(condition, BooleanCondition): boolean_conditions.append(condition) return flattened ``` ###Assistant : Return self.having as a flattened list ignoring boolean operators. This is because self.having can have a mix of BooleanConditions and Conditions. And each BooleanCondition can in turn be a mix of either type.
575
def _add_timedelta_arraylike(self, other): # overridden by PeriodArray if len(self) != len(other): raise ValueError("cannot add indices of unequal length") if isinstance(other, np.ndarray): # ndarray[timedelta64]; wrap in TimedeltaIndex for op from pandas.core.arrays import TimedeltaArray other = TimedeltaArray._from_sequence(other) self_i8 = self.asi8 other_i8 = other.asi8 new_values = checked_add_with_arr( self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan ) if self._hasna or other._hasna: mask = self._isnan | other._isnan np.putmask(new_values, mask, iNaT) return type(self)(new_values, dtype=self.dtype)
Add a delta of a TimedeltaIndex Returns ------- Same type as self
12
64
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _add_timedelta_arraylike(self, other): # overridden by PeriodArray if len(self) != len(other): raise ValueError("cannot add indices of unequal length") if isinstance(other, np.ndarray): # ndarray[timedelta64]; wrap in TimedeltaIndex for op from pandas.core.arrays import TimedeltaArray other = TimedeltaArray._from_sequence(other) self_i8 = self.asi8 other_i8 = other.asi8 new_values = checked_add_with_arr( self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan ) if self._hasna or other._hasna: mask = self._isnan | other._isnan np.putmask(new_values, mask, iNaT) return type(self)(new_values, dtype=self.dtype) ``` ###Assistant : Add a delta of a TimedeltaIndex Returns ------- Same type as self
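The row above documents a pandas-internal helper; as a non-authoritative companion, the same elementwise addition and NaT propagation can be observed through the public API:

```python
import pandas as pd

# Elementwise addition of a DatetimeIndex and a TimedeltaIndex; a NaT in
# either operand propagates to the result, mirroring the masking logic in
# the internal _add_timedelta_arraylike shown above.
dti = pd.date_range("2021-01-01", periods=3, freq="D")
tdi = pd.TimedeltaIndex(["1 days", "2 days", pd.NaT])
print(dti + tdi)  # ['2021-01-02', '2021-01-04', 'NaT']
```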
576
def _map_graph_network(inputs, outputs): # "depth" is number of layers between output Node and the Node. # Nodes are ordered from inputs -> outputs. nodes_in_decreasing_depth, layer_indices = _build_map(outputs) network_nodes = { _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node)) for node in nodes_in_decreasing_depth } nodes_depths = {} # dict {node: depth value} layers_depths = {} # dict {layer: depth value} for node in reversed(nodes_in_decreasing_depth): # If the depth is not set, the node has no outbound nodes (depth 0). depth = nodes_depths.setdefault(node, 0) # Update the depth of the corresponding layer previous_depth = layers_depths.get(node.layer, 0) # If we've seen this layer before at a higher depth, # we should use that depth instead of the node depth. # This is necessary for shared layers that have inputs at different # depth levels in the graph. depth = max(depth, previous_depth) layers_depths[node.layer] = depth nodes_depths[node] = depth # Update the depth of inbound nodes. # The "depth" of a node is the max of the depths # of all nodes it is connected to + 1. for node_dep in node.parent_nodes: previous_depth = nodes_depths.get(node_dep, 0) nodes_depths[node_dep] = max(depth + 1, previous_depth) # Handle inputs that are not connected to outputs. # We do not error out here because the inputs may be used to compute losses # and metrics. for input_t in inputs: input_layer = input_t._keras_history[0] if input_layer not in layers_depths: layers_depths[input_layer] = 0 layer_indices[input_layer] = -1 nodes_depths[input_layer._inbound_nodes[0]] = 0 network_nodes.add(_make_node_key(input_layer.name, 0)) # Build a dict {depth: list of nodes with this depth} nodes_by_depth = collections.defaultdict(list) for node, depth in nodes_depths.items(): nodes_by_depth[depth].append(node) # Build a dict {depth: list of layers with this depth} layers_by_depth = collections.defaultdict(list) for layer, depth in layers_depths.items(): layers_by_depth[depth].append(layer) # Get sorted list of layer depths. depth_keys = list(layers_by_depth.keys()) depth_keys.sort(reverse=True) # Set self.layers ordered by depth. layers = [] for depth in depth_keys: layers_for_depth = layers_by_depth[depth] # Network.layers needs to have a deterministic order: # here we order them by traversal order. layers_for_depth.sort(key=lambda x: layer_indices[x]) layers.extend(layers_for_depth) # Get sorted list of node depths. depth_keys = list(nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Check that all tensors required are computable. # computable_tensors: all tensors in the graph # that can be computed from the inputs provided. computable_tensors = set() for x in inputs: computable_tensors.add(id(x)) layers_with_complete_input = [] # To provide a better error msg. for depth in depth_keys: for node in nodes_by_depth[depth]: layer = node.layer if layer and not node.is_input: for x in tf.nest.flatten(node.keras_inputs): if id(x) not in computable_tensors: raise ValueError( f"Graph disconnected: cannot obtain value for tensor {x} " f'at layer "{layer.name}". The following previous layers ' f"were accessed without issue: {layers_with_complete_input}" ) for x in tf.nest.flatten(node.outputs): computable_tensors.add(id(x)) layers_with_complete_input.append(layer.name) # Ensure name unicity, which will be crucial for serialization # (since serialized nodes refer to layers by their name). 
all_names = [layer.name for layer in layers] for name in all_names: if all_names.count(name) != 1: raise ValueError( f'The name "{name}" is used {all_names.count(name)} ' "times in the model. All layer names should be unique." ) return network_nodes, nodes_by_depth, layers, layers_by_depth
Validates a network's topology and gather its layers and nodes. Args: inputs: List of input tensors. outputs: List of outputs tensors. Returns: A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`. - nodes: list of Node instances. - nodes_by_depth: dict mapping ints (depth) to lists of node instances. - layers: list of Layer instances. - layers_by_depth: dict mapping ints (depth) to lists of layer instances. Raises: ValueError: In case the network is not valid (e.g. disconnected graph).
74
488
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _map_graph_network(inputs, outputs): # "depth" is number of layers between output Node and the Node. # Nodes are ordered from inputs -> outputs. nodes_in_decreasing_depth, layer_indices = _build_map(outputs) network_nodes = { _make_node_key(node.layer.name, node.layer._inbound_nodes.index(node)) for node in nodes_in_decreasing_depth } nodes_depths = {} # dict {node: depth value} layers_depths = {} # dict {layer: depth value} for node in reversed(nodes_in_decreasing_depth): # If the depth is not set, the node has no outbound nodes (depth 0). depth = nodes_depths.setdefault(node, 0) # Update the depth of the corresponding layer previous_depth = layers_depths.get(node.layer, 0) # If we've seen this layer before at a higher depth, # we should use that depth instead of the node depth. # This is necessary for shared layers that have inputs at different # depth levels in the graph. depth = max(depth, previous_depth) layers_depths[node.layer] = depth nodes_depths[node] = depth # Update the depth of inbound nodes. # The "depth" of a node is the max of the depths # of all nodes it is connected to + 1. for node_dep in node.parent_nodes: previous_depth = nodes_depths.get(node_dep, 0) nodes_depths[node_dep] = max(depth + 1, previous_depth) # Handle inputs that are not connected to outputs. # We do not error out here because the inputs may be used to compute losses # and metrics. for input_t in inputs: input_layer = input_t._keras_history[0] if input_layer not in layers_depths: layers_depths[input_layer] = 0 layer_indices[input_layer] = -1 nodes_depths[input_layer._inbound_nodes[0]] = 0 network_nodes.add(_make_node_key(input_layer.name, 0)) # Build a dict {depth: list of nodes with this depth} nodes_by_depth = collections.defaultdict(list) for node, depth in nodes_depths.items(): nodes_by_depth[depth].append(node) # Build a dict {depth: list of layers with this depth} layers_by_depth = collections.defaultdict(list) for layer, depth in layers_depths.items(): layers_by_depth[depth].append(layer) # Get sorted list of layer depths. depth_keys = list(layers_by_depth.keys()) depth_keys.sort(reverse=True) # Set self.layers ordered by depth. layers = [] for depth in depth_keys: layers_for_depth = layers_by_depth[depth] # Network.layers needs to have a deterministic order: # here we order them by traversal order. layers_for_depth.sort(key=lambda x: layer_indices[x]) layers.extend(layers_for_depth) # Get sorted list of node depths. depth_keys = list(nodes_by_depth.keys()) depth_keys.sort(reverse=True) # Check that all tensors required are computable. # computable_tensors: all tensors in the graph # that can be computed from the inputs provided. computable_tensors = set() for x in inputs: computable_tensors.add(id(x)) layers_with_complete_input = [] # To provide a better error msg. for depth in depth_keys: for node in nodes_by_depth[depth]: layer = node.layer if layer and not node.is_input: for x in tf.nest.flatten(node.keras_inputs): if id(x) not in computable_tensors: raise ValueError( f"Graph disconnected: cannot obtain value for tensor {x} " f'at layer "{layer.name}". The following previous layers ' f"were accessed without issue: {layers_with_complete_input}" ) for x in tf.nest.flatten(node.outputs): computable_tensors.add(id(x)) layers_with_complete_input.append(layer.name) # Ensure name unicity, which will be crucial for serialization # (since serialized nodes refer to layers by their name). 
all_names = [layer.name for layer in layers] for name in all_names: if all_names.count(name) != 1: raise ValueError( f'The name "{name}" is used {all_names.count(name)} ' "times in the model. All layer names should be unique." ) return network_nodes, nodes_by_depth, layers, layers_by_depth ``` ###Assistant : Validates a network's topology and gather its layers and nodes. Args: inputs: List of input tensors. outputs: List of outputs tensors. Returns: A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`. - nodes: list of Node instances. - nodes_by_depth: dict mapping ints (depth) to lists of node instances. - layers: list of Layer instances. - layers_by_depth: dict mapping ints (depth) to lists of layer instances. Raises: ValueError: In case the network is not valid (e.g. disconnected graph).
577
def default_batch_format(self) -> Type: # noqa: E501 import pandas as pd import pyarrow as pa schema = self.schema() assert isinstance(schema, (type, PandasBlockSchema, pa.Schema)) if isinstance(schema, type): return list if isinstance(schema, (PandasBlockSchema, pa.Schema)): if schema.names == [VALUE_COL_NAME]: return np.ndarray return pd.DataFrame
Return this dataset's default batch format. The default batch format describes what batches of data look like. To learn more about batch formats, read :ref:`writing user-defined functions <transform_datasets_writing_udfs>`. Example: If your dataset represents a list of Python objects, then the default batch format is ``list``. >>> ds = ray.data.range(100) >>> ds # doctest: +SKIP Dataset(num_blocks=20, num_rows=100, schema=<class 'int'>) >>> ds.default_batch_format() <class 'list'> >>> next(ds.iter_batches(batch_size=4)) [0, 1, 2, 3] If your dataset contains a single ``TensorDtype`` or ``ArrowTensorType`` column named ``__value__`` (as created by :func:`ray.data.from_numpy`), then the default batch format is ``np.ndarray``. For more information on tensor datasets, read the :ref:`tensor support guide <datasets_tensor_support>`. >>> ds = ray.data.range_tensor(100) >>> ds # doctest: +SKIP Dataset(num_blocks=20, num_rows=100, schema={__value__: ArrowTensorType(shape=(1,), dtype=int64)}) >>> ds.default_batch_format() <class 'numpy.ndarray'> >>> next(ds.iter_batches(batch_size=4)) array([[0], [1], [2], [3]]) If your dataset represents tabular data and doesn't only consist of a ``__value__`` tensor column (such as is created by :meth:`ray.data.from_numpy`), then the default batch format is ``pd.DataFrame``. >>> import pandas as pd >>> df = pd.DataFrame({"foo": ["a", "b"], "bar": [0, 1]}) >>> ds = ray.data.from_pandas(df) >>> ds # doctest: +SKIP Dataset(num_blocks=1, num_rows=2, schema={foo: object, bar: int64}) >>> ds.default_batch_format() <class 'pandas.core.frame.DataFrame'> >>> next(ds.iter_batches(batch_size=4)) foo bar 0 a 0 1 b 1 .. seealso:: :meth:`~Dataset.map_batches` Call this function to transform batches of data. :meth:`~Dataset.iter_batches` Call this function to iterate over batches of data.
219
40
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def default_batch_format(self) -> Type: # noqa: E501 import pandas as pd import pyarrow as pa schema = self.schema() assert isinstance(schema, (type, PandasBlockSchema, pa.Schema)) if isinstance(schema, type): return list if isinstance(schema, (PandasBlockSchema, pa.Schema)): if schema.names == [VALUE_COL_NAME]: return np.ndarray return pd.DataFrame ``` ###Assistant : Return this dataset's default batch format. The default batch format describes what batches of data look like. To learn more about batch formats, read :ref:`writing user-defined functions <transform_datasets_writing_udfs>`. Example: If your dataset represents a list of Python objects, then the default batch format is ``list``. >>> ds = ray.data.range(100) >>> ds # doctest: +SKIP Dataset(num_blocks=20, num_rows=100, schema=<class 'int'>) >>> ds.default_batch_format() <class 'list'> >>> next(ds.iter_batches(batch_size=4)) [0, 1, 2, 3] If your dataset contains a single ``TensorDtype`` or ``ArrowTensorType`` column named ``__value__`` (as created by :func:`ray.data.from_numpy`), then the default batch format is ``np.ndarray``. For more information on tensor datasets, read the :ref:`tensor support guide <datasets_tensor_support>`. >>> ds = ray.data.range_tensor(100) >>> ds # doctest: +SKIP Dataset(num_blocks=20, num_rows=100, schema={__value__: ArrowTensorType(shape=(1,), dtype=int64)}) >>> ds.default_batch_format() <class 'numpy.ndarray'> >>> next(ds.iter_batches(batch_size=4)) array([[0], [1], [2], [3]]) If your dataset represents tabular data and doesn't only consist of a ``__value__`` tensor column (such as is created by :meth:`ray.data.from_numpy`), then the default batch format is ``pd.DataFrame``. >>> import pandas as pd >>> df = pd.DataFrame({"foo": ["a", "b"], "bar": [0, 1]}) >>> ds = ray.data.from_pandas(df) >>> ds # doctest: +SKIP Dataset(num_blocks=1, num_rows=2, schema={foo: object, bar: int64}) >>> ds.default_batch_format() <class 'pandas.core.frame.DataFrame'> >>> next(ds.iter_batches(batch_size=4)) foo bar 0 a 0 1 b 1 .. seealso:: :meth:`~Dataset.map_batches` Call this function to transform batches of data. :meth:`~Dataset.iter_batches` Call this function to iterate over batches of data.
578
def test_decision_tree_regressor_sample_weight_consistency(criterion): tree_params = dict(criterion=criterion) tree = DecisionTreeRegressor(**tree_params, random_state=42) for kind in ["zeros", "ones"]: check_sample_weights_invariance( "DecisionTreeRegressor_" + criterion, tree, kind="zeros" ) rng = np.random.RandomState(0) n_samples, n_features = 10, 5 X = rng.rand(n_samples, n_features) y = np.mean(X, axis=1) + rng.rand(n_samples) # make it positive in order to work also for poisson criterion y += np.min(y) + 0.1 # check that multiplying sample_weight by 2 is equivalent # to repeating corresponding samples twice X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) y2 = np.concatenate([y, y[: n_samples // 2]]) sample_weight_1 = np.ones(len(y)) sample_weight_1[: n_samples // 2] = 2 tree1 = DecisionTreeRegressor(**tree_params).fit( X, y, sample_weight=sample_weight_1 ) tree2 = DecisionTreeRegressor(**tree_params).fit(X2, y2, sample_weight=None) assert tree1.tree_.node_count == tree2.tree_.node_count # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not # exactly the same, but on the training set, those differences do not # matter and thus predictions are the same. assert_allclose(tree1.predict(X), tree2.predict(X)) # TODO: Remove in v1.2 @pytest.mark.parametrize("Tree", REG_TREES.values()) @pytest.mark.parametrize( "old_criterion, new_criterion", [ ("mse", "squared_error"), ("mae", "absolute_error"), ], )
Test that the impact of sample_weight is consistent.
8
159
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_decision_tree_regressor_sample_weight_consistency(criterion): tree_params = dict(criterion=criterion) tree = DecisionTreeRegressor(**tree_params, random_state=42) for kind in ["zeros", "ones"]: check_sample_weights_invariance( "DecisionTreeRegressor_" + criterion, tree, kind="zeros" ) rng = np.random.RandomState(0) n_samples, n_features = 10, 5 X = rng.rand(n_samples, n_features) y = np.mean(X, axis=1) + rng.rand(n_samples) # make it positive in order to work also for poisson criterion y += np.min(y) + 0.1 # check that multiplying sample_weight by 2 is equivalent # to repeating corresponding samples twice X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) y2 = np.concatenate([y, y[: n_samples // 2]]) sample_weight_1 = np.ones(len(y)) sample_weight_1[: n_samples // 2] = 2 tree1 = DecisionTreeRegressor(**tree_params).fit( X, y, sample_weight=sample_weight_1 ) tree2 = DecisionTreeRegressor(**tree_params).fit(X2, y2, sample_weight=None) assert tree1.tree_.node_count == tree2.tree_.node_count # Thresholds, tree.tree_.threshold, and values, tree.tree_.value, are not # exactly the same, but on the training set, those differences do not # matter and thus predictions are the same. assert_allclose(tree1.predict(X), tree2.predict(X)) # TODO: Remove in v1.2 @pytest.mark.parametrize("Tree", REG_TREES.values()) @pytest.mark.parametrize( "old_criterion, new_criterion", [ ("mse", "squared_error"), ("mae", "absolute_error"), ], ) ``` ###Assistant : Test that the impact of sample_weight is consistent.
579
def test_with_fk_to_field(self): response = self.client.get( reverse("admin:auth_user_changelist") + "?q=joe&%s=id" % TO_FIELD_VAR ) self.assertContains(response, "\n1 user\n") self.assertContains( response, '<input type="hidden" name="%s" value="id">' % TO_FIELD_VAR, html=True, )
The to_field GET parameter is preserved when a search is performed. Refs #10918.
13
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_with_fk_to_field(self): response = self.client.get( reverse("admin:auth_user_changelist") + "?q=joe&%s=id" % TO_FIELD_VAR ) self.assertContains(response, "\n1 user\n") self.assertContains( response, '<input type="hidden" name="%s" value="id">' % TO_FIELD_VAR, html=True, ) ``` ###Assistant : The to_field GET parameter is preserved when a search is performed. Refs #10918.
580
def remove_module_load(state_dict): new_state_dict = OrderedDict() for k, v in state_dict.items(): new_state_dict[k[7:]] = v return new_state_dict
Create a new OrderedDict that does not contain the `module.` prefix.
8
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def remove_module_load(state_dict): new_state_dict = OrderedDict() for k, v in state_dict.items(): new_state_dict[k[7:]] = v return new_state_dict ``` ###Assistant : Create a new OrderedDict that does not contain the `module.` prefix.
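A hedged usage sketch for the helper documented above: checkpoints saved from a model wrapped in `torch.nn.DataParallel` carry a `module.` prefix on every key, which the helper strips. The tiny `nn.Linear` model is an assumption for illustration only:

```python
from collections import OrderedDict

from torch import nn

def remove_module_load(state_dict):
    # Same helper as in the row above: drop the leading "module." (7 chars)
    # that nn.DataParallel adds to every state_dict key.
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        new_state_dict[k[7:]] = v
    return new_state_dict

model = nn.Linear(4, 2)
wrapped = nn.DataParallel(model)              # keys become "module.weight", "module.bias"
cleaned = remove_module_load(wrapped.state_dict())
model.load_state_dict(cleaned)                # loads cleanly into the unwrapped model
```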
581
def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) LAYOUTLMV3_START_DOCSTRING = r LAYOUTLMV3_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.", LAYOUTLMV3_START_DOCSTRING, )
Method used for serving the model. Args: inputs (`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. </Tip> Parameters: config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. Args: input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. Indices can be obtained using [`LayoutLMv3Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size, config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height / config.patch_size) * (width / config.patch_size))`. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. 
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are position IDs?](../glossary#position-ids) head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
689
31
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) LAYOUTLMV3_START_DOCSTRING = r LAYOUTLMV3_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.", LAYOUTLMV3_START_DOCSTRING, ) ``` ###Assistant : Method used for serving the model. Args: inputs (`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. </Tip> Parameters: config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. Args: input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. Indices can be obtained using [`LayoutLMv3Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size, config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height / config.patch_size) * (width / config.patch_size))`. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. 
See `pixel_values` for `patch_sequence_length`. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are position IDs?](../glossary#position-ids) head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
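A minimal sketch of the shape bookkeeping described above, built with dummy tensors that satisfy the documented input contract. The image size (224), patch size (16), and token count are illustrative assumptions, not values taken from any particular checkpoint.
```Python
import tensorflow as tf

# Illustrative sizes only: 224x224 images split into 16x16 patches are assumptions.
batch_size, token_sequence_length = 2, 20
height = width = 224
patch_size = 16

# patch_sequence_length = (height / patch_size) * (width / patch_size)
patch_sequence_length = (height // patch_size) * (width // patch_size)
# Full sequence length as described above: tokens + patches + 1 ([CLS]).
sequence_length = token_sequence_length + patch_sequence_length + 1

input_ids = tf.zeros((batch_size, token_sequence_length), dtype=tf.int32)
bbox = tf.zeros((batch_size, token_sequence_length, 4), dtype=tf.int32)    # (x0, y0, x1, y1) per token
pixel_values = tf.zeros((batch_size, 3, height, width), dtype=tf.float32)  # channels-first document images

print(patch_sequence_length, sequence_length)  # 196 217
```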
582
def media_image_url(self):
    if self._table.active_track:
        return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE)
    return super().media_image_url
Return the URL for a thumbnail image of the current track.
11
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def media_image_url(self): if self._table.active_track: return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE) return super().media_image_url ``` ###Assistant : Return the URL for a thumbnail image of the current track.
583
def test_fluctuating_ongoing_requests(delay_s):
    config = AutoscalingConfig(
        min_replicas=1,
        max_replicas=10,
        target_num_ongoing_requests_per_replica=50,
        upscale_delay_s=delay_s,
        downscale_delay_s=delay_s)

    policy = BasicAutoscalingPolicy(config)

    if delay_s > 0:
        wait_periods = int(delay_s / CONTROL_LOOP_PERIOD_S)
        assert wait_periods > 1

    underload_requests, overload_requests = [20, 20], [100]
    trials = 1000

    new_num_replicas = None
    for trial in range(trials):
        if trial % 2 == 0:
            new_num_replicas = policy.get_decision_num_replicas(
                current_num_ongoing_requests=overload_requests,
                curr_target_num_replicas=1)
            if delay_s > 0:
                assert new_num_replicas == 1, trial
            else:
                assert new_num_replicas == 2, trial
        else:
            new_num_replicas = policy.get_decision_num_replicas(
                current_num_ongoing_requests=underload_requests,
                curr_target_num_replicas=2)
            if delay_s > 0:
                assert new_num_replicas == 2, trial
            else:
                assert new_num_replicas == 1, trial


@pytest.mark.parametrize(
    "ongoing_requests",
    [[7, 1, 8, 4], [8, 1, 8, 4], [6, 1, 8, 4], [0, 1, 8, 4]])
Simulates a workload that switches between too many and too few ongoing requests.
13
107
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_fluctuating_ongoing_requests(delay_s): config = AutoscalingConfig( min_replicas=1, max_replicas=10, target_num_ongoing_requests_per_replica=50, upscale_delay_s=delay_s, downscale_delay_s=delay_s) policy = BasicAutoscalingPolicy(config) if delay_s > 0: wait_periods = int(delay_s / CONTROL_LOOP_PERIOD_S) assert wait_periods > 1 underload_requests, overload_requests = [20, 20], [100] trials = 1000 new_num_replicas = None for trial in range(trials): if trial % 2 == 0: new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=overload_requests, curr_target_num_replicas=1) if delay_s > 0: assert new_num_replicas == 1, trial else: assert new_num_replicas == 2, trial else: new_num_replicas = policy.get_decision_num_replicas( current_num_ongoing_requests=underload_requests, curr_target_num_replicas=2) if delay_s > 0: assert new_num_replicas == 2, trial else: assert new_num_replicas == 1, trial @pytest.mark.parametrize( "ongoing_requests", [[7, 1, 8, 4], [8, 1, 8, 4], [6, 1, 8, 4], [0, 1, 8, 4]]) ``` ###Assistant : Simulates a workload that switches between too many and too few ongoing requests.
584
def librosa_pad_lr(x, fsize, fshift, pad_sides=1):
    assert pad_sides in (1, 2)
    # return int(fsize // 2)
    pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0]
    if pad_sides == 1:
        return 0, pad
    else:
        return pad // 2, pad // 2 + pad % 2


# Conversions
Compute right-side padding (final frame) or padding on both sides (first and final frames).
13
47
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def librosa_pad_lr(x, fsize, fshift, pad_sides=1): assert pad_sides in (1, 2) # return int(fsize // 2) pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0] if pad_sides == 1: return 0, pad else: return pad // 2, pad // 2 + pad % 2 # Conversions ``` ###Assistant : compute right padding (final frame) or both sides padding (first and final frames)
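A small worked example of the padding arithmetic, assuming the function above is in scope. Note that `fsize` is unused in the current implementation (the `fsize // 2` variant is commented out); only `fshift` and the signal length drive the result.
```Python
import numpy as np

x = np.zeros(1000)   # a 1000-sample signal
fshift = 256         # hop size
# (1000 // 256 + 1) * 256 - 1000 = 4 * 256 - 1000 = 24 samples of padding

print(librosa_pad_lr(x, fsize=1024, fshift=fshift, pad_sides=1))  # (0, 24)  pad only the right side
print(librosa_pad_lr(x, fsize=1024, fshift=fshift, pad_sides=2))  # (12, 12) split between both sides
```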
585
def setup_tpu(tpu_driver_version='tpu_driver-0.2'):
    global TPU_DRIVER_MODE

    if not TPU_DRIVER_MODE:
        colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0]
        url = f'http://{colab_tpu_addr}:8475/requestversion/{tpu_driver_version}'
        requests.post(url)
        TPU_DRIVER_MODE = 1

    # The following is required to use TPU Driver as JAX's backend.
    config.FLAGS.jax_xla_backend = "tpu_driver"
    config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
Sets up Colab to run on TPU. Note: make sure the Colab Runtime is set to Accelerator: TPU. Args ---- tpu_driver_version : (str) specify the version identifier for the tpu driver. Defaults to "tpu_driver-0.2", which can be used with jaxlib 0.3.20. Set to "tpu_driver_nightly" to use the nightly tpu driver build.
51
37
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def setup_tpu(tpu_driver_version='tpu_driver-0.2'): global TPU_DRIVER_MODE if not TPU_DRIVER_MODE: colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0] url = f'http://{colab_tpu_addr}:8475/requestversion/{tpu_driver_version}' requests.post(url) TPU_DRIVER_MODE = 1 # The following is required to use TPU Driver as JAX's backend. config.FLAGS.jax_xla_backend = "tpu_driver" config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR'] ``` ###Assistant : Sets up Colab to run on TPU. Note: make sure the Colab Runtime is set to Accelerator: TPU. Args ---- tpu_driver_version : (str) specify the version identifier for the tpu driver. Defaults to "tpu_driver-0.2", which can be used with jaxlib 0.3.20. Set to "tpu_driver_nightly" to use the nightly tpu driver build.
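A hedged usage sketch: this only works inside a Colab notebook whose runtime is set to TPU, since the function reads `COLAB_TPU_ADDR` from the environment. The `jax.devices()` check is just one way to confirm the backend switched; it is not part of the function itself.
```Python
# Run inside Colab with Runtime -> Change runtime type -> Hardware accelerator: TPU.
setup_tpu()  # or setup_tpu('tpu_driver_nightly') for the nightly driver build

import jax
print(jax.devices())  # expected to list TPU devices once the driver backend is active
```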
586
def test_timeout_lock(self):
    lock = self.get_success(self.store.try_acquire_lock("name", "key"))
    assert lock is not None

    self.get_success(lock.__aenter__())

    # We simulate the process getting stuck by cancelling the looping call
    # that keeps the lock active.
    lock._looping_call.stop()

    # Wait for the lock to timeout.
    self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000)

    lock2 = self.get_success(self.store.try_acquire_lock("name", "key"))
    self.assertIsNotNone(lock2)

    self.assertFalse(self.get_success(lock.is_still_valid()))
Test that we time out locks if they're not updated for ages
12
49
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_timeout_lock(self): lock = self.get_success(self.store.try_acquire_lock("name", "key")) assert lock is not None self.get_success(lock.__aenter__()) # We simulate the process getting stuck by cancelling the looping call # that keeps the lock active. lock._looping_call.stop() # Wait for the lock to timeout. self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000) lock2 = self.get_success(self.store.try_acquire_lock("name", "key")) self.assertIsNotNone(lock2) self.assertFalse(self.get_success(lock.is_still_valid())) ``` ###Assistant : Test that we time out locks if they're not updated for ages
587
def random_brightness(x, brightness_range, scale=True):
    if len(brightness_range) != 2:
        raise ValueError(
            "`brightness_range should be tuple or list of two floats. "
            "Received: %s" % (brightness_range,)
        )
    u = np.random.uniform(brightness_range[0], brightness_range[1])
    return apply_brightness_shift(x, u, scale)
Performs a random brightness shift. Deprecated: `tf.keras.preprocessing.image.random_brightness` does not operate on tensors and is not recommended for new code. Prefer `tf.keras.layers.RandomBrightness` which provides equivalent functionality as a preprocessing layer. For more information, see the tutorial for [augmenting images]( https://www.tensorflow.org/tutorials/images/data_augmentation), as well as the [preprocessing layer guide]( https://www.tensorflow.org/guide/keras/preprocessing_layers). Args: x: Input tensor. Must be 3D. brightness_range: Tuple of floats; brightness range. scale: Whether to rescale the image such that minimum and maximum values are 0 and 255 respectively. Default: True. Returns: Numpy image tensor. Raises: ValueError if `brightness_range` isn't a tuple.
90
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def random_brightness(x, brightness_range, scale=True): if len(brightness_range) != 2: raise ValueError( "`brightness_range should be tuple or list of two floats. " "Received: %s" % (brightness_range,) ) u = np.random.uniform(brightness_range[0], brightness_range[1]) return apply_brightness_shift(x, u, scale) ``` ###Assistant : Performs a random brightness shift. Deprecated: `tf.keras.preprocessing.image.random_brightness` does not operate on tensors and is not recommended for new code. Prefer `tf.keras.layers.RandomBrightness` which provides equivalent functionality as a preprocessing layer. For more information, see the tutorial for [augmenting images]( https://www.tensorflow.org/tutorials/images/data_augmentation), as well as the [preprocessing layer guide]( https://www.tensorflow.org/guide/keras/preprocessing_layers). Args: x: Input tensor. Must be 3D. brightness_range: Tuple of floats; brightness range. scale: Whether to rescale the image such that minimum and maximum values are 0 and 255 respectively. Default: True. Returns: Numpy image tensor. Raises: ValueError if `brightness_range` isn't a tuple.
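A minimal usage sketch, assuming the function above (or its `tf.keras.preprocessing.image.random_brightness` equivalent) is importable and that the Keras image utilities' Pillow dependency is installed. The input is a 3D image array; the shape is preserved and only the brightness changes.
```Python
import numpy as np

# A dummy 3D image (height, width, channels) with pixel values in [0, 255].
img = np.random.uniform(0, 255, size=(64, 64, 3))

out = random_brightness(img, brightness_range=(0.5, 1.5))
print(out.shape)  # (64, 64, 3)

# brightness_range must contain exactly two floats; this would raise ValueError:
# random_brightness(img, brightness_range=(0.5,))
```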
588
def test_check_loop_sync(caplog):
    hasync.check_loop()
    assert "Detected blocking call inside the event loop" not in caplog.text
Test check_loop does nothing when called from thread.
8
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_check_loop_sync(caplog): hasync.check_loop() assert "Detected blocking call inside the event loop" not in caplog.text ``` ###Assistant : Test check_loop does nothing when called from thread.
589
def update(self) -> bool:
    try:
        # Add or remove DeploymentReplica instances in self._replicas.
        # This should be the only place we adjust total number of replicas
        # we manage.
        running_replicas_changed = self._scale_deployment_replicas()

        # Check the state of existing replicas and transition if necessary.
        running_replicas_changed |= self._check_and_update_replicas()

        if running_replicas_changed:
            self._notify_running_replicas_changed()

        deleted = self._check_curr_status()
    except Exception:
        self._curr_status_info = DeploymentStatusInfo(
            name=self._name,
            status=DeploymentStatus.UNHEALTHY,
            message="Failed to update deployment:" f"\n{traceback.format_exc()}",
        )
        deleted = False

    return deleted
Attempts to reconcile this deployment to match its goal state. This is an asynchronous call; it's expected to be called repeatedly. Also updates the internal DeploymentStatusInfo based on the current state of the system. Returns true if this deployment was successfully deleted.
42
70
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self) -> bool: try: # Add or remove DeploymentReplica instances in self._replicas. # This should be the only place we adjust total number of replicas # we manage. running_replicas_changed = self._scale_deployment_replicas() # Check the state of existing replicas and transition if necessary. running_replicas_changed |= self._check_and_update_replicas() if running_replicas_changed: self._notify_running_replicas_changed() deleted = self._check_curr_status() except Exception: self._curr_status_info = DeploymentStatusInfo( name=self._name, status=DeploymentStatus.UNHEALTHY, message="Failed to update deployment:" f"\n{traceback.format_exc()}", ) deleted = False return deleted ``` ###Assistant : Attempts to reconcile this deployment to match its goal state. This is an asynchronous call; it's expected to be called repeatedly. Also updates the internal DeploymentStatusInfo based on the current state of the system. Returns true if this deployment was successfully deleted.
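A hypothetical sketch of how a controller might drive this reconciliation method. The `deployment_state` object and the loop period are assumptions for illustration, not the actual Ray Serve controller code.
```Python
import time

# Hypothetical driver loop: reconcile repeatedly until the deployment reports deletion.
def run_control_loop(deployment_state, period_s=0.1):
    while True:
        deleted = deployment_state.update()  # returns True once deletion has completed
        if deleted:
            break
        time.sleep(period_s)
```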
590
def test_https_malformed_host(self):
    req = self._get_request(method="POST")
    req._is_secure_override = True
    req.META["HTTP_HOST"] = "@malformed"
    req.META["HTTP_REFERER"] = "https://www.evil.org/somepage"
    req.META["SERVER_PORT"] = "443"
    mw = CsrfViewMiddleware(token_view)
    expected = (
        "Referer checking failed - https://www.evil.org/somepage does not "
        "match any trusted origins."
    )
    with self.assertRaisesMessage(RejectRequest, expected):
        mw._check_referer(req)
    response = mw.process_view(req, token_view, (), {})
    self.assertEqual(response.status_code, 403)
CsrfViewMiddleware generates a 403 response if it receives an HTTPS request with a bad host.
15
48
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_https_malformed_host(self): req = self._get_request(method="POST") req._is_secure_override = True req.META["HTTP_HOST"] = "@malformed" req.META["HTTP_REFERER"] = "https://www.evil.org/somepage" req.META["SERVER_PORT"] = "443" mw = CsrfViewMiddleware(token_view) expected = ( "Referer checking failed - https://www.evil.org/somepage does not " "match any trusted origins." ) with self.assertRaisesMessage(RejectRequest, expected): mw._check_referer(req) response = mw.process_view(req, token_view, (), {}) self.assertEqual(response.status_code, 403) ``` ###Assistant : CsrfViewMiddleware generates a 403 response if it receives an HTTPS request with a bad host.
591
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
    # TODO: self.authenticator should optionally pull from self._session.auth
    customers_stream = Customers(authenticator=self._session.auth)
    for customer in customers_stream.read_records(sync_mode=SyncMode.full_refresh):
        yield {"customer_id": customer["id"]}
This stream is sliced per `customer_id`. This has two implications: (1) State can be checkpointed after processing each slice (2) The other parameters (e.g. request_params, path) can be dependent on this slice. This allows us to pull data on a per customer_id basis, since that's what Orb exposes.
48
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]: # TODO: self.authenticator should optionally pull from self._session.auth customers_stream = Customers(authenticator=self._session.auth) for customer in customers_stream.read_records(sync_mode=SyncMode.full_refresh): yield {"customer_id": customer["id"]} ``` ###Assistant : This stream is sliced per `customer_id`. This has two implications: (1) State can be checkpointed after processing each slice (2) The other parameters (e.g. request_params, path) can be dependent on this slice. This allows us to pull data on a per customer_id basis, since that's what Orb exposes.
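A hedged sketch of how the Airbyte CDK typically consumes these slices: each emitted `{"customer_id": ...}` dict is handed back to `read_records` as `stream_slice`. The `stream` variable is an assumed, already-configured instance of the stream above.
```Python
from airbyte_cdk.models import SyncMode

# Hypothetical consumption of the per-customer slices by the framework or a test harness.
for stream_slice in stream.stream_slices(sync_mode=SyncMode.incremental):
    records = stream.read_records(
        sync_mode=SyncMode.incremental,
        stream_slice=stream_slice,  # e.g. {"customer_id": "cus_123"}
    )
    for record in records:
        print(record)
```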
592
def ragged_assert_compatible_and_get_flat_values(values, mask=None):
    if isinstance(values, list):
        is_all_ragged = all(isinstance(rt, tf.RaggedTensor) for rt in values)
        is_any_ragged = any(isinstance(rt, tf.RaggedTensor) for rt in values)
    else:
        is_all_ragged = isinstance(values, tf.RaggedTensor)
        is_any_ragged = is_all_ragged
    if is_all_ragged and ((mask is None) or isinstance(mask, tf.RaggedTensor)):
        to_be_stripped = False
        if not isinstance(values, list):
            values = [values]
            to_be_stripped = True

        # NOTE: we leave the flat_values compatibility to
        # tf.TensorShape `assert_is_compatible_with` check if both dynamic
        # dimensions are equal and then use the flat_values.
        nested_row_split_list = [rt.nested_row_splits for rt in values]
        assertion_list = _assert_splits_match(nested_row_split_list)

        # if both are ragged sample_weights also should be ragged with same
        # dims.
        if isinstance(mask, tf.RaggedTensor):
            assertion_list_for_mask = _assert_splits_match(
                [nested_row_split_list[0], mask.nested_row_splits]
            )
            with tf.control_dependencies(assertion_list_for_mask):
                mask = tf.expand_dims(mask.flat_values, -1)

        # values has at least 1 element.
        flat_values = []
        for value in values:
            with tf.control_dependencies(assertion_list):
                flat_values.append(tf.expand_dims(value.flat_values, -1))

        values = flat_values[0] if to_be_stripped else flat_values

    elif is_any_ragged:
        raise TypeError(
            "Some of the inputs are not tf.RaggedTensor. "
            f"Input received: {values}"
        )
    # values are empty or value are not ragged and mask is ragged.
    elif isinstance(mask, tf.RaggedTensor):
        raise TypeError(
            "Ragged mask is not allowed with non-ragged inputs. "
            f"Input received: {values}, mask received: {mask}"
        )
    return values, mask
If ragged, it checks the compatibility and then returns the flat_values. Note: If two tensors are dense, it does not check their compatibility. Note: Although two ragged tensors with different ragged ranks could have identical overall rank and dimension sizes and hence be compatible, we do not support those cases. Args: values: A list of potentially ragged tensor of the same ragged_rank. mask: A potentially ragged tensor of the same ragged_rank as elements in Values. Returns: A tuple in which the first element is the list of tensors and the second is the mask tensor. ([Values], mask). Mask and the element in Values are equal to the flat_values of the input arguments (if they were ragged).
116
193
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ragged_assert_compatible_and_get_flat_values(values, mask=None): if isinstance(values, list): is_all_ragged = all(isinstance(rt, tf.RaggedTensor) for rt in values) is_any_ragged = any(isinstance(rt, tf.RaggedTensor) for rt in values) else: is_all_ragged = isinstance(values, tf.RaggedTensor) is_any_ragged = is_all_ragged if is_all_ragged and ((mask is None) or isinstance(mask, tf.RaggedTensor)): to_be_stripped = False if not isinstance(values, list): values = [values] to_be_stripped = True # NOTE: we leave the flat_values compatibility to # tf.TensorShape `assert_is_compatible_with` check if both dynamic # dimensions are equal and then use the flat_values. nested_row_split_list = [rt.nested_row_splits for rt in values] assertion_list = _assert_splits_match(nested_row_split_list) # if both are ragged sample_weights also should be ragged with same # dims. if isinstance(mask, tf.RaggedTensor): assertion_list_for_mask = _assert_splits_match( [nested_row_split_list[0], mask.nested_row_splits] ) with tf.control_dependencies(assertion_list_for_mask): mask = tf.expand_dims(mask.flat_values, -1) # values has at least 1 element. flat_values = [] for value in values: with tf.control_dependencies(assertion_list): flat_values.append(tf.expand_dims(value.flat_values, -1)) values = flat_values[0] if to_be_stripped else flat_values elif is_any_ragged: raise TypeError( "Some of the inputs are not tf.RaggedTensor. " f"Input received: {values}" ) # values are empty or value are not ragged and mask is ragged. elif isinstance(mask, tf.RaggedTensor): raise TypeError( "Ragged mask is not allowed with non-ragged inputs. " f"Input received: {values}, mask received: {mask}" ) return values, mask ``` ###Assistant : If ragged, it checks the compatibility and then returns the flat_values. Note: If two tensors are dense, it does not check their compatibility. Note: Although two ragged tensors with different ragged ranks could have identical overall rank and dimension sizes and hence be compatible, we do not support those cases. Args: values: A list of potentially ragged tensor of the same ragged_rank. mask: A potentially ragged tensor of the same ragged_rank as elements in Values. Returns: A tuple in which the first element is the list of tensors and the second is the mask tensor. ([Values], mask). Mask and the element in Values are equal to the flat_values of the input arguments (if they were ragged).
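A small sketch of the happy path, assuming the helper above and TensorFlow are in scope: a ragged values tensor and a ragged mask with identical row splits come back as dense flat values with a trailing singleton dimension.
```Python
import tensorflow as tf

values = tf.ragged.constant([[1.0, 2.0], [3.0]])
mask = tf.ragged.constant([[True, False], [True]])

flat_values, flat_mask = ragged_assert_compatible_and_get_flat_values(values, mask)
print(flat_values.shape, flat_mask.shape)  # (3, 1) (3, 1)

# Mixing ragged and non-ragged inputs raises TypeError, e.g.:
# ragged_assert_compatible_and_get_flat_values(tf.constant([1.0, 2.0]), mask)
```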
593
def update_sandbox_args(self):
    if self.is_sandbox:
        host, port = HubIO.deploy_public_sandbox(self.args)
        self._sandbox_deployed = True
        self.first_pod_args.host = host
        self.first_pod_args.port = port
        if self.head_args:
            self.pod_args['head'].host = host
            self.pod_args['head'].port = port
Update args of all its pods based on the host and port returned by Hubble
15
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_sandbox_args(self): if self.is_sandbox: host, port = HubIO.deploy_public_sandbox(self.args) self._sandbox_deployed = True self.first_pod_args.host = host self.first_pod_args.port = port if self.head_args: self.pod_args['head'].host = host self.pod_args['head'].port = port ``` ###Assistant : Update args of all its pods based on the host and port returned by Hubble
594
def download_all():
    for name in DATA_HUB:
        download(name)

DATA_HUB['kaggle_house_train'] = (
    DATA_URL + 'kaggle_house_pred_train.csv',
    '585e9cc93e70b39160e7921475f9bcd7d31219ce')

DATA_HUB['kaggle_house_test'] = (
    DATA_URL + 'kaggle_house_pred_test.csv',
    'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
Download all files in the DATA_HUB. Defined in :numref:`sec_kaggle_house`
9
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def download_all(): for name in DATA_HUB: download(name) DATA_HUB['kaggle_house_train'] = ( DATA_URL + 'kaggle_house_pred_train.csv', '585e9cc93e70b39160e7921475f9bcd7d31219ce') DATA_HUB['kaggle_house_test'] = ( DATA_URL + 'kaggle_house_pred_test.csv', 'fa19780a7b011d9b009e8bff8e99922a8ee2eb90') ``` ###Assistant : Download all files in the DATA_HUB. Defined in :numref:`sec_kaggle_house`
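A usage sketch in the style of the d2l Kaggle-house example, assuming the companion `download(name)` helper returns the local file path (as it does in the book's utilities) and that pandas is available.
```Python
import pandas as pd

download_all()  # fetch every file currently registered in DATA_HUB

train_data = pd.read_csv(download('kaggle_house_train'))
test_data = pd.read_csv(download('kaggle_house_test'))
print(train_data.shape, test_data.shape)
```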
595
def generate_matches(self, nodes):
    r = {}
    if nodes and self.match(nodes[0], r):
        yield 1, r
Generator yielding all matches for this pattern. Default implementation for non-wildcard patterns.
12
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_matches(self, nodes): r = {} if nodes and self.match(nodes[0], r): yield 1, r ``` ###Assistant : Generator yielding all matches for this pattern. Default implementation for non-wildcard patterns.
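A hedged sketch using the stdlib `lib2to3` pattern classes this default implementation belongs to: matching a single NAME leaf against a `LeafPattern` yields exactly one `(count, results)` pair. (`lib2to3` is deprecated in recent Python versions but still importable.)
```Python
from lib2to3 import pytree
from lib2to3.pgen2 import token

pattern = pytree.LeafPattern(token.NAME, "print")  # match a NAME leaf with value "print"
leaf = pytree.Leaf(token.NAME, "print")

for count, results in pattern.generate_matches([leaf]):
    print(count, results)  # 1 {}  -- one node consumed, no named captures
```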
596
def user_can_delete_obj(self, user, obj):
    perm_codename = self.get_perm_codename("delete")
    return self.user_has_specific_permission(user, perm_codename)
Return a boolean to indicate whether `user` is permitted to 'delete' a specific `self.model` instance.
15
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def user_can_delete_obj(self, user, obj): perm_codename = self.get_perm_codename("delete") return self.user_has_specific_permission(user, perm_codename) ``` ###Assistant : Return a boolean to indicate whether `user` is permitted to 'delete' a specific `self.model` instance.
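A hypothetical sketch of how a view might use this helper to gate deletion; `permission_helper`, `request`, and `instance` are assumed to exist in the surrounding (Wagtail-style) view code and are not defined by the method above.
```Python
from django.core.exceptions import PermissionDenied

def delete_view(permission_helper, request, instance):
    # Refuse the action unless the helper grants the 'delete' permission.
    if not permission_helper.user_can_delete_obj(request.user, instance):
        raise PermissionDenied
    instance.delete()
```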
597
def _busy_indicator_trace(self, *args) -> None:
    logger.trace("Busy indicator trace: %s", args)  # type: ignore
    if self._busy_tkvar.get():
        self._start_busy_indicator()
    else:
        self._stop_busy_indicator()
Show or hide busy indicator based on whether the preview is updating. Parameters ---------- args: unused Required for tkinter event, but unused
22
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _busy_indicator_trace(self, *args) -> None: logger.trace("Busy indicator trace: %s", args) # type: ignore if self._busy_tkvar.get(): self._start_busy_indicator() else: self._stop_busy_indicator() ``` ###Assistant : Show or hide busy indicator based on whether the preview is updating. Parameters ---------- args: unused Required for tkinter event, but unused
598
def _gen_html_string(self):
    self.html_string = _hilite_me(
        self.code_string,
        self.language,
        self.style,
        self.insert_line_no,
        "border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;",
        self.file_path,
        self.line_no_from,
    )

    if self.generate_html_file:
        os.makedirs(
            os.path.join("assets", "codes", "generated_html_files"),
            exist_ok=True,
        )
        with open(
            os.path.join(
                "assets",
                "codes",
                "generated_html_files",
                self.file_name + ".html",
            ),
            "w",
        ) as file:
            file.write(self.html_string)
Generates an HTML string with the code highlighted and stores it in the html_string attribute.
13
41
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _gen_html_string(self): self.html_string = _hilite_me( self.code_string, self.language, self.style, self.insert_line_no, "border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;", self.file_path, self.line_no_from, ) if self.generate_html_file: os.makedirs( os.path.join("assets", "codes", "generated_html_files"), exist_ok=True, ) with open( os.path.join( "assets", "codes", "generated_html_files", self.file_name + ".html", ), "w", ) as file: file.write(self.html_string) ``` ###Assistant : Function to generate html string with code highlighted and stores in variable html_string.
599
def __getstate__(self):
    state = self.__dict__.copy()
    # remove the unpickleable RLock object
    state.pop("_cookies_lock")
    return state
Unlike a normal CookieJar, this class is pickleable.
8
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __getstate__(self): state = self.__dict__.copy() # remove the unpickleable RLock object state.pop("_cookies_lock") return state ``` ###Assistant : Unlike a normal CookieJar, this class is pickleable.
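A quick round-trip sketch showing the point of removing the lock before pickling; it assumes the `requests` library's cookie jar, which is where this method lives.
```Python
import pickle
import requests

jar = requests.cookies.RequestsCookieJar()
jar.set("session", "abc123", domain="example.com", path="/")

# Pickling works because __getstate__ drops the unpicklable RLock first.
restored = pickle.loads(pickle.dumps(jar))
print(restored.get("session"))  # abc123
```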