Unnamed: 0 (int64, 0–2.93k) | code (stringlengths 101–62.2k) | docs (stringlengths 51–10.7k) | doc_len (int64, 4–1.74k) | words (int64, 4–4.82k) | lang (stringclasses 1) | prompt (stringlengths 320–71.2k)
---|---|---|---|---|---|---|
100 | def _tune_legacy_checkpoint_score_attr(self) -> Optional[str]:
if self.checkpoint_score_attribute is None:
return self.checkpoint_score_attribute
prefix = ""
if self.checkpoint_score_order == MIN:
prefix = "min-"
return f"{prefix}{self.checkpoint_score_attribute}"
# Alias for backwards compatibility
deprecation_message = (
"`CheckpointStrategy` is deprecated and will be removed in "
"the future. Please use `ray.air.config.CheckpointStrategy` "
"instead."
)
@Deprecated(message=deprecation_message)
@dataclass | Same as ``checkpoint_score_attr`` in ``tune.run``.
Only used for Legacy API compatibility.
| 11 | 49 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _tune_legacy_checkpoint_score_attr(self) -> Optional[str]:
if self.checkpoint_score_attribute is None:
return self.checkpoint_score_attribute
prefix = ""
if self.checkpoint_score_order == MIN:
prefix = "min-"
return f"{prefix}{self.checkpoint_score_attribute}"
# Alias for backwards compatibility
deprecation_message = (
"`CheckpointStrategy` is deprecated and will be removed in "
"the future. Please use `ray.air.config.CheckpointStrategy` "
"instead."
)
@Deprecated(message=deprecation_message)
@dataclass
```
###Assistant : Same as ``checkpoint_score_attr`` in ``tune.run``.
Only used for Legacy API compatibility.
|
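A self-contained sketch of the prefix mapping this method implements; the `LegacyCheckpointConfig` class and `MIN` constant below are illustrative stand-ins, not Ray's actual definitions:
```Python
MIN = "min"  # assumed sentinel, mirroring the comparison in the method above

class LegacyCheckpointConfig:
    def __init__(self, attribute=None, order="max"):
        self.checkpoint_score_attribute = attribute
        self.checkpoint_score_order = order

    @property
    def _tune_legacy_checkpoint_score_attr(self):
        # None passes through; "min" order gets a "min-" prefix
        if self.checkpoint_score_attribute is None:
            return None
        prefix = "min-" if self.checkpoint_score_order == MIN else ""
        return f"{prefix}{self.checkpoint_score_attribute}"

assert LegacyCheckpointConfig("loss", "min")._tune_legacy_checkpoint_score_attr == "min-loss"
assert LegacyCheckpointConfig("accuracy")._tune_legacy_checkpoint_score_attr == "accuracy"
```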
101 | def test_dynamic_sampling_bias_activation(self):
project = self.project # force creation
project.update_option(
"sentry:dynamic_sampling_biases",
[
{"id": "boostEnvironments", "active": False},
],
)
self.login_as(self.user)
token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
authorization = f"Bearer {token.token}"
url = reverse(
"sentry-api-0-project-details",
kwargs={
"organization_slug": self.project.organization.slug,
"project_slug": self.project.slug,
},
)
with Feature({self.new_ds_flag: True}):
self.client.put(
url,
format="json",
HTTP_AUTHORIZATION=authorization,
data={
"dynamicSamplingBiases": [
{"id": "boostEnvironments", "active": True},
]
},
)
assert AuditLogEntry.objects.filter(
organization=self.project.organization,
event=audit_log.get_event_id("SAMPLING_BIAS_ENABLED"),
).exists()
|
Tests that when sending a request to enable a dynamic sampling bias,
the bias will be successfully enabled and the audit log 'SAMPLING_BIAS_ENABLED' will be triggered
| 26 | 59 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_dynamic_sampling_bias_activation(self):
project = self.project # force creation
project.update_option(
"sentry:dynamic_sampling_biases",
[
{"id": "boostEnvironments", "active": False},
],
)
self.login_as(self.user)
token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
authorization = f"Bearer {token.token}"
url = reverse(
"sentry-api-0-project-details",
kwargs={
"organization_slug": self.project.organization.slug,
"project_slug": self.project.slug,
},
)
with Feature({self.new_ds_flag: True}):
self.client.put(
url,
format="json",
HTTP_AUTHORIZATION=authorization,
data={
"dynamicSamplingBiases": [
{"id": "boostEnvironments", "active": True},
]
},
)
assert AuditLogEntry.objects.filter(
organization=self.project.organization,
event=audit_log.get_event_id("SAMPLING_BIAS_ENABLED"),
).exists()
```
###Assistant :
Tests that when sending a request to enable a dynamic sampling bias,
the bias will be successfully enabled and the audit log 'SAMPLING_BIAS_ENABLED' will be triggered
|
102 | def call(self, inputs, *args, **kwargs):
input_shape = K.int_shape(inputs)
if len(input_shape) != 4:
raise ValueError('Inputs should have rank ' +
str(4) +
'; Received input shape:', str(input_shape))
if self.data_format == 'channels_first':
batch_size, channels, height, width = input_shape
if batch_size is None:
batch_size = -1
r_height, r_width = self.size
o_height, o_width = height * r_height, width * r_width
o_channels = channels // (r_height * r_width)
out = K.reshape(inputs, (batch_size, r_height, r_width, o_channels, height, width))
out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
out = K.reshape(out, (batch_size, o_channels, o_height, o_width))
elif self.data_format == 'channels_last':
batch_size, height, width, channels = input_shape
if batch_size is None:
batch_size = -1
r_height, r_width = self.size
o_height, o_width = height * r_height, width * r_width
o_channels = channels // (r_height * r_width)
out = K.reshape(inputs, (batch_size, height, width, r_height, r_width, o_channels))
out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
out = K.reshape(out, (batch_size, o_height, o_width, o_channels))
return out
| This is where the layer's logic lives.
Parameters
----------
inputs: tensor
Input tensor, or list/tuple of input tensors
args: tuple
Additional standard keras Layer arguments
kwargs: dict
Additional standard keras Layer keyword arguments
Returns
-------
tensor
A tensor or list/tuple of tensors
| 42 | 152 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def call(self, inputs, *args, **kwargs):
input_shape = K.int_shape(inputs)
if len(input_shape) != 4:
raise ValueError('Inputs should have rank ' +
str(4) +
'; Received input shape:', str(input_shape))
if self.data_format == 'channels_first':
batch_size, channels, height, width = input_shape
if batch_size is None:
batch_size = -1
r_height, r_width = self.size
o_height, o_width = height * r_height, width * r_width
o_channels = channels // (r_height * r_width)
out = K.reshape(inputs, (batch_size, r_height, r_width, o_channels, height, width))
out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
out = K.reshape(out, (batch_size, o_channels, o_height, o_width))
elif self.data_format == 'channels_last':
batch_size, height, width, channels = input_shape
if batch_size is None:
batch_size = -1
r_height, r_width = self.size
o_height, o_width = height * r_height, width * r_width
o_channels = channels // (r_height * r_width)
out = K.reshape(inputs, (batch_size, height, width, r_height, r_width, o_channels))
out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
out = K.reshape(out, (batch_size, o_height, o_width, o_channels))
return out
```
###Assistant : This is where the layer's logic lives.
Parameters
----------
inputs: tensor
Input tensor, or list/tuple of input tensors
args: tuple
Additional standard keras Layer arguments
kwargs: dict
Additional standard keras Layer keyword arguments
Returns
-------
tensor
A tensor or list/tuple of tensors
|
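The reshape/permute/reshape sequence in `call` is a depth-to-space (pixel shuffle) rearrangement. A minimal NumPy sketch of the `channels_last` branch, with small illustrative dimensions:
```Python
import numpy as np

b, h, w, c = 1, 2, 2, 4
r_h = r_w = 2                      # upscaling factors, as in self.size
o_c = c // (r_h * r_w)             # output channels
x = np.arange(b * h * w * c).reshape(b, h, w, c)

out = (x.reshape(b, h, w, r_h, r_w, o_c)
        .transpose(0, 1, 3, 2, 4, 5)   # interleave block offsets with pixels
        .reshape(b, h * r_h, w * r_w, o_c))

assert out.shape == (1, 4, 4, 1)   # spatial size doubled, channels reduced 4x
```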
103 | def to_native_types(self, slicer=None, **kwargs) -> np.ndarray:
warnings.warn(
"The 'to_native_types' method is deprecated and will be removed in "
"a future version. Use 'astype(str)' instead.",
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
|
Format specified values of `self` and return them.
.. deprecated:: 1.2.0
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values.
Returns
-------
numpy.ndarray
Formatted values.
| 93 | 40 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def to_native_types(self, slicer=None, **kwargs) -> np.ndarray:
warnings.warn(
"The 'to_native_types' method is deprecated and will be removed in "
"a future version. Use 'astype(str)' instead.",
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
```
###Assistant :
Format specified values of `self` and return them.
.. deprecated:: 1.2.0
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values.
Returns
-------
numpy.ndarray
Formatted values.
|
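Since `to_native_types` is deprecated, here is a quick sketch of the replacement the warning suggests (values are illustrative):
```Python
import numpy as np
import pandas as pd

idx = pd.Index([1.5, 2.0, np.nan])
formatted = idx.astype(str)        # recommended replacement for to_native_types()
print(list(formatted))             # ['1.5', '2.0', 'nan']
```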
104 | def periphery(G, e=None, usebounds=False, weight=None):
if usebounds is True and e is None and not G.is_directed():
return _extrema_bounding(G, compute="periphery", weight=weight)
if e is None:
e = eccentricity(G, weight=weight)
diameter = max(e.values())
p = [v for v in e if e[v] == diameter]
return p
| Returns the periphery of the graph G.
The periphery is the set of nodes with eccentricity equal to the diameter.
Parameters
----------
G : NetworkX graph
A graph
e : eccentricity dictionary, optional
A precomputed dictionary of eccentricities.
weight : string, function, or None
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
If this is None, every edge has weight/distance/cost 1.
Weights stored as floating point values can lead to small round-off
errors in distances. Use integer weights to avoid this.
Weights should be positive, since they are distances.
Returns
-------
p : list
List of nodes in periphery
Examples
--------
>>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
>>> nx.periphery(G)
[2, 5]
See Also
--------
barycenter
center
| 212 | 44 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def periphery(G, e=None, usebounds=False, weight=None):
if usebounds is True and e is None and not G.is_directed():
return _extrema_bounding(G, compute="periphery", weight=weight)
if e is None:
e = eccentricity(G, weight=weight)
diameter = max(e.values())
p = [v for v in e if e[v] == diameter]
return p
```
###Assistant : Returns the periphery of the graph G.
The periphery is the set of nodes with eccentricity equal to the diameter.
Parameters
----------
G : NetworkX graph
A graph
e : eccentricity dictionary, optional
A precomputed dictionary of eccentricities.
weight : string, function, or None
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
If this is None, every edge has weight/distance/cost 1.
Weights stored as floating point values can lead to small round-off
errors in distances. Use integer weights to avoid this.
Weights should be positive, since they are distances.
Returns
-------
p : list
List of nodes in periphery
Examples
--------
>>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
>>> nx.periphery(G)
[2, 5]
See Also
--------
barycenter
center
|
105 | def _current_mode_setpoint_enums(self) -> list[ThermostatSetpointType | None]:
if self._current_mode is None:
# Thermostat(valve) with no support for setting a mode is considered heating-only
return [ThermostatSetpointType.HEATING]
return THERMOSTAT_MODE_SETPOINT_MAP.get(int(self._current_mode.value), []) # type: ignore[no-any-return]
| Return the list of enums that are relevant to the current thermostat mode. | 13 | 30 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _current_mode_setpoint_enums(self) -> list[ThermostatSetpointType | None]:
if self._current_mode is None:
# Thermostat(valve) with no support for setting a mode is considered heating-only
return [ThermostatSetpointType.HEATING]
return THERMOSTAT_MODE_SETPOINT_MAP.get(int(self._current_mode.value), []) # type: ignore[no-any-return]
```
###Assistant : Return the list of enums that are relevant to the current thermostat mode.
|
106 | def line(loc, strg):
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR + 1:nextCR]
else:
return strg[lastCR + 1:]
| Returns the line of text containing loc within a string, counting newlines as line separators.
| 15 | 25 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def line(loc, strg):
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR + 1:nextCR]
else:
return strg[lastCR + 1:]
```
###Assistant : Returns the line of text containing loc within a string, counting newlines as line separators.
|
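A small usage sketch for `line`, assuming only the function as shown:
```Python
text = "first line\nsecond line\nthird line"
loc = text.find("second")                          # an index inside the middle line
assert line(loc, text) == "second line"
assert line(len(text) - 1, text) == "third line"   # last line has no trailing \n
```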
107 | def _parse_configs(self, config_files):
formatted = ""
for cfile in config_files:
fname = os.path.basename(cfile)
ext = os.path.splitext(cfile)[1]
formatted += f"\n--------- {fname} ---------\n"
if ext == ".ini":
formatted += self._parse_ini(cfile)
elif fname == ".faceswap":
formatted += self._parse_json(cfile)
return formatted
| Parse the given list of config files into a human readable format.
Parameters
----------
config_files: list
A list of paths to the faceswap config files
Returns
-------
str
The current configuration in the config files formatted in a human readable format
| 41 | 37 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _parse_configs(self, config_files):
formatted = ""
for cfile in config_files:
fname = os.path.basename(cfile)
ext = os.path.splitext(cfile)[1]
formatted += f"\n--------- {fname} ---------\n"
if ext == ".ini":
formatted += self._parse_ini(cfile)
elif fname == ".faceswap":
formatted += self._parse_json(cfile)
return formatted
```
###Assistant : Parse the given list of config files into a human readable format.
Parameters
----------
config_files: list
A list of paths to the faceswap config files
Returns
-------
str
The current configuration in the config files formatted in a human readable format
|
108 | def parent(self) -> DOMNode:
if self._parent is None:
raise NoParent(f"{self} has no parent")
assert isinstance(self._parent, DOMNode)
return self._parent
| Get the parent node.
Raises:
NoParent: If this is the root node.
Returns:
DOMNode: The node which is the direct parent of this node.
| 24 | 18 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def parent(self) -> DOMNode:
if self._parent is None:
raise NoParent(f"{self} has no parent")
assert isinstance(self._parent, DOMNode)
return self._parent
```
###Assistant : Get the parent node.
Raises:
NoParent: If this is the root node.
Returns:
DOMNode: The node which is the direct parent of this node.
|
109 | def user_documents_dir(self) -> str:
documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR")
if documents_dir is None:
documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip()
if not documents_dir:
documents_dir = os.path.expanduser("~/Documents")
return documents_dir
|
:return: documents directory tied to the user, e.g. ``~/Documents``
| 9 | 23 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def user_documents_dir(self) -> str:
documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR")
if documents_dir is None:
documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip()
if not documents_dir:
documents_dir = os.path.expanduser("~/Documents")
return documents_dir
```
###Assistant :
:return: documents directory tied to the user, e.g. ``~/Documents``
|
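Ignoring the `user-dirs.dirs` config file that `_get_user_dirs_folder` consults, the fallback chain above reduces to a one-liner; a minimal sketch:
```Python
import os

# environment variable first, then the conventional ~/Documents fallback
documents = os.environ.get("XDG_DOCUMENTS_DIR", "").strip() or os.path.expanduser("~/Documents")
```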
110 | def test_iforest_sparse(global_random_seed):
rng = check_random_state(global_random_seed)
X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0], "bootstrap": [True, False]})
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in grid:
# Trained on sparse format
sparse_classifier = IsolationForest(
n_estimators=10, random_state=global_random_seed, **params
).fit(X_train_sparse)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_classifier = IsolationForest(
n_estimators=10, random_state=global_random_seed, **params
).fit(X_train)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
| Check IForest for various parameter settings on sparse input. | 9 | 65 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_iforest_sparse(global_random_seed):
rng = check_random_state(global_random_seed)
X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0], "bootstrap": [True, False]})
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in grid:
# Trained on sparse format
sparse_classifier = IsolationForest(
n_estimators=10, random_state=global_random_seed, **params
).fit(X_train_sparse)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_classifier = IsolationForest(
n_estimators=10, random_state=global_random_seed, **params
).fit(X_train)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
```
###Assistant : Check IForest for various parameter settings on sparse input.
|
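The invariant the test asserts, that sparse and dense inputs give identical predictions, can be checked directly; a minimal sketch with fixed seeds:
```Python
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.ensemble import IsolationForest

X = np.random.RandomState(0).rand(50, 4)
dense = IsolationForest(n_estimators=10, random_state=0).fit(X).predict(X)
sparse = IsolationForest(n_estimators=10, random_state=0).fit(csr_matrix(X)).predict(csr_matrix(X))
assert (dense == sparse).all()
```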
111 | def get_scorer_names():
return sorted(_SCORERS.keys())
for name, metric in [
("precision", precision_score),
("recall", recall_score),
("f1", f1_score),
("jaccard", jaccard_score),
]:
_SCORERS[name] = make_scorer(metric, average="binary")
for average in ["macro", "micro", "samples", "weighted"]:
qualified_name = "{0}_{1}".format(name, average)
_SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average)
SCORERS = _DeprecatedScorers(_SCORERS)
| Get the names of all available scorers.
These names can be passed to :func:`~sklearn.metrics.get_scorer` to
retrieve the scorer object.
Returns
-------
list of str
Names of all available scorers.
| 29 | 41 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_scorer_names():
return sorted(_SCORERS.keys())
for name, metric in [
("precision", precision_score),
("recall", recall_score),
("f1", f1_score),
("jaccard", jaccard_score),
]:
_SCORERS[name] = make_scorer(metric, average="binary")
for average in ["macro", "micro", "samples", "weighted"]:
qualified_name = "{0}_{1}".format(name, average)
_SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average)
SCORERS = _DeprecatedScorers(_SCORERS)
```
###Assistant : Get the names of all available scorers.
These names can be passed to :func:`~sklearn.metrics.get_scorer` to
retrieve the scorer object.
Returns
-------
list of str
Names of all available scorers.
|
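Typical usage of the documented API; the scorer name is chosen for illustration:
```Python
from sklearn.metrics import get_scorer, get_scorer_names

names = get_scorer_names()       # sorted list of registered scorer names
assert "f1_macro" in names       # one of the averaged variants registered above
scorer = get_scorer("f1_macro")  # retrieve the corresponding scorer object
```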
112 | def _split_generators(self, dl_manager):
# Download extract and return path of data file.
dl_dir = dl_manager.download_and_extract(_URL)
# Use swda/ folder.
data_dir = os.path.join(dl_dir, "swda")
# Handle partitions files: download extract and return paths of split files.
downloaded_files = dl_manager.download(self._URLS)
return [
# Return whole data path and train splits file downloaded path.
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["train"]}
),
# Return whole data path and dev splits file downloaded path.
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["dev"]},
),
# Return whole data path and train splits file downloaded path.
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["test"]}
),
]
|
Returns SplitGenerators.
This method is tasked with downloading/extracting the data and defining the splits.
Args:
dl_manager (:obj:`datasets.utils.download_manager.DownloadManager`):
Download manager to download and extract data files from urls.
Returns:
:obj:`list[str]`:
List of paths to data.
| 34 | 95 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _split_generators(self, dl_manager):
# Download extract and return path of data file.
dl_dir = dl_manager.download_and_extract(_URL)
# Use swda/ folder.
data_dir = os.path.join(dl_dir, "swda")
# Handle partitions files: download extract and return paths of split files.
downloaded_files = dl_manager.download(self._URLS)
return [
# Return whole data path and train splits file downloaded path.
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["train"]}
),
# Return whole data path and dev splits file downloaded path.
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["dev"]},
),
# Return whole data path and train splits file downloaded path.
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["test"]}
),
]
```
###Assistant :
Returns SplitGenerators.
This method is tasked with downloading/extracting the data and defining the splits.
Args:
dl_manager (:obj:`datasets.utils.download_manager.DownloadManager`):
Download manager to download and extract data files from urls.
Returns:
:obj:`list[str]`:
List of paths to data.
|
113 | def print_help(self):
help_text =
console.print(text=help_text, menu="Stocks - Discovery")
| Print help[cmds]
[src][Geek of Wall St][/src]
rtearn realtime earnings from and expected moves
[src][Finnhub][/src]
pipo past IPOs dates
fipo future IPOs dates
[src][Yahoo Finance][/src]
gainers show latest top gainers
losers show latest top losers
ugs undervalued stocks with revenue and earnings growth in excess of 25%
gtech tech stocks with revenue and earnings growth more than 25%
active most active stocks by intraday trade volume
ulc potentially undervalued large cap stocks
asc small cap stocks with earnings growth rates better than 25%
[src][Fidelity][/src]
ford orders by Fidelity Customers
[src][Cathiesark.com][/src]
arkord orders by ARK Investment Management LLC
[src][Seeking Alpha][/src]
upcoming upcoming earnings release dates
trending trending news
cnews customized news (buybacks, ipos, spacs, healthcare, politics)
[src][Shortinterest.com][/src]
lowfloat low float stocks under 10M shares float
[src][Pennystockflow.com][/src]
hotpenny today's hot penny stocks
[src][NASDAQ Data Link (Formerly Quandl)][/src]
rtat top 10 retail traded stocks per day[/cmds]
| 142 | 8 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def print_help(self):
help_text =
console.print(text=help_text, menu="Stocks - Discovery")
```
###Assistant : Print help[cmds]
[src][Geek of Wall St][/src]
rtearn realtime earnings from and expected moves
[src][Finnhub][/src]
pipo past IPOs dates
fipo future IPOs dates
[src][Yahoo Finance][/src]
gainers show latest top gainers
losers show latest top losers
ugs undervalued stocks with revenue and earnings growth in excess of 25%
gtech tech stocks with revenue and earnings growth more than 25%
active most active stocks by intraday trade volume
ulc potentially undervalued large cap stocks
asc small cap stocks with earnings growth rates better than 25%
[src][Fidelity][/src]
ford orders by Fidelity Customers
[src][Cathiesark.com][/src]
arkord orders by ARK Investment Management LLC
[src][Seeking Alpha][/src]
upcoming upcoming earnings release dates
trending trending news
cnews customized news (buybacks, ipos, spacs, healthcare, politics)
[src][Shortinterest.com][/src]
lowfloat low float stocks under 10M shares float
[src][Pennystockflow.com][/src]
hotpenny today's hot penny stocks
[src][NASDAQ Data Link (Formerly Quandl)][/src]
rtat top 10 retail traded stocks per day[/cmds]
|
114 | def handle_fk_field(self, obj, field):
self._start_relational_field(field)
related_att = getattr(obj, field.get_attname())
if related_att is not None:
if self.use_natural_foreign_keys and hasattr(
field.remote_field.model, "natural_key"
):
related = getattr(obj, field.name)
# If related object has a natural key, use it
related = related.natural_key()
# Iterable natural keys are rolled out as subelements
for key_value in related:
self.xml.startElement("natural", {})
self.xml.characters(str(key_value))
self.xml.endElement("natural")
else:
self.xml.characters(str(related_att))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
|
Handle a ForeignKey (they need to be treated slightly
differently from regular fields).
| 13 | 60 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def handle_fk_field(self, obj, field):
self._start_relational_field(field)
related_att = getattr(obj, field.get_attname())
if related_att is not None:
if self.use_natural_foreign_keys and hasattr(
field.remote_field.model, "natural_key"
):
related = getattr(obj, field.name)
# If related object has a natural key, use it
related = related.natural_key()
# Iterable natural keys are rolled out as subelements
for key_value in related:
self.xml.startElement("natural", {})
self.xml.characters(str(key_value))
self.xml.endElement("natural")
else:
self.xml.characters(str(related_att))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
```
###Assistant :
Handle a ForeignKey (they need to be treated slightly
differently from regular fields).
|
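The `natural_key` hook the serializer checks for is defined on the related model; a minimal sketch of such a model (names are hypothetical, and a configured Django app is assumed):
```Python
from django.db import models

class PersonManager(models.Manager):
    def get_by_natural_key(self, first_name, last_name):
        # used on deserialization to resolve the natural key back to a row
        return self.get(first_name=first_name, last_name=last_name)

class Person(models.Model):
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    objects = PersonManager()

    def natural_key(self):
        # the iterable the XML serializer rolls out as <natural> subelements
        return (self.first_name, self.last_name)
```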
115 | def test_metrics_folder():
with _ray_start(include_dashboard=True) as context:
session_dir = context["session_dir"]
assert os.path.exists(
f"{session_dir}/metrics/grafana/provisioning/dashboards/default.yml"
)
assert os.path.exists(
f"{session_dir}/metrics/grafana/provisioning/dashboards"
"/default_grafana_dashboard.json"
)
assert os.path.exists(
f"{session_dir}/metrics/grafana/provisioning/datasources/default.yml"
)
assert os.path.exists(f"{session_dir}/metrics/prometheus/prometheus.yml")
|
Tests that the default dashboard files get created.
| 8 | 24 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_metrics_folder():
with _ray_start(include_dashboard=True) as context:
session_dir = context["session_dir"]
assert os.path.exists(
f"{session_dir}/metrics/grafana/provisioning/dashboards/default.yml"
)
assert os.path.exists(
f"{session_dir}/metrics/grafana/provisioning/dashboards"
"/default_grafana_dashboard.json"
)
assert os.path.exists(
f"{session_dir}/metrics/grafana/provisioning/datasources/default.yml"
)
assert os.path.exists(f"{session_dir}/metrics/prometheus/prometheus.yml")
```
###Assistant :
Tests that the default dashboard files get created.
|
116 | def state_dict(self):
state_dict = {}
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['cur_scale'] = self.cur_scale
state_dict['cur_iter'] = self.cur_iter
if state_dict['dynamic_loss_scale']:
state_dict['last_overflow_iter'] = self.last_overflow_iter
state_dict['scale_factor'] = self.scale_factor
state_dict['scale_window'] = self.scale_window
state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
state_dict['fp32_groups_flat'] = self.fp32_groups_flat
state_dict['clip_grad'] = self.clip_grad
return state_dict
# Refresh fp32 master params from fp16 copies |
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
| 39 | 44 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def state_dict(self):
state_dict = {}
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['cur_scale'] = self.cur_scale
state_dict['cur_iter'] = self.cur_iter
if state_dict['dynamic_loss_scale']:
state_dict['last_overflow_iter'] = self.last_overflow_iter
state_dict['scale_factor'] = self.scale_factor
state_dict['scale_window'] = self.scale_window
state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
state_dict['fp32_groups_flat'] = self.fp32_groups_flat
state_dict['clip_grad'] = self.clip_grad
return state_dict
# Refresh fp32 master params from fp16 copies
```
###Assistant :
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
|
117 | def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
if np.isclose(n_new, 0.0):
return mu, var
new_mu = np.average(X, axis=0, weights=sample_weight)
new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2
total_var = total_ssd / n_total
return total_mu, total_var
| Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like of shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
| 191 | 162 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
if np.isclose(n_new, 0.0):
return mu, var
new_mu = np.average(X, axis=0, weights=sample_weight)
new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2
total_var = total_ssd / n_total
return total_mu, total_var
```
###Assistant : Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like of shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
|
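The update is algebraically exact for population variance (ddof=0), so it can be verified against a direct computation over the concatenated data; a sketch assuming only NumPy and the function above:
```Python
import numpy as np

rng = np.random.RandomState(0)
old, new = rng.rand(20, 3), rng.rand(15, 3)

mu, var = old.mean(axis=0), old.var(axis=0)
mu2, var2 = _update_mean_variance(old.shape[0], mu, var, new)

both = np.vstack([old, new])
assert np.allclose(mu2, both.mean(axis=0))
assert np.allclose(var2, both.var(axis=0))
```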
118 | def is_prime(n):
sympy_deprecation_warning(
,
deprecated_since_version="1.11",
active_deprecations_target='deprecated-carmichael-static-methods',
)
return isprime(n)
|
is_prime is just a wrapper around sympy.ntheory.primetest.isprime so use that
directly instead.
| 12 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def is_prime(n):
sympy_deprecation_warning(
,
deprecated_since_version="1.11",
active_deprecations_target='deprecated-carmichael-static-methods',
)
return isprime(n)
```
###Assistant :
is_prime is just a wrapper around sympy.ntheory.primetest.isprime so use that
directly instead.
|
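As the deprecation note advises, call `isprime` directly; for illustration:
```Python
from sympy import isprime

assert isprime(97) and not isprime(100)  # use this instead of the deprecated wrapper
```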
119 | def standard_b64decode(s):
return b64decode(s)
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
| Decode bytes encoded with the standard Base64 alphabet.
Argument s is a bytes-like object or ASCII string to decode. The result
is returned as a bytes object. A binascii.Error is raised if the input
is incorrectly padded. Characters that are not in the standard alphabet
are discarded prior to the padding check.
| 52 | 12 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def standard_b64decode(s):
return b64decode(s)
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
```
###Assistant : Decode bytes encoded with the standard Base64 alphabet.
Argument s is a bytes-like object or ASCII string to decode. The result
is returned as a bytes object. A binascii.Error is raised if the input
is incorrectly padded. Characters that are not in the standard alphabet
are discarded prior to the padding check.
|
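A round-trip sketch of the standard alphabet together with the URL-safe translation tables defined above; the input bytes are chosen so the encoding hits '+' and '/':
```Python
from base64 import standard_b64decode, standard_b64encode, urlsafe_b64encode

data = b"\xfb\xff\xfe"
encoded = standard_b64encode(data)          # b'+//+'
assert standard_b64decode(encoded) == data
assert urlsafe_b64encode(data) == encoded.translate(bytes.maketrans(b'+/', b'-_'))
```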
120 | def inception_score(self, imgs, batch_size=32, splits=1):
N = len(imgs)
dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)
up = nn.Upsample(
size=(28, 28),
mode="bilinear",
align_corners=False, # This is to reduce user warnings from torch.
).type(torch.FloatTensor)
| Calculate the inception score of the generated images. | 8 | 29 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def inception_score(self, imgs, batch_size=32, splits=1):
N = len(imgs)
dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)
up = nn.Upsample(
size=(28, 28),
mode="bilinear",
align_corners=False, # This is to reduce user warnings from torch.
).type(torch.FloatTensor)
```
###Assistant : Calculate the inception score of the generated images.
|
121 | def execute():
frappe.reload_doc("stock", "doctype", "shipment")
# update submitted status
frappe.db.sql(
)
# update cancelled status
frappe.db.sql(
)
| UPDATE `tabShipment`
SET status = "Submitted"
WHERE status = "Draft" AND docstatus = 1UPDATE `tabShipment`
SET status = "Cancelled"
WHERE status = "Draft" AND docstatus = 2 | 27 | 17 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def execute():
frappe.reload_doc("stock", "doctype", "shipment")
# update submitted status
frappe.db.sql(
)
# update cancelled status
frappe.db.sql(
)
```
###Assistant : UPDATE `tabShipment`
SET status = "Submitted"
WHERE status = "Draft" AND docstatus = 1UPDATE `tabShipment`
SET status = "Cancelled"
WHERE status = "Draft" AND docstatus = 2
|
122 | def url(self, name):
raise NotImplementedError("subclasses of Storage must provide a url() method")
|
Return an absolute URL where the file's contents can be accessed
directly by a web browser.
| 16 | 12 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def url(self, name):
raise NotImplementedError("subclasses of Storage must provide a url() method")
```
###Assistant :
Return an absolute URL where the file's contents can be accessed
directly by a web browser.
|
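Concrete storage backends override `url`; a minimal hypothetical subclass (class name and prefix are illustrative):
```Python
from django.core.files.storage import Storage

class CDNStorage(Storage):
    base_url = "https://cdn.example.com/"   # hypothetical prefix

    def url(self, name):
        # return an absolute, browser-accessible URL for the stored file
        return self.base_url + name
```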
123 | def load_attributes_from_hdf5_group(group, name):
if name in group.attrs:
data = [
n.decode("utf8") if hasattr(n, "decode") else n
for n in group.attrs[name]
]
else:
data = []
chunk_id = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[
n.decode("utf8") if hasattr(n, "decode") else n
for n in group.attrs["%s%d" % (name, chunk_id)]
]
)
chunk_id += 1
return data
| Loads attributes of the specified name from the HDF5 group.
This method deals with an inherent limitation of the HDF5 format,
which cannot store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.
Args:
group: A pointer to a HDF5 group.
name: A name of the attributes to load.
Returns:
data: Attributes data.
| 51 | 57 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def load_attributes_from_hdf5_group(group, name):
if name in group.attrs:
data = [
n.decode("utf8") if hasattr(n, "decode") else n
for n in group.attrs[name]
]
else:
data = []
chunk_id = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[
n.decode("utf8") if hasattr(n, "decode") else n
for n in group.attrs["%s%d" % (name, chunk_id)]
]
)
chunk_id += 1
return data
```
###Assistant : Loads attributes of the specified name from the HDF5 group.
This method deals with an inherent limitation of the HDF5 format,
which cannot store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.
Args:
group: A pointer to a HDF5 group.
name: A name of the attributes to load.
Returns:
data: Attributes data.
|
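A sketch of the chunked-attribute layout the reader above understands, written with h5py; the file, group, and attribute names are illustrative:
```Python
import h5py
import numpy as np

with h5py.File("demo.h5", "w") as f:
    g = f.create_group("layer")
    # small lists fit in a single attribute ...
    g.attrs["weight_names"] = np.array([b"kernel", b"bias"])
    # ... oversized lists are split into numbered chunks instead
    g.attrs["big_names0"] = np.array([b"a", b"b"])
    g.attrs["big_names1"] = np.array([b"c"])

with h5py.File("demo.h5", "r") as f:
    assert load_attributes_from_hdf5_group(f["layer"], "weight_names") == ["kernel", "bias"]
    assert load_attributes_from_hdf5_group(f["layer"], "big_names") == ["a", "b", "c"]
```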
124 | def _find_root_block_schema(block_schemas_with_references):
return next(
(
block_schema
for (
block_schema,
_,
parent_block_schema_id,
) in block_schemas_with_references
if parent_block_schema_id is None
),
None,
)
|
Attempts to find the root block schema from a list of block schemas
with references. Returns None if a root block schema is not found.
Returns only the first potential root block schema if multiple are found.
| 37 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _find_root_block_schema(block_schemas_with_references):
return next(
(
block_schema
for (
block_schema,
_,
parent_block_schema_id,
) in block_schemas_with_references
if parent_block_schema_id is None
),
None,
)
```
###Assistant :
Attempts to find the root block schema from a list of block schemas
with references. Returns None if a root block schema is not found.
Returns only the first potential root block schema if multiple are found.
|
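The `next(generator, default)` pattern above, demonstrated on hypothetical tuples:
```Python
rows = [
    ("child", None, "root-id"),   # (block_schema, _, parent_block_schema_id)
    ("root", None, None),
]
root = next((bs for (bs, _, parent_id) in rows if parent_id is None), None)
assert root == "root"
assert next((bs for (bs, _, p) in [] if p is None), None) is None  # empty input
```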
125 | def feature_embedding(input_feats, out_feat_len):
assert input_feats.ndim == 2
assert isinstance(out_feat_len, int)
assert out_feat_len >= input_feats.shape[1]
num_nodes = input_feats.shape[0]
feat_dim = input_feats.shape[1]
feat_repeat_times = out_feat_len // feat_dim
residue_dim = out_feat_len % feat_dim
if residue_dim > 0:
embed_wave = np.array([
np.power(1000, 2.0 * (j // 2) / feat_repeat_times + 1)
for j in range(feat_repeat_times + 1)
]).reshape((feat_repeat_times + 1, 1, 1))
repeat_feats = np.repeat(
np.expand_dims(
input_feats, axis=0), feat_repeat_times, axis=0)
residue_feats = np.hstack([
input_feats[:, 0:residue_dim], np.zeros(
(num_nodes, feat_dim - residue_dim))
])
residue_feats = np.expand_dims(residue_feats, axis=0)
repeat_feats = np.concatenate([repeat_feats, residue_feats], axis=0)
embedded_feats = repeat_feats / embed_wave
embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2])
embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2])
embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape(
(num_nodes, -1))[:, 0:out_feat_len]
else:
embed_wave = np.array([
np.power(1000, 2.0 * (j // 2) / feat_repeat_times)
for j in range(feat_repeat_times)
]).reshape((feat_repeat_times, 1, 1))
repeat_feats = np.repeat(
np.expand_dims(
input_feats, axis=0), feat_repeat_times, axis=0)
embedded_feats = repeat_feats / embed_wave
embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2])
embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2])
embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape(
(num_nodes, -1)).astype(np.float32)
return embedded_feats
| Embed features. This code was partially adapted from
https://github.com/GXYM/DRRG licensed under the MIT license.
Args:
input_feats (ndarray): The input features of shape (N, d), where N is
the number of nodes in graph, d is the input feature vector length.
out_feat_len (int): The length of output feature vector.
Returns:
embedded_feats (ndarray): The embedded features.
| 54 | 162 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def feature_embedding(input_feats, out_feat_len):
assert input_feats.ndim == 2
assert isinstance(out_feat_len, int)
assert out_feat_len >= input_feats.shape[1]
num_nodes = input_feats.shape[0]
feat_dim = input_feats.shape[1]
feat_repeat_times = out_feat_len // feat_dim
residue_dim = out_feat_len % feat_dim
if residue_dim > 0:
embed_wave = np.array([
np.power(1000, 2.0 * (j // 2) / feat_repeat_times + 1)
for j in range(feat_repeat_times + 1)
]).reshape((feat_repeat_times + 1, 1, 1))
repeat_feats = np.repeat(
np.expand_dims(
input_feats, axis=0), feat_repeat_times, axis=0)
residue_feats = np.hstack([
input_feats[:, 0:residue_dim], np.zeros(
(num_nodes, feat_dim - residue_dim))
])
residue_feats = np.expand_dims(residue_feats, axis=0)
repeat_feats = np.concatenate([repeat_feats, residue_feats], axis=0)
embedded_feats = repeat_feats / embed_wave
embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2])
embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2])
embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape(
(num_nodes, -1))[:, 0:out_feat_len]
else:
embed_wave = np.array([
np.power(1000, 2.0 * (j // 2) / feat_repeat_times)
for j in range(feat_repeat_times)
]).reshape((feat_repeat_times, 1, 1))
repeat_feats = np.repeat(
np.expand_dims(
input_feats, axis=0), feat_repeat_times, axis=0)
embedded_feats = repeat_feats / embed_wave
embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2])
embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2])
embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape(
(num_nodes, -1)).astype(np.float32)
return embedded_feats
```
###Assistant : Embed features. This code was partially adapted from
https://github.com/GXYM/DRRG licensed under the MIT license.
Args:
input_feats (ndarray): The input features of shape (N, d), where N is
the number of nodes in graph, d is the input feature vector length.
out_feat_len (int): The length of output feature vector.
Returns:
embedded_feats (ndarray): The embedded features.
|
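A quick shape check for `feature_embedding`, assuming the function as shown and illustrative dimensions (here the residue branch is exercised, since 8 is not a multiple of 3):
```Python
import numpy as np

feats = np.random.RandomState(0).rand(5, 3).astype(np.float32)
emb = feature_embedding(feats, 8)
assert emb.shape == (5, 8)   # every node embedded to the requested length
```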
126 | def decision_function(self, X):
check_is_fitted(self)
xp, _ = get_namespace(X)
X = self._validate_data(X, accept_sparse="csr", reset=False)
scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
return xp.reshape(scores, -1) if scores.shape[1] == 1 else scores
|
Predict confidence scores for samples.
The confidence score for a sample is proportional to the signed
distance of that sample to the hyperplane.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix for which we want to get the confidence scores.
Returns
-------
scores : ndarray of shape (n_samples,) or (n_samples, n_classes)
Confidence scores per `(n_samples, n_classes)` combination. In the
binary case, confidence score for `self.classes_[1]` where >0 means
this class would be predicted.
| 79 | 29 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def decision_function(self, X):
check_is_fitted(self)
xp, _ = get_namespace(X)
X = self._validate_data(X, accept_sparse="csr", reset=False)
scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
return xp.reshape(scores, -1) if scores.shape[1] == 1 else scores
```
###Assistant :
Predict confidence scores for samples.
The confidence score for a sample is proportional to the signed
distance of that sample to the hyperplane.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix for which we want to get the confidence scores.
Returns
-------
scores : ndarray of shape (n_samples,) or (n_samples, n_classes)
Confidence scores per `(n_samples, n_classes)` combination. In the
binary case, confidence score for `self.classes_[1]` where >0 means
this class would be predicted.
|
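Typical usage with a fitted linear classifier; the estimator and data are illustrative:
```Python
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier

X, y = make_classification(n_samples=100, n_features=5, random_state=0)
clf = SGDClassifier(random_state=0).fit(X, y)
scores = clf.decision_function(X[:3])
assert scores.shape == (3,)   # 1-D in the binary case; >0 predicts classes_[1]
```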
127 | def get_ranking(pairs):
if len(pairs) == 1:
return list(pairs[0])
w = get_winner(pairs)
# now remove the winner from the list of pairs
p_new = np.array([(a, b) for a, b in pairs if a != w])
return [w] + get_ranking(p_new)
|
Abuses concordance property to get a (not necessarily unique) ranking.
The lack of uniqueness is due to the potential existence of multiple
equally ranked winners. We have to pick one, which is where
the non-uniqueness comes from
| 37 | 38 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_ranking(pairs):
if len(pairs) == 1:
return list(pairs[0])
w = get_winner(pairs)
# now remove the winner from the list of pairs
p_new = np.array([(a, b) for a, b in pairs if a != w])
return [w] + get_ranking(p_new)
```
###Assistant :
Abuses concordance property to get a (not necessarily unique) ranking.
The lack of uniqueness is due to the potential existence of multiple
equally ranked winners. We have to pick one, which is where
the non-uniqueness comes from
|
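`get_winner` is not shown; below is a hypothetical stand-in that assumes an unbeaten candidate always exists (a Condorcet winner), followed by a usage sketch of `get_ranking`:
```Python
import numpy as np

def get_winner(pairs):
    # hypothetical helper: the candidate that loses no pairwise comparison,
    # where each pair is (winner, loser); assumes such a candidate exists
    losers = {b for _, b in pairs}
    return next(a for a, _ in pairs if a not in losers)

pairs = np.array([(1, 2), (1, 3), (2, 3)])
assert get_ranking(pairs) == [1, 2, 3]
```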
128 | def backup_dir(dir, ext=".bak"):
# type: (str, str) -> str
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
| Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc) | 18 | 31 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def backup_dir(dir, ext=".bak"):
# type: (str, str) -> str
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
```
###Assistant : Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)
|
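A quick sketch of the naming behaviour, run inside a temporary directory:
```Python
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, "project")
    assert backup_dir(target) == target + ".bak"    # nothing exists yet
    os.makedirs(target + ".bak")
    assert backup_dir(target) == target + ".bak2"   # first candidate is taken
```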
129 | def synchronize(local_filters, remotes, update_remote=True):
remote_filters = ray.get(
[r.get_filters.remote(flush_after=True) for r in remotes]
)
for rf in remote_filters:
for k in local_filters:
local_filters[k].apply_changes(rf[k], with_buffer=False)
if update_remote:
copies = {k: v.as_serializable() for k, v in local_filters.items()}
remote_copy = ray.put(copies)
[r.sync_filters.remote(remote_copy) for r in remotes]
| Aggregates all filters from remote evaluators.
Local copy is updated and then broadcasted to all remote evaluators.
Args:
local_filters: Filters to be synchronized.
remotes: Remote evaluators with filters.
update_remote: Whether to push updates to remote filters.
| 36 | 42 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def synchronize(local_filters, remotes, update_remote=True):
remote_filters = ray.get(
[r.get_filters.remote(flush_after=True) for r in remotes]
)
for rf in remote_filters:
for k in local_filters:
local_filters[k].apply_changes(rf[k], with_buffer=False)
if update_remote:
copies = {k: v.as_serializable() for k, v in local_filters.items()}
remote_copy = ray.put(copies)
[r.sync_filters.remote(remote_copy) for r in remotes]
```
###Assistant : Aggregates all filters from remote evaluators.
Local copy is updated and then broadcasted to all remote evaluators.
Args:
local_filters: Filters to be synchronized.
remotes: Remote evaluators with filters.
update_remote: Whether to push updates to remote filters.
|
130 | def test_estimator_empty_instance_dict(estimator):
state = estimator.__getstate__()
expected = {"_sklearn_version": sklearn.__version__}
assert state == expected
# this should not raise
pickle.loads(pickle.dumps(BaseEstimator()))
| Check that ``__getstate__`` returns an empty ``dict`` with an empty
instance.
Python 3.11+ changed behaviour by returning ``None`` instead of raising an
``AttributeError``. Non-regression test for gh-25188.
| 27 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_estimator_empty_instance_dict(estimator):
state = estimator.__getstate__()
expected = {"_sklearn_version": sklearn.__version__}
assert state == expected
# this should not raise
pickle.loads(pickle.dumps(BaseEstimator()))
```
###Assistant : Check that ``__getstate__`` returns an empty ``dict`` with an empty
instance.
Python 3.11+ changed behaviour by returning ``None`` instead of raising an
``AttributeError``. Non-regression test for gh-25188.
|
131 | def test_with_variables(self):
context = Context({"name": "jonathan wells"})
template =
expected =
self.assertHTMLEqual(expected, Template(template).render(context))
|
{% load wagtailadmin_tags %}
{% fragment as my_fragment %}
<p>Hello, {{ name|title }}</p>
{% endfragment %}
Text coming after:
{{ my_fragment }}
Text coming after:
<p>Hello, Jonathan Wells</p>
| 28 | 13 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_with_variables(self):
context = Context({"name": "jonathan wells"})
template =
expected =
self.assertHTMLEqual(expected, Template(template).render(context))
```
###Assistant :
{% load wagtailadmin_tags %}
{% fragment as my_fragment %}
<p>Hello, {{ name|title }}</p>
{% endfragment %}
Text coming after:
{{ my_fragment }}
Text coming after:
<p>Hello, Jonathan Wells</p>
|
132 | def _extract_interpreter(b_module_data):
interpreter = None
args = []
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split on python-2.6 needs bytes. On python-3.x it needs text
cli_split = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
# convert args to text
cli_split = [to_text(a, errors='surrogate_or_strict') for a in cli_split]
interpreter = cli_split[0]
args = cli_split[1:]
return interpreter, args
|
Used to extract shebang expression from binary module data and return a text
string with the shebang, or None if no shebang is detected.
| 24 | 54 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _extract_interpreter(b_module_data):
interpreter = None
args = []
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split on python-2.6 needs bytes. On python-3.x it needs text
cli_split = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
# convert args to text
cli_split = [to_text(a, errors='surrogate_or_strict') for a in cli_split]
interpreter = cli_split[0]
args = cli_split[1:]
return interpreter, args
```
###Assistant :
Used to extract shebang expression from binary module data and return a text
string with the shebang, or None if no shebang is detected.
|
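The `to_native`/`to_text` helpers are Ansible's; on Python 3 the core of the extraction reduces to decoding plus `shlex.split`. A simplified sketch:
```Python
import shlex

b_module = b"#!/usr/bin/env python3 -u\nprint('hello')\n"
first_line = b_module.split(b"\n", 1)[0].strip()
parts = shlex.split(first_line[2:].decode())   # drop the b"#!" prefix
interpreter, args = parts[0], parts[1:]
assert interpreter == "/usr/bin/env" and args == ["python3", "-u"]
```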
133 | def test_resource_exhausted_info(self):
# generate some random data to be captured implicitly in training func.
from sklearn.datasets import fetch_olivetti_faces
a_large_array = []
for i in range(50):
a_large_array.append(fetch_olivetti_faces())
| This is to test whether helpful information is displayed when
the objects captured in the trainable/training function are too
large and gRPC's RESOURCES_EXHAUSTED error is triggered. | 26 | 26 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_resource_exhausted_info(self):
# generate some random data to be captured implicitly in training func.
from sklearn.datasets import fetch_olivetti_faces
a_large_array = []
for i in range(50):
a_large_array.append(fetch_olivetti_faces())
```
###Assistant : This is to test whether helpful information is displayed when
the objects captured in the trainable/training function are too
large and gRPC's RESOURCES_EXHAUSTED error is triggered.
|
134 | def testing_session_settings():
with tempfile.TemporaryDirectory() as tmpdir:
profile = prefect.settings.Profile(
name="test-session",
settings={
# Set PREFECT_HOME to a temporary directory to avoid clobbering
# environments and settings
PREFECT_HOME: tmpdir,
PREFECT_PROFILES_PATH: "$PREFECT_HOME/profiles.toml",
# Enable debug logging
PREFECT_LOGGING_LEVEL: "DEBUG",
# Disable shipping logs to the API;
# can be enabled by the `enable_orion_handler` mark
PREFECT_LOGGING_ORION_ENABLED: False,
# Disable services for test runs
PREFECT_ORION_ANALYTICS_ENABLED: False,
PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED: False,
PREFECT_ORION_SERVICES_SCHEDULER_ENABLED: False,
},
source=__file__,
)
with prefect.settings.use_profile(
profile,
override_environment_variables=True,
include_current_context=False,
) as ctx:
yield ctx
|
Creates a fixture for the scope of the test session that modifies setting defaults.
This ensures that tests are isolated from existing settings, databases, etc.
| 25 | 77 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def testing_session_settings():
with tempfile.TemporaryDirectory() as tmpdir:
profile = prefect.settings.Profile(
name="test-session",
settings={
# Set PREFECT_HOME to a temporary directory to avoid clobbering
# environments and settings
PREFECT_HOME: tmpdir,
PREFECT_PROFILES_PATH: "$PREFECT_HOME/profiles.toml",
# Enable debug logging
PREFECT_LOGGING_LEVEL: "DEBUG",
# Disable shipping logs to the API;
# can be enabled by the `enable_orion_handler` mark
PREFECT_LOGGING_ORION_ENABLED: False,
# Disable services for test runs
PREFECT_ORION_ANALYTICS_ENABLED: False,
PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED: False,
PREFECT_ORION_SERVICES_SCHEDULER_ENABLED: False,
},
source=__file__,
)
with prefect.settings.use_profile(
profile,
override_environment_variables=True,
include_current_context=False,
) as ctx:
yield ctx
```
###Assistant :
Creates a fixture for the scope of the test session that modifies setting defaults.
This ensures that tests are isolated from existing settings, databases, etc.
|
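A hedged sketch of wiring the generator above into a `conftest.py` (the fixture name is illustrative):
```Python
import pytest

@pytest.fixture(scope="session", autouse=True)
def _isolated_prefect_settings():
    # Delegate to testing_session_settings() for the whole test session.
    yield from testing_session_settings()
```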
135 | def bot_start(self, **kwargs) -> None:
self.buy_rsi = IntParameter([0, 50], default=30, space='buy')
|
Parameters can also be defined here ...
| 7 | 11 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def bot_start(self, **kwargs) -> None:
self.buy_rsi = IntParameter([0, 50], default=30, space='buy')
```
###Assistant :
Parameters can also be defined here ...
|
136 | def getvalue(self):
if callable(getattr(self.stream, "getvalue", None)):
return self.stream.getvalue()
|
Return the fully serialized queryset (or None if the output stream is
not seekable).
| 14 | 8 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def getvalue(self):
if callable(getattr(self.stream, "getvalue", None)):
return self.stream.getvalue()
```
###Assistant :
Return the fully serialized queryset (or None if the output stream is
not seekable).
|
137 | def boxplot(self, X, win=None, env=None, opts=None):
X = np.squeeze(X)
assert X.ndim == 1 or X.ndim == 2, "X should be one or two-dimensional"
if X.ndim == 1:
X = X[:, None]
opts = {} if opts is None else opts
_title2str(opts)
_assert_opts(opts)
if opts.get("legend") is not None:
assert (
len(opts["legend"]) == X.shape[1]
), "number of legened labels must match number of columns"
data = []
for k in range(X.shape[1]):
_data = {
"y": X.take(k, 1).tolist(),
"type": "box",
}
if opts.get("legend"):
_data["name"] = opts["legend"][k]
else:
_data["name"] = "column " + str(k)
data.append(_data)
return self._send(
{
"data": data,
"win": win,
"eid": env,
"layout": _opts2layout(opts),
"opts": opts,
}
)
|
This function draws boxplots of the specified data. It takes as input
an `N` or an `NxM` tensor `X` that specifies the `N` data values of
which to construct the `M` boxplots.
The following plot-specific `opts` are currently supported:
- `opts.legend`: labels for each of the columns in `X`
| 49 | 106 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def boxplot(self, X, win=None, env=None, opts=None):
X = np.squeeze(X)
assert X.ndim == 1 or X.ndim == 2, "X should be one or two-dimensional"
if X.ndim == 1:
X = X[:, None]
opts = {} if opts is None else opts
_title2str(opts)
_assert_opts(opts)
if opts.get("legend") is not None:
assert (
len(opts["legend"]) == X.shape[1]
), "number of legened labels must match number of columns"
data = []
for k in range(X.shape[1]):
_data = {
"y": X.take(k, 1).tolist(),
"type": "box",
}
if opts.get("legend"):
_data["name"] = opts["legend"][k]
else:
_data["name"] = "column " + str(k)
data.append(_data)
return self._send(
{
"data": data,
"win": win,
"eid": env,
"layout": _opts2layout(opts),
"opts": opts,
}
)
```
###Assistant :
This function draws boxplots of the specified data. It takes as input
an `N` or an `NxM` tensor `X` that specifies the `N` data values of
which to construct the `M` boxplots.
The following plot-specific `opts` are currently supported:
- `opts.legend`: labels for each of the columns in `X`
|
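A minimal sketch of calling the boxplot method through a Visdom client (assumes a visdom server is running on the default localhost address):
```Python
import numpy as np
import visdom

viz = visdom.Visdom()  # connects to the default local server
X = np.random.rand(100, 2)  # two columns -> two boxplots
viz.boxplot(X=X, opts=dict(legend=['A', 'B'], title='Two boxplots'))
```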
138 | def test_body_after_POST_multipart_related(self):
# Ticket #9054
# There are cases in which the multipart data is related instead of
# being a binary upload, in which case it should still be accessible
# via body.
payload_data = b"\r\n".join([
b'--boundary',
b'Content-ID: id; name="name"',
b'',
b'value',
b'--boundary--'
])
payload = FakePayload(payload_data)
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/related; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertEqual(request.POST, {})
self.assertEqual(request.body, payload_data)
|
Reading body after parsing multipart that isn't form-data is allowed
| 10 | 65 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_body_after_POST_multipart_related(self):
# Ticket #9054
# There are cases in which the multipart data is related instead of
# being a binary upload, in which case it should still be accessible
# via body.
payload_data = b"\r\n".join([
b'--boundary',
b'Content-ID: id; name="name"',
b'',
b'value',
b'--boundary--'
])
payload = FakePayload(payload_data)
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/related; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
self.assertEqual(request.POST, {})
self.assertEqual(request.body, payload_data)
```
###Assistant :
Reading body after parsing multipart that isn't form-data is allowed
|
139 | def query(self, query, **kwargs) -> Result:
try:
if self.db_conn:
result = self.db_conn.aql.execute(query, **kwargs)
return result
else:
raise AirflowException(
f"Failed to execute AQLQuery, error connecting to database: {self.database}"
)
except AQLQueryExecuteError as error:
raise AirflowException(f"Failed to execute AQLQuery, error: {str(error)}")
|
Function to create an ArangoDB session
and execute the AQL query in the session.
:param query: AQL query
:return: Result
| 20 | 39 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def query(self, query, **kwargs) -> Result:
try:
if self.db_conn:
result = self.db_conn.aql.execute(query, **kwargs)
return result
else:
raise AirflowException(
f"Failed to execute AQLQuery, error connecting to database: {self.database}"
)
except AQLQueryExecuteError as error:
raise AirflowException(f"Failed to execute AQLQuery, error: {str(error)}")
```
###Assistant :
Function to create an ArangoDB session
and execute the AQL query in the session.
:param query: AQL query
:return: Result
|
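A hedged usage sketch against the Airflow ArangoDB provider (the connection id and AQL collection are illustrative):
```Python
from airflow.providers.arangodb.hooks.arangodb import ArangoDBHook

hook = ArangoDBHook(arangodb_conn_id="arangodb_default")
cursor = hook.query("FOR doc IN students FILTER doc.age > 20 RETURN doc")
for doc in cursor:
    print(doc)
```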
140 | def test_post_process_frame(feature_names, target_names):
pd = pytest.importorskip("pandas")
X_original = pd.DataFrame(
{
"col_int_as_integer": [1, 2, 3],
"col_int_as_numeric": [1, 2, 3],
"col_float_as_real": [1.0, 2.0, 3.0],
"col_float_as_numeric": [1.0, 2.0, 3.0],
"col_categorical": ["a", "b", "c"],
"col_string": ["a", "b", "c"],
}
)
X, y = _post_process_frame(X_original, feature_names, target_names)
assert isinstance(X, pd.DataFrame)
if len(target_names) >= 2:
assert isinstance(y, pd.DataFrame)
elif len(target_names) == 1:
assert isinstance(y, pd.Series)
else:
assert y is None
| Check the behaviour of the post-processing function for splitting a dataframe. | 11 | 64 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_post_process_frame(feature_names, target_names):
pd = pytest.importorskip("pandas")
X_original = pd.DataFrame(
{
"col_int_as_integer": [1, 2, 3],
"col_int_as_numeric": [1, 2, 3],
"col_float_as_real": [1.0, 2.0, 3.0],
"col_float_as_numeric": [1.0, 2.0, 3.0],
"col_categorical": ["a", "b", "c"],
"col_string": ["a", "b", "c"],
}
)
X, y = _post_process_frame(X_original, feature_names, target_names)
assert isinstance(X, pd.DataFrame)
if len(target_names) >= 2:
assert isinstance(y, pd.DataFrame)
elif len(target_names) == 1:
assert isinstance(y, pd.Series)
else:
assert y is None
```
###Assistant : Check the behaviour of the post-processing function for splitting a dataframe.
|
141 | def cleanse_setting(self, key, value):
try:
is_sensitive = self.hidden_settings.search(key)
except TypeError:
is_sensitive = False
if is_sensitive:
cleansed = self.cleansed_substitute
elif isinstance(value, dict):
cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}
elif isinstance(value, list):
cleansed = [self.cleanse_setting("", v) for v in value]
elif isinstance(value, tuple):
cleansed = tuple([self.cleanse_setting("", v) for v in value])
else:
cleansed = value
if callable(cleansed):
cleansed = CallableSettingWrapper(cleansed)
return cleansed
|
Cleanse an individual setting key/value of sensitive content. If the
value is a dictionary, recursively cleanse the keys in that dictionary.
| 21 | 64 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def cleanse_setting(self, key, value):
try:
is_sensitive = self.hidden_settings.search(key)
except TypeError:
is_sensitive = False
if is_sensitive:
cleansed = self.cleansed_substitute
elif isinstance(value, dict):
cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}
elif isinstance(value, list):
cleansed = [self.cleanse_setting("", v) for v in value]
elif isinstance(value, tuple):
cleansed = tuple([self.cleanse_setting("", v) for v in value])
else:
cleansed = value
if callable(cleansed):
cleansed = CallableSettingWrapper(cleansed)
return cleansed
```
###Assistant :
Cleanse an individual setting key/value of sensitive content. If the
value is a dictionary, recursively cleanse the keys in that dictionary.
|
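A self-contained illustration of the recursive cleansing; the host class below is hypothetical and only supplies the attributes the method reads:
```Python
import re

class DemoFilter:
    hidden_settings = re.compile("API|TOKEN|KEY|SECRET|PASS|SIGNATURE", re.I)
    cleansed_substitute = "********************"
    cleanse_setting = cleanse_setting  # reuse the method defined above

f = DemoFilter()
print(f.cleanse_setting("DATABASES", {"default": {"PASSWORD": "hunter2", "NAME": "app"}}))
# {'default': {'PASSWORD': '********************', 'NAME': 'app'}}
```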
142 | def func_dump(func):
if os.name == "nt":
raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/")
code = codecs.encode(raw_code, "base64").decode("ascii")
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, "base64").decode("ascii")
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
| Serializes a user defined function.
Args:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
| 17 | 42 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def func_dump(func):
if os.name == "nt":
raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/")
code = codecs.encode(raw_code, "base64").decode("ascii")
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, "base64").decode("ascii")
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
```
###Assistant : Serializes a user defined function.
Args:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
|
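A short demonstration of the serialized triple (output shown for the non-Windows branch):
```Python
def add(a, b=2):
    return a + b

code, defaults, closure = func_dump(add)
print(type(code), defaults, closure)
# <class 'str'> (2,) None  -- code is a base64 text payload of the marshaled bytecode
```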
143 | def cauchy_upper_bound(f):
if not f.lev:
return dup_cauchy_upper_bound(f.rep, f.dom)
else:
raise ValueError('univariate polynomial expected')
| Computes the Cauchy upper bound on the roots of ``f``. | 10 | 13 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def cauchy_upper_bound(f):
if not f.lev:
return dup_cauchy_upper_bound(f.rep, f.dom)
else:
raise ValueError('univariate polynomial expected')
```
###Assistant : Computes the Cauchy upper bound on the roots of ``f``.
|
144 | def kernS(s):
hit = False
quoted = '"' in s or "'" in s
if '(' in s and not quoted:
if s.count('(') != s.count(")"):
raise SympifyError('unmatched left parenthesis')
# strip all space from s
s = ''.join(s.split())
olds = s
# now use space to represent a symbol that
# will
# step 1. turn potential 2-arg Muls into 3-arg versions
# 1a. *( -> * *(
s = s.replace('*(', '* *(')
# 1b. close up exponentials
s = s.replace('** *', '**')
# 2. handle the implied multiplication of a negated
# parenthesized expression in two steps
# 2a: -(...) --> -( *(...)
target = '-( *('
s = s.replace('-(', target)
# 2b: double the matching closing parenthesis
# -( *(...) --> -( *(...))
i = nest = 0
assert target.endswith('(') # assumption below
while True:
j = s.find(target, i)
if j == -1:
break
j += len(target) - 1
for j in range(j, len(s)):
if s[j] == "(":
nest += 1
elif s[j] == ")":
nest -= 1
if nest == 0:
break
s = s[:j] + ")" + s[j:]
i = j + 2 # the first char after 2nd )
if ' ' in s:
# get a unique kern
kern = '_'
while kern in s:
kern += choice(string.ascii_letters + string.digits)
s = s.replace(' ', kern)
hit = kern in s
else:
hit = False
for i in range(2):
try:
expr = sympify(s)
break
except TypeError: # the kern might cause unknown errors...
if hit:
s = olds # maybe it didn't like the kern; use un-kerned s
hit = False
continue
expr = sympify(s) # let original error raise
if not hit:
return expr
from .symbol import Symbol
rep = {Symbol(kern): 1} | Use a hack to try to keep autosimplification from distributing
a number into an Add; this modification does not
prevent the 2-arg Mul from becoming an Add, however.
Examples
========
>>> from sympy.core.sympify import kernS
>>> from sympy.abc import x, y
The 2-arg Mul distributes a number (or minus sign) across the terms
of an expression, but kernS will prevent that:
>>> 2*(x + y), -(x + 1)
(2*x + 2*y, -x - 1)
>>> kernS('2*(x + y)')
2*(x + y)
>>> kernS('-(x + 1)')
-(x + 1)
If use of the hack fails, the un-hacked string will be passed to sympify...
and you get what you get.
XXX This hack should not be necessary once issue 4596 has been resolved.
| 121 | 288 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def kernS(s):
hit = False
quoted = '"' in s or "'" in s
if '(' in s and not quoted:
if s.count('(') != s.count(")"):
raise SympifyError('unmatched left parenthesis')
# strip all space from s
s = ''.join(s.split())
olds = s
# now use space to represent a symbol that
# will
# step 1. turn potential 2-arg Muls into 3-arg versions
# 1a. *( -> * *(
s = s.replace('*(', '* *(')
# 1b. close up exponentials
s = s.replace('** *', '**')
# 2. handle the implied multiplication of a negated
# parenthesized expression in two steps
# 2a: -(...) --> -( *(...)
target = '-( *('
s = s.replace('-(', target)
# 2b: double the matching closing parenthesis
# -( *(...) --> -( *(...))
i = nest = 0
assert target.endswith('(') # assumption below
while True:
j = s.find(target, i)
if j == -1:
break
j += len(target) - 1
for j in range(j, len(s)):
if s[j] == "(":
nest += 1
elif s[j] == ")":
nest -= 1
if nest == 0:
break
s = s[:j] + ")" + s[j:]
i = j + 2 # the first char after 2nd )
if ' ' in s:
# get a unique kern
kern = '_'
while kern in s:
kern += choice(string.ascii_letters + string.digits)
s = s.replace(' ', kern)
hit = kern in s
else:
hit = False
for i in range(2):
try:
expr = sympify(s)
break
except TypeError: # the kern might cause unknown errors...
if hit:
s = olds # maybe it didn't like the kern; use un-kerned s
hit = False
continue
expr = sympify(s) # let original error raise
if not hit:
return expr
from .symbol import Symbol
rep = {Symbol(kern): 1}
```
###Assistant : Use a hack to try to keep autosimplification from distributing
a number into an Add; this modification does not
prevent the 2-arg Mul from becoming an Add, however.
Examples
========
>>> from sympy.core.sympify import kernS
>>> from sympy.abc import x, y
The 2-arg Mul distributes a number (or minus sign) across the terms
of an expression, but kernS will prevent that:
>>> 2*(x + y), -(x + 1)
(2*x + 2*y, -x - 1)
>>> kernS('2*(x + y)')
2*(x + y)
>>> kernS('-(x + 1)')
-(x + 1)
If use of the hack fails, the un-hacked string will be passed to sympify...
and you get what you get.
XXX This hack should not be necessary once issue 4596 has been resolved.
|
145 | def test_load_corrupt_file(self, patched_pickle_load):
# First load is the schema version
patched_pickle_load.side_effect = [DocumentClassifier.FORMAT_VERSION, OSError()]
with self.assertRaises(ClassifierModelCorruptError):
self.classifier.load()
|
GIVEN:
- Corrupted classifier pickle file
WHEN:
- An attempt is made to load the classifier
THEN:
- The ClassifierModelCorruptError is raised
| 22 | 17 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_load_corrupt_file(self, patched_pickle_load):
# First load is the schema version
patched_pickle_load.side_effect = [DocumentClassifier.FORMAT_VERSION, OSError()]
with self.assertRaises(ClassifierModelCorruptError):
self.classifier.load()
```
###Assistant :
GIVEN:
- Corrupted classifier pickle file
WHEN:
- An attempt is made to load the classifier
THEN:
- The ClassifierModelCorruptError is raised
|
146 | def current_option(self) -> str:
return self.device[self.entity_description.current_option_key]
| Return the selected entity option to represent the entity state. | 10 | 6 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def current_option(self) -> str:
return self.device[self.entity_description.current_option_key]
```
###Assistant : Return the selected entity option to represent the entity state.
|
147 | def gen_html(img):
html_code = img['html']['structure']['tokens'].copy()
to_insert = [i for i, tag in enumerate(html_code) if tag in ('<td>', '>')]
for i, cell in zip(to_insert[::-1], img['html']['cells'][::-1]):
if cell['tokens']:
text = ''.join(cell['tokens'])
# skip empty text
sp_char_list = ['<b>', '</b>', '\u2028', ' ', '<i>', '</i>']
text_remove_style = skip_char(text, sp_char_list)
if len(text_remove_style) == 0:
continue
html_code.insert(i + 1, text)
html_code = ''.join(html_code)
html_code = '<html><body><table>{}</table></body></html>'.format(html_code)
return html_code
|
Formats HTML code from tokenized annotation of img
| 8 | 63 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def gen_html(img):
html_code = img['html']['structure']['tokens'].copy()
to_insert = [i for i, tag in enumerate(html_code) if tag in ('<td>', '>')]
for i, cell in zip(to_insert[::-1], img['html']['cells'][::-1]):
if cell['tokens']:
text = ''.join(cell['tokens'])
# skip empty text
sp_char_list = ['<b>', '</b>', '\u2028', ' ', '<i>', '</i>']
text_remove_style = skip_char(text, sp_char_list)
if len(text_remove_style) == 0:
continue
html_code.insert(i + 1, text)
html_code = ''.join(html_code)
html_code = '<html><body><table>{}</table></body></html>'.format(html_code)
return html_code
```
###Assistant :
Formats HTML code from tokenized annotation of img
|
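A hypothetical one-cell annotation, assuming `skip_char` simply strips the listed style tokens when testing for empty text:
```Python
img = {
    'html': {
        'structure': {'tokens': ['<tr>', '<td>', '</td>', '</tr>']},
        'cells': [{'tokens': ['<b>', 'Total', '</b>']}],
    }
}
print(gen_html(img))
# <html><body><table><tr><td><b>Total</b></td></tr></table></html>
```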
148 | def key_aliases(self) -> Iterable[str]:
for alias in _get_key_aliases(self.key):
yield _normalize_key(alias)
| Get the aliases for the key, including the key itself | 10 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def key_aliases(self) -> Iterable[str]:
for alias in _get_key_aliases(self.key):
yield _normalize_key(alias)
```
###Assistant : Get the aliases for the key, including the key itself
|
149 | def test_create_single_available_ip(self):
vrf = VRF.objects.create(name='VRF 1')
prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/30'), vrf=vrf, is_pool=True)
url = reverse('ipam-api:prefix-available-ips', kwargs={'pk': prefix.pk})
self.add_permissions('ipam.view_prefix', 'ipam.add_ipaddress')
# Create all four available IPs with individual requests
for i in range(1, 5):
data = {
'description': 'Test IP {}'.format(i)
}
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(response.data['vrf']['id'], vrf.pk)
self.assertEqual(response.data['description'], data['description'])
# Try to create one more IP
response = self.client.post(url, {}, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_409_CONFLICT)
self.assertIn('detail', response.data)
|
Test creation of single available IP addresses within a parent prefix, one request at a time, until it is exhausted.
| 12 | 69 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_create_single_available_ip(self):
vrf = VRF.objects.create(name='VRF 1')
prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/30'), vrf=vrf, is_pool=True)
url = reverse('ipam-api:prefix-available-ips', kwargs={'pk': prefix.pk})
self.add_permissions('ipam.view_prefix', 'ipam.add_ipaddress')
# Create all four available IPs with individual requests
for i in range(1, 5):
data = {
'description': 'Test IP {}'.format(i)
}
response = self.client.post(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(response.data['vrf']['id'], vrf.pk)
self.assertEqual(response.data['description'], data['description'])
# Try to create one more IP
response = self.client.post(url, {}, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_409_CONFLICT)
self.assertIn('detail', response.data)
```
###Assistant :
Test creation of single available IP addresses within a parent prefix, one request at a time, until it is exhausted.
|
150 | def climate_adc_t3000_missing_setpoint_fixture(client, climate_adc_t3000_state):
data = copy.deepcopy(climate_adc_t3000_state)
data["name"] = f"{data['name']} missing setpoint"
for value in data["values"][:]:
if (
value["commandClassName"] == "Humidity Control Setpoint"
and value["propertyKeyName"] == "De-humidifier"
):
data["values"].remove(value)
node = Node(client, data)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_adc_t3000_missing_mode") | Mock a climate ADC-T3000 node with missing de-humidify setpoint. | 9 | 38 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def climate_adc_t3000_missing_setpoint_fixture(client, climate_adc_t3000_state):
data = copy.deepcopy(climate_adc_t3000_state)
data["name"] = f"{data['name']} missing setpoint"
for value in data["values"][:]:
if (
value["commandClassName"] == "Humidity Control Setpoint"
and value["propertyKeyName"] == "De-humidifier"
):
data["values"].remove(value)
node = Node(client, data)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_adc_t3000_missing_mode")
```
###Assistant : Mock a climate ADC-T3000 node with missing de-humidify setpoint.
|
151 | def classify_ode(eq, func=None, dict=False, ics=None, *, prep=True, xi=None, eta=None, n=None, **kwargs):
r |
Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve`
classifications for an ODE.
The tuple is ordered so that first item is the classification that
:py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In
general, classifications near the beginning of the list will
produce better solutions faster than those near the end, though there are
always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a
different classification, use ``dsolve(ODE, func,
hint=<classification>)``. See also the
:py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints
you can use.
If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will
return a dictionary of ``hint:match`` expression terms. This is intended
for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that
because dictionaries are ordered arbitrarily, this will most likely not be
in the same order as the tuple.
You can get help on different hints by executing
``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint
without ``_Integral``.
See :py:data:`~sympy.solvers.ode.allhints` or the
:py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints
that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`.
Notes
=====
These are remarks on hint names.
``_Integral``
If a classification has ``_Integral`` at the end, it will return the
expression with an unevaluated :py:class:`~.Integral` | 184 | 12 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def classify_ode(eq, func=None, dict=False, ics=None, *, prep=True, xi=None, eta=None, n=None, **kwargs):
r
```
###Assistant :
Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve`
classifications for an ODE.
The tuple is ordered so that first item is the classification that
:py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In
general, classifications near the beginning of the list will
produce better solutions faster than those near the end, though there are
always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a
different classification, use ``dsolve(ODE, func,
hint=<classification>)``. See also the
:py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints
you can use.
If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will
return a dictionary of ``hint:match`` expression terms. This is intended
for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that
because dictionaries are ordered arbitrarily, this will most likely not be
in the same order as the tuple.
You can get help on different hints by executing
``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint
without ``_Integral``.
See :py:data:`~sympy.solvers.ode.allhints` or the
:py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints
that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`.
Notes
=====
These are remarks on hint names.
``_Integral``
If a classification has ``_Integral`` at the end, it will return the
expression with an unevaluated :py:class:`~.Integral`
|
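A short usage sketch; the exact hint tuple depends on the SymPy version, so it is not asserted here:
```Python
from sympy import Eq, Function, classify_ode
from sympy.abc import x

f = Function('f')
hints = classify_ode(Eq(f(x).diff(x), f(x)), f(x))
print(hints[0])    # the default hint dsolve() would try first
print(len(hints))  # number of applicable classifications
```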
152 | def mode(a, axis=0, nan_policy='propagate'):
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
if a.dtype == object and np.nan in set(a.ravel()):
# Fall back to a slower method since np.unique does not work with NaN
scores = set(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.sum(template, axis, keepdims=True)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
| Return an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
ModeResult(mode=array([3]), count=array([3]))
| 183 | 108 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def mode(a, axis=0, nan_policy='propagate'):
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
if a.dtype == object and np.nan in set(a.ravel()):
# Fall back to a slower method since np.unique does not work with NaN
scores = set(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.sum(template, axis, keepdims=True)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
```
###Assistant : Return an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
ModeResult(mode=array([3]), count=array([3]))
|
153 | def wait_for_instance(self) -> AnsibleCoreCI:
core_ci = self.get_instance()
core_ci.wait()
return core_ci
| Wait for an AnsibleCoreCI VM instance to become ready. | 9 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def wait_for_instance(self) -> AnsibleCoreCI:
core_ci = self.get_instance()
core_ci.wait()
return core_ci
```
###Assistant : Wait for an AnsibleCoreCI VM instance to become ready.
|
154 | def spherical_bessel_fn(n, x=None, polys=False):
if n < 0:
dup = dup_spherical_bessel_fn_minus(-int(n), ZZ)
else:
dup = dup_spherical_bessel_fn(int(n), ZZ)
poly = DMP(dup, ZZ)
if x is not None:
poly = Poly.new(poly, 1/x)
else:
poly = PurePoly.new(poly, 1/Dummy('x'))
return poly if polys else poly.as_expr()
|
Coefficients for the spherical Bessel functions.
Those are only needed in the jn() function.
The coefficients are calculated from:
fn(0, z) = 1/z
fn(1, z) = 1/z**2
fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z)
Parameters
==========
n : int
`n` decides the degree of polynomial
x : optional
polys : bool, optional
If True, return a Poly, otherwise (default) return an expression.
Examples
========
>>> from sympy.polys.orthopolys import spherical_bessel_fn as fn
>>> from sympy import Symbol
>>> z = Symbol("z")
>>> fn(1, z)
z**(-2)
>>> fn(2, z)
-1/z + 3/z**3
>>> fn(3, z)
-6/z**2 + 15/z**4
>>> fn(4, z)
1/z - 45/z**3 + 105/z**5
| 107 | 41 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def spherical_bessel_fn(n, x=None, polys=False):
if n < 0:
dup = dup_spherical_bessel_fn_minus(-int(n), ZZ)
else:
dup = dup_spherical_bessel_fn(int(n), ZZ)
poly = DMP(dup, ZZ)
if x is not None:
poly = Poly.new(poly, 1/x)
else:
poly = PurePoly.new(poly, 1/Dummy('x'))
return poly if polys else poly.as_expr()
```
###Assistant :
Coefficients for the spherical Bessel functions.
Those are only needed in the jn() function.
The coefficients are calculated from:
fn(0, z) = 1/z
fn(1, z) = 1/z**2
fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z)
Parameters
==========
n : int
`n` decides the degree of polynomial
x : optional
polys : bool, optional
If True, return a Poly, otherwise (default) return an expression.
Examples
========
>>> from sympy.polys.orthopolys import spherical_bessel_fn as fn
>>> from sympy import Symbol
>>> z = Symbol("z")
>>> fn(1, z)
z**(-2)
>>> fn(2, z)
-1/z + 3/z**3
>>> fn(3, z)
-6/z**2 + 15/z**4
>>> fn(4, z)
1/z - 45/z**3 + 105/z**5
|
155 | def raft_large(*, pretrained=False, progress=True, **kwargs):
return _raft(
arch="raft_large",
pretrained=pretrained,
progress=progress,
# Feature encoder
feature_encoder_layers=(64, 64, 96, 128, 256),
feature_encoder_block=ResidualBlock,
feature_encoder_norm_layer=InstanceNorm2d,
# Context encoder
context_encoder_layers=(64, 64, 96, 128, 256),
context_encoder_block=ResidualBlock,
context_encoder_norm_layer=BatchNorm2d,
# Correlation block
corr_block_num_levels=4,
corr_block_radius=4,
# Motion encoder
motion_encoder_corr_layers=(256, 192),
motion_encoder_flow_layers=(128, 64),
motion_encoder_out_channels=128,
# Recurrent block
recurrent_block_hidden_state_size=128,
recurrent_block_kernel_size=((1, 5), (5, 1)),
recurrent_block_padding=((0, 2), (2, 0)),
# Flow head
flow_head_hidden_size=256,
# Mask predictor
use_mask_predictor=True,
**kwargs,
)
| RAFT model from
`RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.
Args:
pretrained (bool): Whether to use weights that have been pre-trained on
:class:`~torchvision.datasets.FlyingChairs` + :class:`~torchvision.datasets.FlyingThings3D`
with two fine-tuning steps:
- one on :class:`~torchvision.datasets.Sintel` + :class:`~torchvision.datasets.FlyingThings3D`
- one on :class:`~torchvision.datasets.KittiFlow`.
This corresponds to the ``C+T+S/K`` strategy in the paper.
progress (bool): If True, displays a progress bar of the download to stderr.
Returns:
nn.Module: The model.
| 68 | 65 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def raft_large(*, pretrained=False, progress=True, **kwargs):
return _raft(
arch="raft_large",
pretrained=pretrained,
progress=progress,
# Feature encoder
feature_encoder_layers=(64, 64, 96, 128, 256),
feature_encoder_block=ResidualBlock,
feature_encoder_norm_layer=InstanceNorm2d,
# Context encoder
context_encoder_layers=(64, 64, 96, 128, 256),
context_encoder_block=ResidualBlock,
context_encoder_norm_layer=BatchNorm2d,
# Correlation block
corr_block_num_levels=4,
corr_block_radius=4,
# Motion encoder
motion_encoder_corr_layers=(256, 192),
motion_encoder_flow_layers=(128, 64),
motion_encoder_out_channels=128,
# Recurrent block
recurrent_block_hidden_state_size=128,
recurrent_block_kernel_size=((1, 5), (5, 1)),
recurrent_block_padding=((0, 2), (2, 0)),
# Flow head
flow_head_hidden_size=256,
# Mask predictor
use_mask_predictor=True,
**kwargs,
)
```
###Assistant : RAFT model from
`RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.
Args:
pretrained (bool): Whether to use weights that have been pre-trained on
:class:`~torchvision.datasets.FlyingChairs` + :class:`~torchvision.datasets.FlyingThings3D`
with two fine-tuning steps:
- one on :class:`~torchvision.datasets.Sintel` + :class:`~torchvision.datasets.FlyingThings3D`
- one on :class:`~torchvision.datasets.KittiFlow`.
This corresponds to the ``C+T+S/K`` strategy in the paper.
progress (bool): If True, displays a progress bar of the download to stderr.
Returns:
nn.Module: The model.
|
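A hedged inference sketch (inputs are random batches of 3-channel images whose sides are multiples of 8, as RAFT requires):
```Python
import torch

model = raft_large(pretrained=False, progress=False).eval()
img1 = torch.rand(1, 3, 224, 224)
img2 = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    flows = model(img1, img2)  # list of flow estimates, one per recurrent update
print(flows[-1].shape)  # torch.Size([1, 2, 224, 224])
```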
156 | def _get_prettyprint_usage(self, console, executor_name, usage_kind=None):
from rich.panel import Panel
from rich.syntax import Syntax
flow_plain = f
flow_docker = f
flow_sandbox = f
panels = [
Panel(
Syntax(
p[0],
'python',
theme='monokai',
word_wrap=True,
),
title=p[1],
width=80,
expand=False,
)
for p in [
(flow_plain, 'Use via source'),
(flow_docker, 'Use in Docker'),
(flow_sandbox, 'Use in Sandbox'),
]
]
if usage_kind == 'docker':
console.print(panels[2])
elif usage_kind == 'source':
console.print(panels[1])
else:
console.print(*reversed(panels))
| from jina import Flow
f = Flow().add(uses='jinahub://{executor_name}')
from jina import Flow
f = Flow().add(uses='jinahub+docker://{executor_name}')
from jina import Flow
f = Flow().add(uses='jinahub+sandbox://{executor_name}')
| 21 | 66 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_prettyprint_usage(self, console, executor_name, usage_kind=None):
from rich.panel import Panel
from rich.syntax import Syntax
flow_plain = f
flow_docker = f
flow_sandbox = f
panels = [
Panel(
Syntax(
p[0],
'python',
theme='monokai',
word_wrap=True,
),
title=p[1],
width=80,
expand=False,
)
for p in [
(flow_plain, 'Use via source'),
(flow_docker, 'Use in Docker'),
(flow_sandbox, 'Use in Sandbox'),
]
]
if usage_kind == 'docker':
console.print(panels[2])
elif usage_kind == 'source':
console.print(panels[1])
else:
console.print(*reversed(panels))
```
###Assistant : from jina import Flow
f = Flow().add(uses='jinahub://{executor_name}')
from jina import Flow
f = Flow().add(uses='jinahub+docker://{executor_name}')
from jina import Flow
f = Flow().add(uses='jinahub+sandbox://{executor_name}')
|
157 | def encoding(self, val):
self._encoding = val
if hasattr(self, "GET"):
del self.GET
if hasattr(self, "_post"):
del self._post
|
Set the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, remove and recreate it on the
next access (so that it is decoded correctly).
| 31 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def encoding(self, val):
self._encoding = val
if hasattr(self, "GET"):
del self.GET
if hasattr(self, "_post"):
del self._post
```
###Assistant :
Set the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, remove and recreate it on the
next access (so that it is decoded correctly).
|
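An illustrative check of the underlying re-decoding, using Django's QueryDict directly (a minimal settings.configure() keeps the sketch runnable standalone):
```Python
from django.conf import settings

settings.configure()  # minimal settings so QueryDict works outside a project
from django.http import QueryDict

# %DF is byte 0xDF, which decodes to 'ß' under Latin-1 but is invalid UTF-8.
qd = QueryDict(b"name=Fu%DFball", encoding="iso-8859-1")
print(qd["name"])  # Fußball
```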
158 | def scale(self, factor, scale_tips=False, **kwargs):
r
if self.get_length() == 0:
return self
if scale_tips:
super().scale(factor, **kwargs)
self._set_stroke_width_from_length()
return self
has_tip = self.has_tip()
has_start_tip = self.has_start_tip()
if has_tip or has_start_tip:
old_tips = self.pop_tips()
super().scale(factor, **kwargs)
self._set_stroke_width_from_length()
if has_tip:
self.add_tip(tip=old_tips[0])
if has_start_tip:
self.add_tip(tip=old_tips[1], at_start=True)
return self
| Scale an arrow, but keep stroke width and arrow tip size fixed.
See Also
--------
:meth:`~.Mobject.scale`
Examples
--------
::
>>> arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0)
>>> scaled_arrow = arrow.scale(2)
>>> np.round(scaled_arrow.get_start_and_end(), 8) + 0
array([[-2., -2., 0.],
[ 2., 2., 0.]])
>>> arrow.tip.length == scaled_arrow.tip.length
True
Manually scaling the object using the default method
:meth:`~.Mobject.scale` does not have the same properties::
>>> new_arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0)
>>> another_scaled_arrow = VMobject.scale(new_arrow, 2)
>>> another_scaled_arrow.tip.length == arrow.tip.length
False
| 85 | 44 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def scale(self, factor, scale_tips=False, **kwargs):
r
if self.get_length() == 0:
return self
if scale_tips:
super().scale(factor, **kwargs)
self._set_stroke_width_from_length()
return self
has_tip = self.has_tip()
has_start_tip = self.has_start_tip()
if has_tip or has_start_tip:
old_tips = self.pop_tips()
super().scale(factor, **kwargs)
self._set_stroke_width_from_length()
if has_tip:
self.add_tip(tip=old_tips[0])
if has_start_tip:
self.add_tip(tip=old_tips[1], at_start=True)
return self
```
###Assistant : Scale an arrow, but keep stroke width and arrow tip size fixed.
See Also
--------
:meth:`~.Mobject.scale`
Examples
--------
::
>>> arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0)
>>> scaled_arrow = arrow.scale(2)
>>> np.round(scaled_arrow.get_start_and_end(), 8) + 0
array([[-2., -2., 0.],
[ 2., 2., 0.]])
>>> arrow.tip.length == scaled_arrow.tip.length
True
Manually scaling the object using the default method
:meth:`~.Mobject.scale` does not have the same properties::
>>> new_arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0)
>>> another_scaled_arrow = VMobject.scale(new_arrow, 2)
>>> another_scaled_arrow.tip.length == arrow.tip.length
False
|
159 | def effective(file, line, frame):
possibles = Breakpoint.bplist[file, line]
for b in possibles:
if not b.enabled:
continue
if not checkfuncname(b, frame):
continue
# Count every hit when bp is enabled
b.hits += 1
if not b.cond:
# If unconditional, and ignoring go on to next, else break
if b.ignore > 0:
b.ignore -= 1
continue
else:
# breakpoint and marker that it's ok to delete if temporary
return (b, True)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.cond, frame.f_globals, frame.f_locals)
if val:
if b.ignore > 0:
b.ignore -= 1
# continue
else:
return (b, True)
# else:
# continue
except:
# if eval fails, most conservative thing is to stop on
# breakpoint regardless of ignore count. Don't delete
# temporary, as another hint to user.
return (b, False)
return (None, None)
# -------------------- testing --------------------
| Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a breakpoint at this location. Return
the breakpoint that was triggered and a boolean that indicates if it is
ok to delete a temporary breakpoint. Return (None, None) if there is no
matching breakpoint.
| 52 | 151 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def effective(file, line, frame):
possibles = Breakpoint.bplist[file, line]
for b in possibles:
if not b.enabled:
continue
if not checkfuncname(b, frame):
continue
# Count every hit when bp is enabled
b.hits += 1
if not b.cond:
# If unconditional, and ignoring go on to next, else break
if b.ignore > 0:
b.ignore -= 1
continue
else:
# breakpoint and marker that it's ok to delete if temporary
return (b, True)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.cond, frame.f_globals, frame.f_locals)
if val:
if b.ignore > 0:
b.ignore -= 1
# continue
else:
return (b, True)
# else:
# continue
except:
# if eval fails, most conservative thing is to stop on
# breakpoint regardless of ignore count. Don't delete
# temporary, as another hint to user.
return (b, False)
return (None, None)
# -------------------- testing --------------------
```
###Assistant : Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a breakpoint at this location. Return
the breakpoint that was triggered and a boolean that indicates if it is
ok to delete a temporary breakpoint. Return (None, None) if there is no
matching breakpoint.
|
160 | def _log_gauss_mass(a, b):
a, b = jnp.array(a), jnp.array(b)
a, b = jnp.broadcast_arrays(a, b)
# Note: Docstring carried over from scipy
# Calculations in right tail are inaccurate, so we'll exploit the
# symmetry and work only in the left tail
case_left = b <= 0
case_right = a > 0
case_central = ~(case_left | case_right)
def mass_case_left(a, b):
return _log_diff(log_ndtr(b), log_ndtr(a))
def mass_case_right(a, b):
return mass_case_left(-b, -a)
def mass_case_central(a, b):
# Note: Docstring carried over from scipy
# Previously, this was implemented as:
# left_mass = mass_case_left(a, 0)
# right_mass = mass_case_right(0, b)
# return _log_sum(left_mass, right_mass)
# Catastrophic cancellation occurs as np.exp(log_mass) approaches 1.
# Correct for this with an alternative formulation.
# We're not concerned with underflow here: if only one term
# underflows, it was insignificant; if both terms underflow,
# the result can't accurately be represented in logspace anyway
# because sc.log1p(x) ~ x for small x.
return jnp.log1p(-ndtr(a) - ndtr(-b))
out = jnp.select(
[case_left, case_right, case_central],
[mass_case_left(a, b), mass_case_right(a, b), mass_case_central(a, b)]
)
return out
@_wraps(osp_stats.truncnorm.logpdf, update_doc=False) | Log of Gaussian probability mass within an interval | 8 | 172 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _log_gauss_mass(a, b):
a, b = jnp.array(a), jnp.array(b)
a, b = jnp.broadcast_arrays(a, b)
# Note: Docstring carried over from scipy
# Calculations in right tail are inaccurate, so we'll exploit the
# symmetry and work only in the left tail
case_left = b <= 0
case_right = a > 0
case_central = ~(case_left | case_right)
def mass_case_left(a, b):
return _log_diff(log_ndtr(b), log_ndtr(a))
def mass_case_right(a, b):
return mass_case_left(-b, -a)
def mass_case_central(a, b):
# Note: Docstring carried over from scipy
# Previously, this was implemented as:
# left_mass = mass_case_left(a, 0)
# right_mass = mass_case_right(0, b)
# return _log_sum(left_mass, right_mass)
# Catastrophic cancellation occurs as np.exp(log_mass) approaches 1.
# Correct for this with an alternative formulation.
# We're not concerned with underflow here: if only one term
# underflows, it was insignificant; if both terms underflow,
# the result can't accurately be represented in logspace anyway
# because sc.log1p(x) ~ x for small x.
return jnp.log1p(-ndtr(a) - ndtr(-b))
out = jnp.select(
[case_left, case_right, case_central],
[mass_case_left(a, b), mass_case_right(a, b), mass_case_central(a, b)]
)
return out
@_wraps(osp_stats.truncnorm.logpdf, update_doc=False)
```
###Assistant : Log of Gaussian probability mass within an interval
|
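A quick numerical sanity check against the closed-form central mass, Phi(1) - Phi(-1) ≈ 0.6827:
```Python
import jax.numpy as jnp

log_mass = _log_gauss_mass(-1.0, 1.0)
print(jnp.exp(log_mass))  # ~0.6826895, the standard normal mass on [-1, 1]
```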
161 | def test_fx_validator_integration(tmpdir):
not_supported = {
None: "`self.trainer` reference is not registered",
"on_before_accelerator_backend_setup": "You can't",
"setup": "You can't",
"configure_sharded_model": "You can't",
"on_configure_sharded_model": "You can't",
"configure_optimizers": "You can't",
"on_fit_start": "You can't",
"on_pretrain_routine_start": "You can't",
"on_pretrain_routine_end": "You can't",
"on_train_dataloader": "You can't",
"train_dataloader": "You can't",
"on_val_dataloader": "You can't",
"val_dataloader": "You can't",
"on_validation_end": "You can't",
"on_train_end": "You can't",
"on_fit_end": "You can't",
"teardown": "You can't",
"on_sanity_check_start": "You can't",
"on_sanity_check_end": "You can't",
"prepare_data": "You can't",
"configure_callbacks": "You can't",
"on_validation_model_eval": "You can't",
"on_validation_model_train": "You can't",
"lr_scheduler_step": "You can't",
"summarize": "not managed by the `Trainer",
}
model = HookedModel(not_supported)
with pytest.warns(UserWarning, match=not_supported[None]):
model.log("foo", 1)
callback = HookedCallback(not_supported)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_train_batches=1,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
callbacks=callback,
)
with pytest.deprecated_call(match="on_train_dataloader` is deprecated in v1.5"):
trainer.fit(model)
not_supported.update(
{
# `lightning_module` ref is now present from the `fit` call
"on_before_accelerator_backend_setup": "You can't",
"on_test_dataloader": "You can't",
"test_dataloader": "You can't",
"on_test_model_eval": "You can't",
"on_test_model_train": "You can't",
"on_test_end": "You can't",
}
)
with pytest.deprecated_call(match="on_test_dataloader` is deprecated in v1.5"):
trainer.test(model, verbose=False)
not_supported.update({k: "result collection is not registered yet" for k in not_supported})
not_supported.update(
{
"on_predict_dataloader": "result collection is not registered yet",
"predict_dataloader": "result collection is not registered yet",
"on_predict_model_eval": "result collection is not registered yet",
"on_predict_start": "result collection is not registered yet",
"on_predict_epoch_start": "result collection is not registered yet",
"on_predict_batch_start": "result collection is not registered yet",
"predict_step": "result collection is not registered yet",
"on_predict_batch_end": "result collection is not registered yet",
"on_predict_epoch_end": "result collection is not registered yet",
"on_predict_end": "result collection is not registered yet",
}
)
with pytest.deprecated_call(match="on_predict_dataloader` is deprecated in v1.5"):
trainer.predict(model)
@RunIf(min_gpus=2) | Tries to log inside all `LightningModule` and `Callback` hooks to check any expected errors. | 14 | 249 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_fx_validator_integration(tmpdir):
not_supported = {
None: "`self.trainer` reference is not registered",
"on_before_accelerator_backend_setup": "You can't",
"setup": "You can't",
"configure_sharded_model": "You can't",
"on_configure_sharded_model": "You can't",
"configure_optimizers": "You can't",
"on_fit_start": "You can't",
"on_pretrain_routine_start": "You can't",
"on_pretrain_routine_end": "You can't",
"on_train_dataloader": "You can't",
"train_dataloader": "You can't",
"on_val_dataloader": "You can't",
"val_dataloader": "You can't",
"on_validation_end": "You can't",
"on_train_end": "You can't",
"on_fit_end": "You can't",
"teardown": "You can't",
"on_sanity_check_start": "You can't",
"on_sanity_check_end": "You can't",
"prepare_data": "You can't",
"configure_callbacks": "You can't",
"on_validation_model_eval": "You can't",
"on_validation_model_train": "You can't",
"lr_scheduler_step": "You can't",
"summarize": "not managed by the `Trainer",
}
model = HookedModel(not_supported)
with pytest.warns(UserWarning, match=not_supported[None]):
model.log("foo", 1)
callback = HookedCallback(not_supported)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_train_batches=1,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
callbacks=callback,
)
with pytest.deprecated_call(match="on_train_dataloader` is deprecated in v1.5"):
trainer.fit(model)
not_supported.update(
{
# `lightning_module` ref is now present from the `fit` call
"on_before_accelerator_backend_setup": "You can't",
"on_test_dataloader": "You can't",
"test_dataloader": "You can't",
"on_test_model_eval": "You can't",
"on_test_model_train": "You can't",
"on_test_end": "You can't",
}
)
with pytest.deprecated_call(match="on_test_dataloader` is deprecated in v1.5"):
trainer.test(model, verbose=False)
not_supported.update({k: "result collection is not registered yet" for k in not_supported})
not_supported.update(
{
"on_predict_dataloader": "result collection is not registered yet",
"predict_dataloader": "result collection is not registered yet",
"on_predict_model_eval": "result collection is not registered yet",
"on_predict_start": "result collection is not registered yet",
"on_predict_epoch_start": "result collection is not registered yet",
"on_predict_batch_start": "result collection is not registered yet",
"predict_step": "result collection is not registered yet",
"on_predict_batch_end": "result collection is not registered yet",
"on_predict_epoch_end": "result collection is not registered yet",
"on_predict_end": "result collection is not registered yet",
}
)
with pytest.deprecated_call(match="on_predict_dataloader` is deprecated in v1.5"):
trainer.predict(model)
@RunIf(min_gpus=2)
```
###Assistant : Tries to log inside all `LightningModule` and `Callback` hooks to check any expected errors.
|
162 | def enable_application_mode() -> Callable[[], None]:
terminal_in = sys.stdin
terminal_out = sys.stdout
current_console_mode_in = _get_console_mode(terminal_in)
current_console_mode_out = _get_console_mode(terminal_out)
| Enable application mode.
Returns:
Callable[[], None]: A callable that will restore the terminal to its previous state.
| 15 | 17 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def enable_application_mode() -> Callable[[], None]:
terminal_in = sys.stdin
terminal_out = sys.stdout
current_console_mode_in = _get_console_mode(terminal_in)
current_console_mode_out = _get_console_mode(terminal_out)
```
###Assistant : Enable application mode.
Returns:
Callable[[], None]: A callable that will restore the terminal to its previous state.
|
163 | def get_attributes(self) -> dict[str, str]:
return _attributes(
message=self.message,
type=self.type,
)
| Return a dictionary of attributes for this instance. | 8 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_attributes(self) -> dict[str, str]:
return _attributes(
message=self.message,
type=self.type,
)
```
###Assistant : Return a dictionary of attributes for this instance.
|
164 | def check_handle_timedout(self) -> None:
for trade in Trade.get_open_order_trades():
try:
if not trade.open_order_id:
continue
order = self.exchange.fetch_order(trade.open_order_id, trade.pair)
except (ExchangeError):
logger.info('Cannot query order for %s due to %s', trade, traceback.format_exc())
continue
fully_cancelled = self.update_trade_state(trade, trade.open_order_id, order)
if (order['side'] == 'buy' and (order['status'] == 'open' or fully_cancelled) and (
fully_cancelled
or self.strategy.ft_check_timed_out(
'buy', trade, order, datetime.now(timezone.utc))
)):
self.handle_cancel_enter(trade, order, constants.CANCEL_REASON['TIMEOUT'])
elif (order['side'] == 'sell' and (order['status'] == 'open' or fully_cancelled) and (
fully_cancelled
or self.strategy.ft_check_timed_out(
'sell', trade, order, datetime.now(timezone.utc)))
):
self.handle_cancel_exit(trade, order, constants.CANCEL_REASON['TIMEOUT'])
canceled_count = trade.get_exit_order_count()
max_timeouts = self.config.get('unfilledtimeout', {}).get('exit_timeout_count', 0)
if max_timeouts > 0 and canceled_count >= max_timeouts:
logger.warning(f'Emergency selling trade {trade}, as the sell order '
f'timed out {max_timeouts} times.')
try:
self.execute_trade_exit(
trade, order.get('price'),
sell_reason=SellCheckTuple(sell_type=SellType.EMERGENCY_SELL))
except DependencyException as exception:
logger.warning(f'Unable to emergency sell trade {trade.pair}: {exception}')
|
Check if any orders are timed out and cancel if necessary
:param timeoutvalue: Number of minutes until order is considered timed out
:return: None
| 24 | 125 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def check_handle_timedout(self) -> None:
for trade in Trade.get_open_order_trades():
try:
if not trade.open_order_id:
continue
order = self.exchange.fetch_order(trade.open_order_id, trade.pair)
except (ExchangeError):
logger.info('Cannot query order for %s due to %s', trade, traceback.format_exc())
continue
fully_cancelled = self.update_trade_state(trade, trade.open_order_id, order)
if (order['side'] == 'buy' and (order['status'] == 'open' or fully_cancelled) and (
fully_cancelled
or self.strategy.ft_check_timed_out(
'buy', trade, order, datetime.now(timezone.utc))
)):
self.handle_cancel_enter(trade, order, constants.CANCEL_REASON['TIMEOUT'])
elif (order['side'] == 'sell' and (order['status'] == 'open' or fully_cancelled) and (
fully_cancelled
or self.strategy.ft_check_timed_out(
'sell', trade, order, datetime.now(timezone.utc)))
):
self.handle_cancel_exit(trade, order, constants.CANCEL_REASON['TIMEOUT'])
canceled_count = trade.get_exit_order_count()
max_timeouts = self.config.get('unfilledtimeout', {}).get('exit_timeout_count', 0)
if max_timeouts > 0 and canceled_count >= max_timeouts:
logger.warning(f'Emergency selling trade {trade}, as the sell order '
f'timed out {max_timeouts} times.')
try:
self.execute_trade_exit(
trade, order.get('price'),
sell_reason=SellCheckTuple(sell_type=SellType.EMERGENCY_SELL))
except DependencyException as exception:
logger.warning(f'Unable to emergency sell trade {trade.pair}: {exception}')
```
###Assistant :
Check if any orders are timed out and cancel if necessary
:param timeoutvalue: Number of minutes until order is considered timed out
:return: None
|
165 | def test_user_misery_denominator(self):
ProjectTransactionThreshold.objects.create(
project=self.project,
organization=self.project.organization,
threshold=600,
metric=TransactionMetric.LCP.value,
)
lcps = [
400,
400,
300,
3000,
3000,
3000,
]
for idx, lcp in enumerate(lcps):
data = self.load_data(
timestamp=before_now(minutes=(10 + idx)),
)
data["event_id"] = f"{idx}" * 32
data["transaction"] = "/misery/new/"
data["user"] = {"email": f"{idx}@example.com"}
data["measurements"] = {
"lcp": {"value": lcp},
}
self.store_event(data, project_id=self.project.id)
# Shouldn't count towards misery
data = self.load_data(timestamp=self.ten_mins_ago, duration=timedelta(milliseconds=0))
data["transaction"] = "/misery/new/"
data["user"] = {"email": "7@example.com"}
data["measurements"] = {}
self.store_event(data, project_id=self.project.id)
query = {
"field": [
"transaction",
"user_misery()",
],
"query": "event.type:transaction",
"project": [self.project.id],
"sort": "-user_misery",
}
response = self.do_request(
query,
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
data = response.data["data"]
# (3 frustrated + 5.8875) / (6 + 117.75)
assert abs(data[0]["user_misery()"] - 0.071818) < 0.0001
| This is to test against a bug where the denominator of misery (total unique users) was wrong.
This is because the total unique users for an LCP misery should only count users that have had a txn with LCP,
and not count all transactions (i.e. uniq_if(transaction has lcp), not just uniq()).
| 50 | 119 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_user_misery_denominator(self):
ProjectTransactionThreshold.objects.create(
project=self.project,
organization=self.project.organization,
threshold=600,
metric=TransactionMetric.LCP.value,
)
lcps = [
400,
400,
300,
3000,
3000,
3000,
]
for idx, lcp in enumerate(lcps):
data = self.load_data(
timestamp=before_now(minutes=(10 + idx)),
)
data["event_id"] = f"{idx}" * 32
data["transaction"] = "/misery/new/"
data["user"] = {"email": f"{idx}@example.com"}
data["measurements"] = {
"lcp": {"value": lcp},
}
self.store_event(data, project_id=self.project.id)
# Shouldn't count towards misery
data = self.load_data(timestamp=self.ten_mins_ago, duration=timedelta(milliseconds=0))
data["transaction"] = "/misery/new/"
data["user"] = {"email": "7@example.com"}
data["measurements"] = {}
self.store_event(data, project_id=self.project.id)
query = {
"field": [
"transaction",
"user_misery()",
],
"query": "event.type:transaction",
"project": [self.project.id],
"sort": "-user_misery",
}
response = self.do_request(
query,
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 1
data = response.data["data"]
# (3 frustrated + 5.8875) / (6 + 117.75)
assert abs(data[0]["user_misery()"] - 0.071818) < 0.0001
```
###Assistant : This is to test against a bug where the denominator of misery (total unique users) was wrong.
This is because the total unique users for an LCP misery should only count users that have had a txn with LCP,
and not count all transactions (i.e. uniq_if(transaction has lcp), not just uniq()).
|
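A quick check of the expected value in the final assertion, using the smoothing constants that appear in the comment above (this only restates the test's own arithmetic):
```Python
# user_misery = (miserable_users + 5.8875) / (unique_users_with_lcp + 117.75)
miserable_users = 3        # the three 3000ms LCPs against the 600ms threshold
unique_users_with_lcp = 6  # the 7th user stored no lcp measurement
print((miserable_users + 5.8875) / (unique_users_with_lcp + 117.75))  # ~0.071818
```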
166 | def _determine_base_url(document, page_url):
# type: (HTMLElement, str) -> str
for base in document.findall(".//base"):
href = base.get("href")
if href is not None:
return href
return page_url
| Determine the HTML document's base URL.
This looks for a ``<base>`` tag in the HTML document. If present, its href
attribute denotes the base URL of anchor tags in the document. If there is
no such tag (or if it does not have a valid href attribute), the HTML
file's URL is used as the base URL.
:param document: An HTML document representation. The current
implementation expects the result of ``html5lib.parse()``.
:param page_url: The URL of the HTML document.
| 79 | 25 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _determine_base_url(document, page_url):
# type: (HTMLElement, str) -> str
for base in document.findall(".//base"):
href = base.get("href")
if href is not None:
return href
return page_url
```
###Assistant : Determine the HTML document's base URL.
This looks for a ``<base>`` tag in the HTML document. If present, its href
attribute denotes the base URL of anchor tags in the document. If there is
no such tag (or if it does not have a valid href attribute), the HTML
file's URL is used as the base URL.
:param document: An HTML document representation. The current
implementation expects the result of ``html5lib.parse()``.
:param page_url: The URL of the HTML document.
|
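A minimal runnable sketch of the same lookup, using stdlib ElementTree on a well-formed snippet in place of the html5lib parse tree the real caller supplies:
```Python
import xml.etree.ElementTree as ET

def determine_base_url(document, page_url):
    for base in document.findall(".//base"):
        href = base.get("href")
        if href is not None:
            return href
    return page_url

doc = ET.fromstring(
    '<html><head><base href="https://example.com/pkgs/"/></head><body/></html>')
print(determine_base_url(doc, "https://example.com/simple/"))  # https://example.com/pkgs/
```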
167 | def pairwise_distances(self, U, V):
return self._distance_block.eval(feed_dict={self._features_batch1: U, self._features_batch2: V})
#----------------------------------------------------------------------------
| Evaluate pairwise distances between two batches of feature vectors. | 9 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def pairwise_distances(self, U, V):
return self._distance_block.eval(feed_dict={self._features_batch1: U, self._features_batch2: V})
#----------------------------------------------------------------------------
```
###Assistant : Evaluate pairwise distances between two batches of feature vectors.
|
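The TensorFlow graph behind `_distance_block` is defined elsewhere; as an assumption about what such a block evaluates, here is a NumPy sketch of batched pairwise squared Euclidean distances:
```Python
import numpy as np

def pairwise_sq_distances(U, V):
    # ||u - v||^2 = ||u||^2 - 2*u.v + ||v||^2 for every row pair
    return (np.sum(U**2, axis=1)[:, None]
            - 2.0 * U @ V.T
            + np.sum(V**2, axis=1)[None, :])

U, V = np.random.rand(4, 8), np.random.rand(5, 8)
print(pairwise_sq_distances(U, V).shape)  # (4, 5)
```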
168 | def upgrade():
op.create_table(
'project',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('company_id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'company_id', name='unique_integration_name_company_id')
)
conn = op.get_bind()
session = sa.orm.Session(bind=conn)
project_record = db.Project(name='mindsdb')
session.add(project_record)
session.commit()
with op.batch_alter_table('predictor', schema=None) as batch_op:
batch_op.add_column(sa.Column('project_id', sa.Integer()))
batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id'])
conn.execute(sa.sql.text(), project_id=project_record.id)
with op.batch_alter_table('predictor', schema=None) as batch_op:
batch_op.alter_column(
'project_id',
existing_type=sa.INTEGER(),
nullable=False
)
with op.batch_alter_table('view', schema=None) as batch_op:
batch_op.add_column(sa.Column('project_id', sa.Integer()))
batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id'])
conn.execute(sa.sql.text(), project_id=project_record.id)
with op.batch_alter_table('view', schema=None) as batch_op:
batch_op.alter_column(
'project_id',
existing_type=sa.INTEGER(),
nullable=False
)
views = conn.execute().fetchall()
for row in views:
conn.execute(
text(), {
'name': f"{row['name']}_view",
'view_id': row['id']
}
)
view_integration = session.query(db.Integration).filter_by(name='views').first()
if view_integration is not None:
session.delete(view_integration)
session.commit()
|
update predictor set project_id = :project_id
update view set project_id = :project_id
select id, name from view
where exists (select 1 from predictor where view.name = predictor.name)
update view
set name = :name
where id = :view_id
| 37 | 110 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def upgrade():
op.create_table(
'project',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('company_id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', 'company_id', name='unique_integration_name_company_id')
)
conn = op.get_bind()
session = sa.orm.Session(bind=conn)
project_record = db.Project(name='mindsdb')
session.add(project_record)
session.commit()
with op.batch_alter_table('predictor', schema=None) as batch_op:
batch_op.add_column(sa.Column('project_id', sa.Integer()))
batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id'])
conn.execute(sa.sql.text(), project_id=project_record.id)
with op.batch_alter_table('predictor', schema=None) as batch_op:
batch_op.alter_column(
'project_id',
existing_type=sa.INTEGER(),
nullable=False
)
with op.batch_alter_table('view', schema=None) as batch_op:
batch_op.add_column(sa.Column('project_id', sa.Integer()))
batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id'])
conn.execute(sa.sql.text(), project_id=project_record.id)
with op.batch_alter_table('view', schema=None) as batch_op:
batch_op.alter_column(
'project_id',
existing_type=sa.INTEGER(),
nullable=False
)
views = conn.execute().fetchall()
for row in views:
conn.execute(
text(), {
'name': f"{row['name']}_view",
'view_id': row['id']
}
)
view_integration = session.query(db.Integration).filter_by(name='views').first()
if view_integration is not None:
session.delete(view_integration)
session.commit()
```
###Assistant :
update predictor set project_id = :project_id
update view set project_id = :project_id
select id, name from view
where exists (select 1 from predictor where view.name = predictor.name)
update view
set name = :name
where id = :view_id
|
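The empty `sa.sql.text()` / `text()` calls above had their SQL elided into the docs column; pairing them back up by order (an assumption), the first one would read:
```Python
from sqlalchemy import text

stmt = text("update predictor set project_id = :project_id")
# conn.execute(stmt, project_id=project_record.id)
```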
169 | def clear(self) -> None:
self._patterns: List[PatternType] = []
self.matcher: Matcher = Matcher(self.nlp.vocab, validate=self.validate)
self.phrase_matcher: PhraseMatcher = PhraseMatcher(
self.nlp.vocab,
attr=self.phrase_matcher_attr,
validate=self.validate,
)
| Reset all patterns.
RETURNS: None
DOCS: https://spacy.io/api/spanruler#clear
| 7 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def clear(self) -> None:
self._patterns: List[PatternType] = []
self.matcher: Matcher = Matcher(self.nlp.vocab, validate=self.validate)
self.phrase_matcher: PhraseMatcher = PhraseMatcher(
self.nlp.vocab,
attr=self.phrase_matcher_attr,
validate=self.validate,
)
```
###Assistant : Reset all patterns.
RETURNS: None
DOCS: https://spacy.io/api/spanruler#clear
|
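A usage sketch, assuming spaCy >= 3.3 where the `span_ruler` pipeline component is available:
```Python
import spacy

nlp = spacy.blank("en")
ruler = nlp.add_pipe("span_ruler")
ruler.add_patterns([{"label": "ORG", "pattern": "Apple"}])
print(len(ruler.patterns))  # 1
ruler.clear()               # drops patterns and rebuilds both matchers
print(len(ruler.patterns))  # 0
```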
170 | def _from_module(self, module, object):
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.__globals__
elif inspect.ismethoddescriptor(object):
if hasattr(object, '__objclass__'):
obj_mod = object.__objclass__.__module__
elif hasattr(object, '__module__'):
obj_mod = object.__module__
else:
return True # [XX] no easy way to tell otherwise
return module.__name__ == obj_mod
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
|
Return true if the given object is defined in the given
module.
| 12 | 88 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _from_module(self, module, object):
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.__globals__
elif inspect.ismethoddescriptor(object):
if hasattr(object, '__objclass__'):
obj_mod = object.__objclass__.__module__
elif hasattr(object, '__module__'):
obj_mod = object.__module__
else:
return True # [XX] no easy way to tell otherwise
return module.__name__ == obj_mod
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
```
###Assistant :
Return true if the given object is defined in the given
module.
|
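The common case reduces to an `inspect.getmodule` identity check; a small standalone illustration:
```Python
import inspect
import json

def from_module(module, obj):
    # simplified: true when obj's defining module is `module`
    return inspect.getmodule(obj) is module

print(from_module(json, json.dumps))  # True: defined in json/__init__.py
print(from_module(json, print))       # False: print is a builtin
```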
171 | def cut_ansi_string_into_parts(string_with_ansi_codes):
color_codes_english = ['Black', 'Red', 'Green', 'Yellow', 'Blue', 'Magenta', 'Cyan', 'White', 'Reset']
color_codes = ["30m", "31m", "32m", "33m", "34m", "35m", "36m", "37m", "0m"]
effect_codes_english = ['Italic', 'Underline', 'Slow Blink', 'Rapid Blink', 'Crossed Out']
effect_codes = ["3m", "4m", "5m", "6m", "9m"]
background_codes = ["40m", "41m", "42m", "43m", "44m", "45m", "46m", "47m"]
background_codes_english = ["Black", "Red", "Green", "Yellow", "Blue", "Magenta", "Cyan", "White"]
ansi_codes = color_codes + effect_codes
tuple_list = []
string_list = string_with_ansi_codes.split("\u001b[")
if (len(string_list)) == 1:
string_list = string_with_ansi_codes.split("\033[")
for teststring in string_list:
if teststring == string_with_ansi_codes:
tuple_list += [(teststring, None, None, None)]
break
if any(code in teststring for code in ansi_codes):
static_string = None
color_used = None
effect_used = None
background_used = None
for color in range(0, len(color_codes)):
if teststring.startswith(color_codes[color]):
working_thread = teststring.split(color_codes[color])
ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
static_string = ansi_strip.sub('', working_thread[1])
color_used = color_codes_english[color]
for effect in range(0, len(effect_codes)):
if teststring.startswith(effect_codes[effect]):
working_thread = teststring.split(effect_codes[effect])
ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
static_string = ansi_strip.sub('', working_thread[1])
effect_used = effect_codes_english[effect]
for background in range(0, len(background_codes)):
if teststring.startswith(background_codes[background]):
working_thread = teststring.split(background_codes[background])
ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
static_string = ansi_strip.sub('', working_thread[1])
background_used = background_codes_english[background]
try:
if not tuple_list[len(tuple_list) - 1][0]:
if not tuple_list[len(tuple_list) - 1][1] == None:
color_used = tuple_list[len(tuple_list) - 1][1]
if not tuple_list[len(tuple_list) - 1][2] == None:
background_used = tuple_list[len(tuple_list) - 1][2]
if not tuple_list[len(tuple_list) - 1][3] == None:
effect_used = tuple_list[len(tuple_list) - 1][3]
tuple_list += [(static_string, color_used, background_used, effect_used)]
else:
tuple_list += [(static_string, color_used, background_used, effect_used)]
except Exception:
tuple_list += [(static_string, color_used, background_used, effect_used)]
new_tuple_list = []
for x in range(0, len(tuple_list)):
if tuple_list[x][0]:
new_tuple_list += [[tuple_list[x][0], tuple_list[x][1], tuple_list[x][2], tuple_list[x][3]]]
return new_tuple_list
|
Converts a string with embedded ANSI Color Codes and parses it to create
a list of tuples describing pieces of the input string.
:param string_with_ansi_codes:
:return: [(str, str, str, str), ...] A list of tuples. Each tuple has format: (text, text color, background color, effects)
| 45 | 258 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def cut_ansi_string_into_parts(string_with_ansi_codes):
color_codes_english = ['Black', 'Red', 'Green', 'Yellow', 'Blue', 'Magenta', 'Cyan', 'White', 'Reset']
color_codes = ["30m", "31m", "32m", "33m", "34m", "35m", "36m", "37m", "0m"]
effect_codes_english = ['Italic', 'Underline', 'Slow Blink', 'Rapid Blink', 'Crossed Out']
effect_codes = ["3m", "4m", "5m", "6m", "9m"]
background_codes = ["40m", "41m", "42m", "43m", "44m", "45m", "46m", "47m"]
background_codes_english = ["Black", "Red", "Green", "Yellow", "Blue", "Magenta", "Cyan", "White"]
ansi_codes = color_codes + effect_codes
tuple_list = []
string_list = string_with_ansi_codes.split("\u001b[")
if (len(string_list)) == 1:
string_list = string_with_ansi_codes.split("\033[")
for teststring in string_list:
if teststring == string_with_ansi_codes:
tuple_list += [(teststring, None, None, None)]
break
if any(code in teststring for code in ansi_codes):
static_string = None
color_used = None
effect_used = None
background_used = None
for color in range(0, len(color_codes)):
if teststring.startswith(color_codes[color]):
working_thread = teststring.split(color_codes[color])
ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
static_string = ansi_strip.sub('', working_thread[1])
color_used = color_codes_english[color]
for effect in range(0, len(effect_codes)):
if teststring.startswith(effect_codes[effect]):
working_thread = teststring.split(effect_codes[effect])
ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
static_string = ansi_strip.sub('', working_thread[1])
effect_used = effect_codes_english[effect]
for background in range(0, len(background_codes)):
if teststring.startswith(background_codes[background]):
working_thread = teststring.split(background_codes[background])
ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
static_string = ansi_strip.sub('', working_thread[1])
background_used = background_codes_english[background]
try:
if not tuple_list[len(tuple_list) - 1][0]:
if not tuple_list[len(tuple_list) - 1][1] == None:
color_used = tuple_list[len(tuple_list) - 1][1]
if not tuple_list[len(tuple_list) - 1][2] == None:
background_used = tuple_list[len(tuple_list) - 1][2]
if not tuple_list[len(tuple_list) - 1][3] == None:
effect_used = tuple_list[len(tuple_list) - 1][3]
tuple_list += [(static_string, color_used, background_used, effect_used)]
else:
tuple_list += [(static_string, color_used, background_used, effect_used)]
except Exception:
tuple_list += [(static_string, color_used, background_used, effect_used)]
new_tuple_list = []
for x in range(0, len(tuple_list)):
if tuple_list[x][0]:
new_tuple_list += [[tuple_list[x][0], tuple_list[x][1], tuple_list[x][2], tuple_list[x][3]]]
return new_tuple_list
```
###Assistant :
Converts a string with embedded ANSI Color Codes and parses it to create
a list of tuples describing pieces of the input string.
:param string_with_ansi_codes:
:return: [(str, str, str, str), ...] A list of tuples. Each tuple has format: (text, text color, background color, effects)
|
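A usage sketch, assuming the function above is in scope and that `re` has been imported (the function body uses it):
```Python
import re  # required by cut_ansi_string_into_parts

colored = "\u001b[31mError:\u001b[0m disk full"
for text, fg, bg, effect in cut_ansi_string_into_parts(colored):
    print(repr(text), fg, bg, effect)
# 'Error:' Red None None
# ' disk full' Reset None None
```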
172 | def interpolate(self, f=None, f_step=DEFAULT_STEP, pol_order=1, f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX):
# Remove None values
i = 0
while i < len(self.raw):
if self.raw[i] is None:
self.raw = np.delete(self.raw, i)
self.frequency = np.delete(self.frequency, i)
else:
i += 1
# Interpolation functions
keys = 'raw error error_smoothed equalization equalized_raw equalized_smoothed target'.split()
interpolators = dict()
log_f = np.log10(self.frequency)
for key in keys:
if len(self.__dict__[key]):
interpolators[key] = InterpolatedUnivariateSpline(log_f, self.__dict__[key], k=pol_order)
if f is None:
self.frequency = self.generate_frequencies(f_min=f_min, f_max=f_max, f_step=f_step)
else:
self.frequency = np.array(f)
# Prevent log10 from exploding by replacing zero frequency with small value
zero_freq_fix = False
if self.frequency[0] == 0:
self.frequency[0] = 0.001
zero_freq_fix = True
# Run interpolators
log_f = np.log10(self.frequency)
for key in keys:
if len(self.__dict__[key]) and key in interpolators:
self.__dict__[key] = interpolators[key](log_f)
if zero_freq_fix:
# Restore zero frequency
self.frequency[0] = 0
# Everything but the interpolated data is affected by interpolating, reset them
self.reset(**{key: False for key in keys})
| Interpolates missing values from previous and next value. Resets all but raw data. | 13 | 147 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def interpolate(self, f=None, f_step=DEFAULT_STEP, pol_order=1, f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX):
# Remove None values
i = 0
while i < len(self.raw):
if self.raw[i] is None:
self.raw = np.delete(self.raw, i)
self.frequency = np.delete(self.frequency, i)
else:
i += 1
# Interpolation functions
keys = 'raw error error_smoothed equalization equalized_raw equalized_smoothed target'.split()
interpolators = dict()
log_f = np.log10(self.frequency)
for key in keys:
if len(self.__dict__[key]):
interpolators[key] = InterpolatedUnivariateSpline(log_f, self.__dict__[key], k=pol_order)
if f is None:
self.frequency = self.generate_frequencies(f_min=f_min, f_max=f_max, f_step=f_step)
else:
self.frequency = np.array(f)
# Prevent log10 from exploding by replacing zero frequency with small value
zero_freq_fix = False
if self.frequency[0] == 0:
self.frequency[0] = 0.001
zero_freq_fix = True
# Run interpolators
log_f = np.log10(self.frequency)
for key in keys:
if len(self.__dict__[key]) and key in interpolators:
self.__dict__[key] = interpolators[key](log_f)
if zero_freq_fix:
# Restore zero frequency
self.frequency[0] = 0
# Everything but the interpolated data is affected by interpolating, reset them
self.reset(**{key: False for key in keys})
```
###Assistant : Interpolates missing values from previous and next value. Resets all but raw data.
|
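The key trick above is interpolating against log10 of the frequency axis rather than the raw frequencies; a standalone sketch of just that step:
```Python
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

freq = np.array([20.0, 100.0, 1000.0, 10000.0, 20000.0])
raw = np.array([0.0, 1.5, 3.0, 1.0, -2.0])
spline = InterpolatedUnivariateSpline(np.log10(freq), raw, k=1)

new_freq = np.logspace(np.log10(20.0), np.log10(20000.0), 8)
print(spline(np.log10(new_freq)))  # raw values resampled on a log-spaced grid
```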
173 | def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
device = self.param_groups[0]['params'][0].device
one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
trust_coeff = group['trust_coeff']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
# apply LARS LR adaptation, LARC clipping, weight decay
# ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py
if weight_decay != 0 or group['always_adapt']:
w_norm = p.norm(2.0)
g_norm = grad.norm(2.0)
trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps)
# FIXME nested where required since logical and/or not working in PT XLA
trust_ratio = torch.where(
w_norm > 0,
torch.where(g_norm > 0, trust_ratio, one_tensor),
one_tensor,
)
if group['trust_clip']:
trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor)
grad.add_(p, alpha=weight_decay)
grad.mul_(trust_ratio)
# apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(grad).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(grad, alpha=1. - dampening)
if nesterov:
grad = grad.add(buf, alpha=momentum)
else:
grad = buf
p.add_(grad, alpha=-group['lr'])
        return loss
| Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
| 19 | 182 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
device = self.param_groups[0]['params'][0].device
one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
trust_coeff = group['trust_coeff']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
# apply LARS LR adaptation, LARC clipping, weight decay
# ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py
if weight_decay != 0 or group['always_adapt']:
w_norm = p.norm(2.0)
g_norm = grad.norm(2.0)
trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps)
# FIXME nested where required since logical and/or not working in PT XLA
trust_ratio = torch.where(
w_norm > 0,
torch.where(g_norm > 0, trust_ratio, one_tensor),
one_tensor,
)
if group['trust_clip']:
trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor)
grad.add_(p, alpha=weight_decay)
grad.mul_(trust_ratio)
# apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(grad).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(grad, alpha=1. - dampening)
if nesterov:
grad = grad.add(buf, alpha=momentum)
else:
grad = buf
p.add_(grad, alpha=-group['lr'])
return loss
```
###Assistant : Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
|
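The heart of the update is the layer-wise trust ratio; a minimal sketch of that computation alone, outside any optimizer class:
```Python
import torch

p = torch.randn(64, 64)            # a parameter tensor
grad = 0.01 * torch.randn(64, 64)  # its gradient
weight_decay, trust_coeff, eps = 1e-4, 1e-3, 1e-8

w_norm, g_norm = p.norm(2.0), grad.norm(2.0)
trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps)
update = (grad + weight_decay * p) * trust_ratio  # before momentum and lr
print(float(trust_ratio))
```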
174 | async def _async_force_resync(self, *_):
self._forced_resync = None
await self._async_force_refresh_state()
| Force a resync after an update since the hub may have stale state. | 13 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def _async_force_resync(self, *_):
self._forced_resync = None
await self._async_force_refresh_state()
```
###Assistant : Force a resync after an update since the hub may have stale state.
|
175 | def set_color_codes(palette="deep"):
if palette == "reset":
colors = [
(0., 0., 1.),
(0., .5, 0.),
(1., 0., 0.),
(.75, 0., .75),
(.75, .75, 0.),
(0., .75, .75),
(0., 0., 0.)
]
elif not isinstance(palette, str):
err = "set_color_codes requires a named seaborn palette"
raise TypeError(err)
elif palette in SEABORN_PALETTES:
if not palette.endswith("6"):
palette = palette + "6"
colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]
else:
err = f"Cannot set colors with palette '{palette}'"
raise ValueError(err)
for code, color in zip("bgrmyck", colors):
rgb = mpl.colors.colorConverter.to_rgb(color)
mpl.colors.colorConverter.colors[code] = rgb
mpl.colors.colorConverter.cache[code] = rgb
| Change how matplotlib color shorthands are interpreted.
Calling this will change how shorthand codes like "b" or "g"
are interpreted by matplotlib in subsequent plots.
Parameters
----------
palette : {deep, muted, pastel, dark, bright, colorblind}
Named seaborn palette to use as the source of colors.
See Also
--------
set : Color codes can be set through the high-level seaborn style
manager.
set_palette : Color codes can also be set through the function that
sets the matplotlib color cycle.
| 78 | 90 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def set_color_codes(palette="deep"):
if palette == "reset":
colors = [
(0., 0., 1.),
(0., .5, 0.),
(1., 0., 0.),
(.75, 0., .75),
(.75, .75, 0.),
(0., .75, .75),
(0., 0., 0.)
]
elif not isinstance(palette, str):
err = "set_color_codes requires a named seaborn palette"
raise TypeError(err)
elif palette in SEABORN_PALETTES:
if not palette.endswith("6"):
palette = palette + "6"
colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]
else:
err = f"Cannot set colors with palette '{palette}'"
raise ValueError(err)
for code, color in zip("bgrmyck", colors):
rgb = mpl.colors.colorConverter.to_rgb(color)
mpl.colors.colorConverter.colors[code] = rgb
mpl.colors.colorConverter.cache[code] = rgb
```
###Assistant : Change how matplotlib color shorthands are interpreted.
Calling this will change how shorthand codes like "b" or "g"
are interpreted by matplotlib in subsequent plots.
Parameters
----------
palette : {deep, muted, pastel, dark, bright, colorblind}
Named seaborn palette to use as the source of colors.
See Also
--------
set : Color codes can be set through the high-level seaborn style
manager.
set_palette : Color codes can also be set through the function that
sets the matplotlib color cycle.
|
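A usage sketch via seaborn's public function of the same name:
```Python
import matplotlib.pyplot as plt
import seaborn as sns

sns.set_color_codes("muted")
plt.plot([0, 1], [0, 1], "b")  # "b" now resolves to the muted-palette blue
```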
176 | def test_mapped_literal_length_increase_at_runtime_adds_additional_tis(dag_maker, session):
from airflow.models import Variable
Variable.set(key='arg1', value=[1, 2, 3])
| Test that when the length of mapped literal increases at runtime, additional ti is added | 15 | 11 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_mapped_literal_length_increase_at_runtime_adds_additional_tis(dag_maker, session):
from airflow.models import Variable
Variable.set(key='arg1', value=[1, 2, 3])
```
###Assistant : Test that when the length of mapped literal increases at runtime, additional ti is added
|
177 | def create_png_thumbnail_file(self, thumb_dir):
thumb_file = Path(thumb_dir) / Path(f"{self.doc.pk:07}.png")
thumb_file.write_text("this is a dummy png file")
return thumb_file
|
Creates a dummy PNG thumbnail file in the given directory, based on
the database Document
| 15 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def create_png_thumbnail_file(self, thumb_dir):
thumb_file = Path(thumb_dir) / Path(f"{self.doc.pk:07}.png")
thumb_file.write_text("this is a dummy png file")
return thumb_file
```
###Assistant :
Creates a dummy PNG thumbnail file in the given directory, based on
the database Document
|
178 | def test_parse_html(self):
assert validate(parse_html(), '<!DOCTYPE html><body>"perfectly"<a>valid<div>HTML').tag == "html"
with self.assertRaises(ValueError) as cm:
validate(parse_html(), None)
assert_validationerror(cm.exception, )
|
ValidationError:
Unable to parse HTML: can only parse strings (None)
| 10 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_parse_html(self):
assert validate(parse_html(), '<!DOCTYPE html><body>"perfectly"<a>valid<div>HTML').tag == "html"
with self.assertRaises(ValueError) as cm:
validate(parse_html(), None)
assert_validationerror(cm.exception, )
```
###Assistant :
ValidationError:
Unable to parse HTML: can only parse strings (None)
|
179 | def _check_feature_names_in(estimator, input_features=None, *, generate_names=True):
feature_names_in_ = getattr(estimator, "feature_names_in_", None)
n_features_in_ = getattr(estimator, "n_features_in_", None)
if input_features is not None:
input_features = np.asarray(input_features, dtype=object)
if feature_names_in_ is not None and not np.array_equal(
feature_names_in_, input_features
):
raise ValueError("input_features is not equal to feature_names_in_")
if n_features_in_ is not None and len(input_features) != n_features_in_:
raise ValueError(
"input_features should have length equal to number of "
f"features ({n_features_in_}), got {len(input_features)}"
)
return input_features
if feature_names_in_ is not None:
return feature_names_in_
if not generate_names:
return
# Generates feature names if `n_features_in_` is defined
if n_features_in_ is None:
raise ValueError("Unable to generate feature names without n_features_in_")
return np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
| Check `input_features` and generate names if needed.
Commonly used in :term:`get_feature_names_out`.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
generate_names : bool, default=True
Whether to generate names when `input_features` is `None` and
`estimator.feature_names_in_` is not defined. This is useful for transformers
that validate `input_features` but do not require them in
:term:`get_feature_names_out` e.g. `PCA`.
Returns
-------
feature_names_in : ndarray of str or `None`
Feature names in.
| 110 | 107 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _check_feature_names_in(estimator, input_features=None, *, generate_names=True):
feature_names_in_ = getattr(estimator, "feature_names_in_", None)
n_features_in_ = getattr(estimator, "n_features_in_", None)
if input_features is not None:
input_features = np.asarray(input_features, dtype=object)
if feature_names_in_ is not None and not np.array_equal(
feature_names_in_, input_features
):
raise ValueError("input_features is not equal to feature_names_in_")
if n_features_in_ is not None and len(input_features) != n_features_in_:
raise ValueError(
"input_features should have length equal to number of "
f"features ({n_features_in_}), got {len(input_features)}"
)
return input_features
if feature_names_in_ is not None:
return feature_names_in_
if not generate_names:
return
# Generates feature names if `n_features_in_` is defined
if n_features_in_ is None:
raise ValueError("Unable to generate feature names without n_features_in_")
return np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
```
###Assistant : Check `input_features` and generate names if needed.
Commonly used in :term:`get_feature_names_out`.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
generate_names : bool, default=True
Whether to generate names when `input_features` is `None` and
`estimator.feature_names_in_` is not defined. This is useful for transformers
that validate `input_features` but do not require them in
:term:`get_feature_names_out` e.g. `PCA`.
Returns
-------
feature_names_in : ndarray of str or `None`
Feature names in.
|
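The generated-name branch is what most transformers surface through `get_feature_names_out`; a usage sketch assuming scikit-learn >= 1.0:
```Python
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.arange(12.0).reshape(4, 3)      # fit on plain arrays: no column names
scaler = StandardScaler().fit(X)
print(scaler.get_feature_names_out())  # ['x0' 'x1' 'x2'], generated names
```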
180 | def all_pairs_bellman_ford_path(G, weight="weight"):
path = single_source_bellman_ford_path
# TODO This can be trivially parallelized.
for n in G:
yield (n, path(G, n, weight=weight))
| Compute shortest paths between all nodes in a weighted graph.
Parameters
----------
G : NetworkX graph
weight : string or function (default="weight")
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
Returns
-------
distance : dictionary
Dictionary, keyed by source and target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5)
>>> path = dict(nx.all_pairs_bellman_ford_path(G))
>>> path[0][4]
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
floyd_warshall, all_pairs_dijkstra_path
| 170 | 22 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def all_pairs_bellman_ford_path(G, weight="weight"):
path = single_source_bellman_ford_path
# TODO This can be trivially parallelized.
for n in G:
yield (n, path(G, n, weight=weight))
```
###Assistant : Compute shortest paths between all nodes in a weighted graph.
Parameters
----------
G : NetworkX graph
weight : string or function (default="weight")
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
Returns
-------
distance : dictionary
Dictionary, keyed by source and target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5)
>>> path = dict(nx.all_pairs_bellman_ford_path(G))
>>> path[0][4]
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
floyd_warshall, all_pairs_dijkstra_path
|
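Since the docstring example only uses unit weights, here is a sketch of the case that actually motivates Bellman-Ford over Dijkstra, a negative edge weight:
```Python
import networkx as nx

G = nx.DiGraph()
G.add_weighted_edges_from([("a", "b", 4), ("a", "c", 2), ("c", "b", -1)])
paths = dict(nx.all_pairs_bellman_ford_path(G))
print(paths["a"]["b"])  # ['a', 'c', 'b']: total weight 1 beats the direct 4
```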
181 | def update_cost_in_all_boms_in_test():
log = enqueue_update_cost() # create BOM Update Log
while log.status != "Completed":
resume_bom_cost_update_jobs() # run cron job until complete
log.reload()
return log
|
Utility to run the 'Update Cost' job to completion in tests without relying on the cron job.
| 14 | 24 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def update_cost_in_all_boms_in_test():
log = enqueue_update_cost() # create BOM Update Log
while log.status != "Completed":
resume_bom_cost_update_jobs() # run cron job until complete
log.reload()
return log
```
###Assistant :
Utility to run the 'Update Cost' job to completion in tests without relying on the cron job.
|
182 | def clear(self) -> None:
self.row_count = 0
self._clear_caches()
self._y_offsets.clear()
self.data.clear()
self.rows.clear()
self._line_no = 0
self._require_update_dimensions = True
self.refresh()
| Clear the table.
Args:
columns (bool, optional): Also clear the columns. Defaults to False.
| 14 | 18 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def clear(self) -> None:
self.row_count = 0
self._clear_caches()
self._y_offsets.clear()
self.data.clear()
self.rows.clear()
self._line_no = 0
self._require_update_dimensions = True
self.refresh()
```
###Assistant : Clear the table.
Args:
columns (bool, optional): Also clear the columns. Defaults to False.
|
183 | def copyFile(source_path, dest_path):
while 1:
try:
shutil.copyfile(source_path, dest_path)
except PermissionError as e:
if e.errno != errno.EACCES:
raise
general.warning("Problem copying file %s:" % e)
try:
reply = raw_input("Retry? (YES/no) ") or "yes"
except EOFError:
reply = "no"
if reply.upper() == "YES":
continue
raise
break
| Improved version of shutil.copy
This handles errors with a chance to correct them, e.g. on Windows, files might be
locked by a running program or virus checkers.
| 26 | 43 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def copyFile(source_path, dest_path):
while 1:
try:
shutil.copyfile(source_path, dest_path)
except PermissionError as e:
if e.errno != errno.EACCES:
raise
general.warning("Problem copying file %s:" % e)
try:
reply = raw_input("Retry? (YES/no) ") or "yes"
except EOFError:
reply = "no"
if reply.upper() == "YES":
continue
raise
break
```
###Assistant : Improved version of shutil.copy
This handles errors with a chance to correct them, e.g. on Windows, files might be
locked by a running program or virus checkers.
|
184 | def test_iforest(global_random_seed):
X_train = np.array([[0, 1], [1, 2]])
X_test = np.array([[2, 1], [1, 1]])
grid = ParameterGrid(
{"n_estimators": [3], "max_samples": [0.5, 1.0, 3], "bootstrap": [True, False]}
)
with ignore_warnings():
for params in grid:
IsolationForest(random_state=global_random_seed, **params).fit(
X_train
).predict(X_test)
| Check Isolation Forest for various parameter settings. | 7 | 37 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_iforest(global_random_seed):
X_train = np.array([[0, 1], [1, 2]])
X_test = np.array([[2, 1], [1, 1]])
grid = ParameterGrid(
{"n_estimators": [3], "max_samples": [0.5, 1.0, 3], "bootstrap": [True, False]}
)
with ignore_warnings():
for params in grid:
IsolationForest(random_state=global_random_seed, **params).fit(
X_train
).predict(X_test)
```
###Assistant : Check Isolation Forest for various parameter settings.
|
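The sweep above is driven by `ParameterGrid`, which expands a dict of value lists into every combination; a standalone look:
```Python
from sklearn.model_selection import ParameterGrid

grid = ParameterGrid({"n_estimators": [3], "bootstrap": [True, False]})
print(list(grid))
# [{'bootstrap': True, 'n_estimators': 3}, {'bootstrap': False, 'n_estimators': 3}]
```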
185 | def invalidate_caches(self):
_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter,
machinery.PathFinder, machinery.WindowsRegistryFinder)
| An optional method for clearing the finder's cache, if any.
This method is used by importlib.invalidate_caches().
| 16 | 7 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def invalidate_caches(self):
_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter,
machinery.PathFinder, machinery.WindowsRegistryFinder)
```
###Assistant : An optional method for clearing the finder's cache, if any.
This method is used by importlib.invalidate_caches().
|
186 | def reap(instance=None, status='failed', excluded_uuids=[]):
me = instance
if me is None:
try:
me = Instance.objects.me()
except RuntimeError as e:
logger.warning(f'Local instance is not registered, not running reaper: {e}')
return
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter(
Q(status='running') & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
).exclude(celery_task_id__in=excluded_uuids)
for j in jobs:
reap_job(j, status)
|
Reap all jobs in 'running' status for this instance.
| 8 | 49 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def reap(instance=None, status='failed', excluded_uuids=[]):
me = instance
if me is None:
try:
me = Instance.objects.me()
except RuntimeError as e:
logger.warning(f'Local instance is not registered, not running reaper: {e}')
return
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter(
Q(status='running') & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
).exclude(celery_task_id__in=excluded_uuids)
for j in jobs:
reap_job(j, status)
```
###Assistant :
Reap all jobs in 'running' status for this instance.
|
187 | def closure(self, rel, depth=-1):
from nltk.util import acyclic_breadth_first
for synset in acyclic_breadth_first(self, rel, depth):
if synset != self:
yield synset
from nltk.util import acyclic_depth_first as acyclic_tree
from nltk.util import unweighted_minimum_spanning_tree as mst
# Also add this shortcut?
# from nltk.util import unweighted_minimum_spanning_digraph as umsd
|
Return the transitive closure of source under the rel
relationship, breadth-first, discarding cycles:
>>> from nltk.corpus import wordnet as wn
>>> computer = wn.synset('computer.n.01')
>>> topic = lambda s:s.topic_domains()
>>> print(list(computer.closure(topic)))
[Synset('computer_science.n.01')]
UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2
Include redundant paths (but only once), avoiding duplicate searches
(from 'animal.n.01' to 'entity.n.01'):
>>> dog = wn.synset('dog.n.01')
>>> hyp = lambda s:s.hypernyms()
>>> print(list(dog.closure(hyp)))
[Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\
Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\
Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\
Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\
Synset('physical_entity.n.01'), Synset('entity.n.01')]
UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7
| 88 | 44 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def closure(self, rel, depth=-1):
from nltk.util import acyclic_breadth_first
for synset in acyclic_breadth_first(self, rel, depth):
if synset != self:
yield synset
from nltk.util import acyclic_depth_first as acyclic_tree
from nltk.util import unweighted_minimum_spanning_tree as mst
# Also add this shortcut?
# from nltk.util import unweighted_minimum_spanning_digraph as umsd
```
###Assistant :
Return the transitive closure of source under the rel
relationship, breadth-first, discarding cycles:
>>> from nltk.corpus import wordnet as wn
>>> computer = wn.synset('computer.n.01')
>>> topic = lambda s:s.topic_domains()
>>> print(list(computer.closure(topic)))
[Synset('computer_science.n.01')]
UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2
Include redundant paths (but only once), avoiding duplicate searches
(from 'animal.n.01' to 'entity.n.01'):
>>> dog = wn.synset('dog.n.01')
>>> hyp = lambda s:s.hypernyms()
>>> print(list(dog.closure(hyp)))
[Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\
Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\
Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\
Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\
Synset('physical_entity.n.01'), Synset('entity.n.01')]
UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7
|
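Underneath, `acyclic_breadth_first` is a cycle-discarding breadth-first traversal; a minimal generic sketch of that idea:
```Python
from collections import deque

def transitive_closure(node, rel):
    # breadth-first closure over rel(node) -> neighbors, skipping revisits
    seen, queue = {node}, deque([node])
    while queue:
        for nxt in rel(queue.popleft()):
            if nxt not in seen:
                seen.add(nxt)
                queue.append(nxt)
                yield nxt

print(list(transitive_closure(2, lambda n: [n * 2 % 10, n * 3 % 10])))  # [4, 6, 8]
```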
188 | def _get_save_args(self) -> Tuple[int, ...]:
filetype = self.config["format"]
args: Tuple[int, ...] = tuple()
if filetype == "jpg" and self.config["jpg_quality"] > 0:
args = (cv2.IMWRITE_JPEG_QUALITY, # pylint: disable=no-member
self.config["jpg_quality"])
if filetype == "png" and self.config["png_compress_level"] > -1:
args = (cv2.IMWRITE_PNG_COMPRESSION, # pylint: disable=no-member
self.config["png_compress_level"])
logger.debug(args)
return args
| Obtain the save parameters for the file format.
Returns
-------
tuple
The OpenCV specific arguments for the selected file format
| 20 | 46 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_save_args(self) -> Tuple[int, ...]:
filetype = self.config["format"]
args: Tuple[int, ...] = tuple()
if filetype == "jpg" and self.config["jpg_quality"] > 0:
args = (cv2.IMWRITE_JPEG_QUALITY, # pylint: disable=no-member
self.config["jpg_quality"])
if filetype == "png" and self.config["png_compress_level"] > -1:
args = (cv2.IMWRITE_PNG_COMPRESSION, # pylint: disable=no-member
self.config["png_compress_level"])
logger.debug(args)
return args
```
###Assistant : Obtain the save parameters for the file format.
Returns
-------
tuple
The OpenCV specific arguments for the selected file format
|
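The returned tuple feeds straight into `cv2.imwrite` as its optional params argument; a standalone sketch:
```Python
import cv2
import numpy as np

img = np.zeros((32, 32, 3), dtype=np.uint8)
args = (cv2.IMWRITE_JPEG_QUALITY, 75)  # what the method returns for jpg
cv2.imwrite("out.jpg", img, args)
```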
189 | def set_constrained_layout_pads(self, **kwargs):
if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine):
self.get_layout_engine().set(**kwargs)
|
Set padding for ``constrained_layout``.
Tip: The parameters can be passed from a dictionary by using
``fig.set_constrained_layout(**pad_dict)``.
See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
Parameters
----------
w_pad : float, default: :rc:`figure.constrained_layout.w_pad`
Width padding in inches. This is the pad around Axes
and is meant to make sure there is enough room for fonts to
look good. Defaults to 3 pts = 0.04167 inches
h_pad : float, default: :rc:`figure.constrained_layout.h_pad`
Height padding in inches. Defaults to 3 pts.
wspace : float, default: :rc:`figure.constrained_layout.wspace`
Width padding between subplots, expressed as a fraction of the
subplot width. The total padding ends up being w_pad + wspace.
hspace : float, default: :rc:`figure.constrained_layout.hspace`
Height padding between subplots, expressed as a fraction of the
subplot width. The total padding ends up being h_pad + hspace.
| 122 | 7 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def set_constrained_layout_pads(self, **kwargs):
if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine):
self.get_layout_engine().set(**kwargs)
```
###Assistant :
Set padding for ``constrained_layout``.
Tip: The parameters can be passed from a dictionary by using
``fig.set_constrained_layout(**pad_dict)``.
See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
Parameters
----------
w_pad : float, default: :rc:`figure.constrained_layout.w_pad`
Width padding in inches. This is the pad around Axes
and is meant to make sure there is enough room for fonts to
look good. Defaults to 3 pts = 0.04167 inches
h_pad : float, default: :rc:`figure.constrained_layout.h_pad`
Height padding in inches. Defaults to 3 pts.
wspace : float, default: :rc:`figure.constrained_layout.wspace`
Width padding between subplots, expressed as a fraction of the
subplot width. The total padding ends up being w_pad + wspace.
hspace : float, default: :rc:`figure.constrained_layout.hspace`
Height padding between subplots, expressed as a fraction of the
subplot width. The total padding ends up being h_pad + hspace.
|
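A usage sketch, assuming a Matplotlib version where constrained layout is a layout engine (>= 3.6):
```Python
import matplotlib.pyplot as plt

fig, axs = plt.subplots(2, 2, layout="constrained")
fig.set_constrained_layout_pads(w_pad=0.1, h_pad=0.1, wspace=0.05, hspace=0.05)
```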
190 | def track_current_candle(self):
if self.dd.current_candle > self.current_candle:
self.get_corr_dataframes = True
self.pair_it = 0
self.current_candle = self.dd.current_candle
# Following methods which are overridden by user made prediction models.
# See freqai/prediction_models/CatboostPredictionModel.py for an example.
|
Checks if the latest candle appended by the datadrawer is
equivalent to the latest candle seen by FreqAI. If not, it
asks to refresh the cached corr_dfs, and resets the pair
counter.
| 32 | 32 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def track_current_candle(self):
if self.dd.current_candle > self.current_candle:
self.get_corr_dataframes = True
self.pair_it = 0
self.current_candle = self.dd.current_candle
# Following methods which are overridden by user made prediction models.
# See freqai/prediction_models/CatboostPredictionModel.py for an example.
```
###Assistant :
Checks if the latest candle appended by the datadrawer is
equivalent to the latest candle seen by FreqAI. If not, it
asks to refresh the cached corr_dfs, and resets the pair
counter.
|
191 | def find(self, req):
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
| Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
| 64 | 26 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def find(self, req):
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
```
###Assistant : Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
|
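This looks like `pkg_resources.WorkingSet.find` (an assumption, based on the `by_key` index); a usage sketch against the global working set:
```Python
import pkg_resources

req = pkg_resources.Requirement.parse("setuptools")
dist = pkg_resources.working_set.find(req)  # a Distribution, or None
print(dist)
```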
192 | def current() -> dict | None:
try:
ContextStack.top(_FROZEN_CONTEXT_KEY)
sample: Sample = {}
for ctx in ContextStack.stack(_FROZEN_CONTEXT_KEY):
if not isinstance(ctx, dict):
raise TypeError(f'Expect architecture to be a dict, found: {ctx}')
sample.update(ctx)
return sample
except NoContextError:
return None
| Retrieve the current frozen context.
If multiple layers have been found, they would be merged from bottom to top.
Returns
-------
The sample in frozen context.
If no sample is found, return None.
| 33 | 36 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def current() -> dict | None:
try:
ContextStack.top(_FROZEN_CONTEXT_KEY)
sample: Sample = {}
for ctx in ContextStack.stack(_FROZEN_CONTEXT_KEY):
if not isinstance(ctx, dict):
raise TypeError(f'Expect architecture to be a dict, found: {ctx}')
sample.update(ctx)
return sample
except NoContextError:
return None
```
###Assistant : Retrieve the current frozen context.
If multiple layers have been found, they would be merged from bottom to top.
Returns
-------
The sample in frozen context.
If no sample is found, return None.
|
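The merge is a plain dict update from the bottom of the stack to the top, so later layers win on key conflicts; a standalone sketch with a hypothetical stack:
```Python
# Hypothetical frozen-context stack, bottom to top.
stack = [{"layer1": "conv"}, {"layer2": "pool"}, {"layer1": "sep_conv"}]

sample = {}
for ctx in stack:
    sample.update(ctx)
print(sample)  # {'layer1': 'sep_conv', 'layer2': 'pool'}
```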
193 | def test_rept_child() -> None:
rows = 10_000
cols = 7
rept_row_count = 5
# these times and sizes are based on the above constants
# and Madhavas MacBook Pro 2019
expected_rept_mem_size = 4.010650634765625
expected_rept_ser_size = 7.4926300048828125
macbook_pro_2019_ser_time = 0.18791760900000032
macbook_pro_2019_de_time = 0.1726598199999998
sept = make_sept(rows=rows, cols=cols)
rept_rows = [sept.copy() for i in range(rept_row_count)]
rept = REPT(rows=rept_rows)
start = timeit.default_timer()
ser = sy.serialize(rept, to_bytes=True)
end = timeit.default_timer()
time_ser = end - start
start = timeit.default_timer()
de = sy.deserialize(ser, from_bytes=True)
end = timeit.default_timer()
time_de = end - start
assert rept == de
current_rept_mem_size = size(rept)
mem_diff = (current_rept_mem_size / expected_rept_mem_size * 100) - 100
current_rept_bytes_size = size(ser)
bytes_diff = (current_rept_bytes_size / expected_rept_ser_size * 100) - 100
ser_time_diff = (time_ser / macbook_pro_2019_ser_time * 100) - 100
de_time_diff = (time_de / macbook_pro_2019_de_time * 100) - 100
print("REPT Stats")
print("==========")
print("In-memory size of REPT", size(rept))
print("Serialized size of REPT", size(ser))
print(f"Serializing {rept_row_count}x{rows}x{cols} took {time_ser} secs")
print(f"Deserializing {rept_row_count}x{rows}x{cols} took {time_de} secs")
print("Current Results")
print("===============")
print(f"In-memory size delta: {mem_diff}%")
print(f"Serialized size delta: {bytes_diff}%")
print(f"Serializing time delta: {ser_time_diff}%")
print(f"Deserializing time delta: {de_time_diff}%")
# we want to assert that our calculated values are smaller than the old values with
# some tolerance
assert (current_rept_mem_size - expected_rept_mem_size) < 1e-3
assert (current_rept_bytes_size - expected_rept_ser_size) < 2e-2
# TODO: make time benchmarks stable (probably can't run in parallel)
# assert (time_ser - macbook_pro_2019_ser_time) < 2e-1
# assert (time_de - macbook_pro_2019_de_time) < 2e-1
| We need to benchmark both the size and time to serialize and deserialize REPTs | 14 | 230 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_rept_child() -> None:
rows = 10_000
cols = 7
rept_row_count = 5
# these times and sizes are based on the above constants
# and Madhavas MacBook Pro 2019
expected_rept_mem_size = 4.010650634765625
expected_rept_ser_size = 7.4926300048828125
macbook_pro_2019_ser_time = 0.18791760900000032
macbook_pro_2019_de_time = 0.1726598199999998
sept = make_sept(rows=rows, cols=cols)
rept_rows = [sept.copy() for i in range(rept_row_count)]
rept = REPT(rows=rept_rows)
start = timeit.default_timer()
ser = sy.serialize(rept, to_bytes=True)
end = timeit.default_timer()
time_ser = end - start
start = timeit.default_timer()
de = sy.deserialize(ser, from_bytes=True)
end = timeit.default_timer()
time_de = end - start
assert rept == de
current_rept_mem_size = size(rept)
mem_diff = (current_rept_mem_size / expected_rept_mem_size * 100) - 100
current_rept_bytes_size = size(ser)
bytes_diff = (current_rept_bytes_size / expected_rept_ser_size * 100) - 100
ser_time_diff = (time_ser / macbook_pro_2019_ser_time * 100) - 100
de_time_diff = (time_de / macbook_pro_2019_de_time * 100) - 100
print("REPT Stats")
print("==========")
print("In-memory size of REPT", size(rept))
print("Serialized size of REPT", size(ser))
print(f"Serializing {rept_row_count}x{rows}x{cols} took {time_ser} secs")
print(f"Deserializing {rept_row_count}x{rows}x{cols} took {time_de} secs")
print("Current Results")
print("===============")
print(f"In-memory size delta: {mem_diff}%")
print(f"Serialized size delta: {bytes_diff}%")
print(f"Serializing time delta: {ser_time_diff}%")
print(f"Deserializing time delta: {de_time_diff}%")
# we want to assert that our calculated values are smaller than the old values with
# some tolerance
assert (current_rept_mem_size - expected_rept_mem_size) < 1e-3
assert (current_rept_bytes_size - expected_rept_ser_size) < 2e-2
# TODO: make time benchmarks stable (probably can't run in parallel)
# assert (time_ser - macbook_pro_2019_ser_time) < 2e-1
# assert (time_de - macbook_pro_2019_de_time) < 2e-1
```
###Assistant : We need to benchmark both the size and time to serialize and deserialize REPTs
|
194 | def _ecm_one_factor(n, B1=10000, B2=100000, max_curve=200):
n = as_int(n)
if B1 % 2 != 0 or B2 % 2 != 0:
raise ValueError("The Bounds should be an even integer")
sieve.extend(B2)
if isprime(n):
return n
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.polys.polytools import gcd
curve = 0
D = int(sqrt(B2))
beta = [0]*(D + 1)
S = [0]*(D + 1)
k = 1
for p in sieve.primerange(1, B1 + 1):
k *= pow(p, integer_log(B1, p)[0])
while(curve <= max_curve):
curve += 1
#Suyama's parametrization
sigma = rgen.randint(6, n - 1)
u = (sigma*sigma - 5) % n
v = (4*sigma) % n
diff = v - u
u_3 = pow(u, 3, n)
try:
C = (pow(diff, 3, n)*(3*u + v)*mod_inverse(4*u_3*v, n) - 2) % n
except ValueError:
#If the mod_inverse(4*u_3*v, n) doesn't exist
return gcd(4*u_3*v, n)
a24 = (C + 2)*mod_inverse(4, n) % n
Q = Point(u_3, pow(v, 3, n), a24, n)
Q = Q.mont_ladder(k)
g = gcd(Q.z_cord, n)
#Stage 1 factor
if g != 1 and g != n:
return g
#Stage 1 failure. Q.z = 0, Try another curve
elif g == n:
continue
#Stage 2 - Improved Standard Continuation
S[1] = Q.double()
S[2] = S[1].double()
beta[1] = (S[1].x_cord*S[1].z_cord) % n
beta[2] = (S[2].x_cord*S[2].z_cord) % n
for d in range(3, D + 1):
S[d] = S[d - 1].add(S[1], S[d - 2])
beta[d] = (S[d].x_cord*S[d].z_cord) % n
g = 1
B = B1 - 1
T = Q.mont_ladder(B - 2*D)
R = Q.mont_ladder(B)
for r in range(B, B2, 2*D):
alpha = (R.x_cord*R.z_cord) % n
for q in sieve.primerange(r + 2, r + 2*D + 1):
delta = (q - r) // 2
f = (R.x_cord - S[delta].x_cord)*(R.z_cord + S[delta].z_cord) -\
alpha + beta[delta]
g = (g*f) % n
#Swap
T, R = R, R.add(S[D], T)
g = gcd(n, g)
#Stage 2 Factor found
if g != 1 and g != n:
return g
#ECM failed, Increase the bounds
raise ValueError("Increase the bounds")
| Returns one factor of n using
Lenstra's 2 Stage Elliptic curve Factorization
with Suyama's Parameterization. Here Montgomery
arithmetic is used for fast computation of addition
and doubling of points in elliptic curve.
This ECM method considers elliptic curves in Montgomery
form (E : b*y**2*z = x**3 + a*x**2*z + x*z**2) and involves
elliptic curve operations (mod N), where the elements in
Z are reduced (mod N). Since N is not a prime, E over FF(N)
is not really an elliptic curve but we can still do point additions
and doubling as if FF(N) was a field.
Stage 1 : The basic algorithm involves taking a random point (P) on an
elliptic curve in FF(N). Then compute k*P using the Montgomery ladder algorithm.
Let q be an unknown factor of N. Then the order of the curve E, |E(FF(q))|,
might be a smooth number that divides k. Then we have k = l * |E(FF(q))|
for some l. For any point belonging to the curve E, |E(FF(q))|*P = O,
hence k*P = l*|E(FF(q))|*P = O. Thus k*P.z_cord = 0 (mod q), and the unknown
factor of N (q) can be recovered by taking gcd(k*P.z_cord, N).
Stage 2 : This is a continuation of Stage 1 if k*P != O. The idea utilizes
the fact that even if k*P != O, the value of k might miss just one large
prime divisor of |E(FF(q))|. In this case we only need to compute the
scalar multiplication by p to get p*k*P = O. Here a second bound B2
restricts the size of possible values of p.
Parameters
==========
n : Number to be Factored
B1 : Stage 1 Bound
B2 : Stage 2 Bound
max_curve : Maximum number of curves generated
References
==========
.. [1] Carl Pomerance and Richard Crandall "Prime Numbers:
A Computational Perspective" (2nd Ed.), page 344
| 303 | 319 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _ecm_one_factor(n, B1=10000, B2=100000, max_curve=200):
n = as_int(n)
if B1 % 2 != 0 or B2 % 2 != 0:
raise ValueError("The Bounds should be an even integer")
sieve.extend(B2)
if isprime(n):
return n
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.polys.polytools import gcd
curve = 0
D = int(sqrt(B2))
beta = [0]*(D + 1)
S = [0]*(D + 1)
k = 1
for p in sieve.primerange(1, B1 + 1):
k *= pow(p, integer_log(B1, p)[0])
while(curve <= max_curve):
curve += 1
#Suyama's Parametrization
sigma = rgen.randint(6, n - 1)
u = (sigma*sigma - 5) % n
v = (4*sigma) % n
diff = v - u
u_3 = pow(u, 3, n)
try:
C = (pow(diff, 3, n)*(3*u + v)*mod_inverse(4*u_3*v, n) - 2) % n
except ValueError:
#If the mod_inverse(4*u_3*v, n) doesn't exist
return gcd(4*u_3*v, n)
a24 = (C + 2)*mod_inverse(4, n) % n
Q = Point(u_3, pow(v, 3, n), a24, n)
Q = Q.mont_ladder(k)
g = gcd(Q.z_cord, n)
#Stage 1 factor
if g != 1 and g != n:
return g
#Stage 1 failure. Q.z = 0, Try another curve
elif g == n:
continue
#Stage 2 - Improved Standard Continuation
S[1] = Q.double()
S[2] = S[1].double()
beta[1] = (S[1].x_cord*S[1].z_cord) % n
beta[2] = (S[2].x_cord*S[2].z_cord) % n
for d in range(3, D + 1):
S[d] = S[d - 1].add(S[1], S[d - 2])
beta[d] = (S[d].x_cord*S[d].z_cord) % n
g = 1
B = B1 - 1
T = Q.mont_ladder(B - 2*D)
R = Q.mont_ladder(B)
for r in range(B, B2, 2*D):
alpha = (R.x_cord*R.z_cord) % n
for q in sieve.primerange(r + 2, r + 2*D + 1):
delta = (q - r) // 2
f = (R.x_cord - S[delta].x_cord)*(R.z_cord + S[delta].z_cord) -\
alpha + beta[delta]
g = (g*f) % n
#Swap
T, R = R, R.add(S[D], T)
g = gcd(n, g)
#Stage 2 Factor found
if g != 1 and g != n:
return g
#ECM failed, Increase the bounds
raise ValueError("Increase the bounds")
```
###Assistant : Returns one factor of n using
Lenstra's 2 Stage Elliptic curve Factorization
with Suyama's Parameterization. Here Montgomery
arithmetic is used for fast computation of addition
and doubling of points in elliptic curve.
This ECM method considers elliptic curves in Montgomery
form (E : b*y**2*z = x**3 + a*x**2*z + x*z**2) and involves
elliptic curve operations (mod N), where the elements in
Z are reduced (mod N). Since N is not a prime, E over FF(N)
is not really an elliptic curve but we can still do point additions
and doubling as if FF(N) was a field.
Stage 1 : The basic algorithm involves taking a random point (P) on an
elliptic curve in FF(N). Then compute k*P using the Montgomery ladder algorithm.
Let q be an unknown factor of N. Then the order of the curve E, |E(FF(q))|,
might be a smooth number that divides k. Then we have k = l * |E(FF(q))|
for some l. For any point belonging to the curve E, |E(FF(q))|*P = O,
hence k*P = l*|E(FF(q))|*P = O. Thus k*P.z_cord = 0 (mod q), and the unknown
factor of N (q) can be recovered by taking gcd(k*P.z_cord, N).
Stage 2 : This is a continuation of Stage 1 if k*P != O. The idea utilizes
the fact that even if k*P != O, the value of k might miss just one large
prime divisor of |E(FF(q))|. In this case we only need to compute the
scalar multiplication by p to get p*k*P = O. Here a second bound B2
restricts the size of possible values of p.
Parameters
==========
n : Number to be Factored
B1 : Stage 1 Bound
B2 : Stage 2 Bound
max_curve : Maximum number of curves generated
References
==========
.. [1] Carl Pomerance and Richard Crandall "Prime Numbers:
A Computational Perspective" (2nd Ed.), page 344
|
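Editor's note: a minimal usage sketch for the factorizer above. It assumes `_ecm_one_factor` is importable from `sympy.ntheory.ecm` (in released SymPy the public entry point for users is `ecm()`); since the method is probabilistic, it can fail on all curves and raise `ValueError("Increase the bounds")`, in which case the natural move is to retry with larger bounds:
```Python
# Assumes a SymPy source tree where _ecm_one_factor lives in sympy.ntheory.ecm.
from sympy.ntheory.ecm import _ecm_one_factor

n = 104729 * 1299709          # product of two known primes
try:
    f = _ecm_one_factor(n, B1=10000, B2=100000, max_curve=200)
    assert n % f == 0 and 1 < f < n   # f is a nontrivial divisor of n
    print(f, n // f)
except ValueError:
    # every curve failed both stages; retry with larger bounds
    f = _ecm_one_factor(n, B1=100000, B2=1000000)
```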
195 | async def test_unique_id(hass):
await setup_test_entity(
hass,
{
"unique": {
"command_open": "echo open",
"command_close": "echo close",
"command_stop": "echo stop",
"unique_id": "unique",
},
"not_unique_1": {
"command_open": "echo open",
"command_close": "echo close",
"command_stop": "echo stop",
"unique_id": "not-so-unique-anymore",
},
"not_unique_2": {
"command_open": "echo open",
"command_close": "echo close",
"command_stop": "echo stop",
"unique_id": "not-so-unique-anymore",
},
},
)
assert len(hass.states.async_all()) == 2
ent_reg = entity_registry.async_get(hass)
assert len(ent_reg.entities) == 2
assert ent_reg.async_get_entity_id("cover", "command_line", "unique") is not None
assert (
ent_reg.async_get_entity_id("cover", "command_line", "not-so-unique-anymore")
is not None
)
| Test the unique_id option and that only one cover is created per id. | 12 | 78 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_unique_id(hass):
await setup_test_entity(
hass,
{
"unique": {
"command_open": "echo open",
"command_close": "echo close",
"command_stop": "echo stop",
"unique_id": "unique",
},
"not_unique_1": {
"command_open": "echo open",
"command_close": "echo close",
"command_stop": "echo stop",
"unique_id": "not-so-unique-anymore",
},
"not_unique_2": {
"command_open": "echo open",
"command_close": "echo close",
"command_stop": "echo stop",
"unique_id": "not-so-unique-anymore",
},
},
)
assert len(hass.states.async_all()) == 2
ent_reg = entity_registry.async_get(hass)
assert len(ent_reg.entities) == 2
assert ent_reg.async_get_entity_id("cover", "command_line", "unique") is not None
assert (
ent_reg.async_get_entity_id("cover", "command_line", "not-so-unique-anymore")
is not None
)
```
###Assistant : Test the unique_id option and that only one cover is created per id.
|
196 | def mock_ssl_context():
with patch(
"homeassistant.components.mqtt.config_flow.SSLContext"
) as mock_context, patch(
"homeassistant.components.mqtt.config_flow.load_pem_private_key"
) as mock_key_check, patch(
"homeassistant.components.mqtt.config_flow.load_pem_x509_certificate"
) as mock_cert_check:
yield {
"context": mock_context,
"load_pem_x509_certificate": mock_cert_check,
"load_pem_private_key": mock_key_check,
}
@pytest.fixture | Mock the SSL context used to load the cert chain and to load verify locations. | 15 | 28 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def mock_ssl_context():
with patch(
"homeassistant.components.mqtt.config_flow.SSLContext"
) as mock_context, patch(
"homeassistant.components.mqtt.config_flow.load_pem_private_key"
) as mock_key_check, patch(
"homeassistant.components.mqtt.config_flow.load_pem_x509_certificate"
) as mock_cert_check:
yield {
"context": mock_context,
"load_pem_x509_certificate": mock_cert_check,
"load_pem_private_key": mock_key_check,
}
@pytest.fixture
```
###Assistant : Mock the SSL context used to load the cert chain and to load verify locations.
|
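Editor's note: a standalone sketch of the assertion pattern a fixture like the one above enables. The dictionary below is a hypothetical stand-in for the fixture's yield value, shown in isolation rather than wired into a real config flow:
```Python
from unittest.mock import MagicMock

# Hypothetical stand-in for the fixture's yielded dict of mocks.
ctx = {
    "load_pem_x509_certificate": MagicMock(),
    "load_pem_private_key": MagicMock(),
}

# In a real test, the MQTT config flow would trigger these calls internally.
ctx["load_pem_x509_certificate"](b"-----BEGIN CERTIFICATE-----...")
ctx["load_pem_private_key"](b"-----BEGIN PRIVATE KEY-----...", password=None)

# The test then asserts the mocked crypto entry points were exercised.
assert ctx["load_pem_x509_certificate"].called
assert ctx["load_pem_private_key"].called
```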
197 | def test_task_states_for_dag_run_when_dag_run_not_exists(self):
with pytest.raises(DagRunNotFound):
default_date2 = timezone.datetime(2016, 1, 9)
task_command.task_states_for_dag_run(
self.parser.parse_args(
[
'tasks',
'states-for-dag-run',
'not_exists_dag',
default_date2.isoformat(),
'--output',
"json",
]
)
)
|
task_states_for_dag_run should raise a DagRunNotFound exception when an invalid dag id is passed
| 11 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_task_states_for_dag_run_when_dag_run_not_exists(self):
with pytest.raises(DagRunNotFound):
default_date2 = timezone.datetime(2016, 1, 9)
task_command.task_states_for_dag_run(
self.parser.parse_args(
[
'tasks',
'states-for-dag-run',
'not_exists_dag',
default_date2.isoformat(),
'--output',
"json",
]
)
)
```
###Assistant :
task_states_for_dag_run should raise a DagRunNotFound exception when an invalid dag id is passed
|
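Editor's note: the `pytest.raises` context manager used above is the standard way to assert that a call fails. A minimal standalone sketch (the function and test here are illustrative):
```Python
import pytest

def parse_positive(s):
    n = int(s)
    if n <= 0:
        raise ValueError("expected a positive integer")
    return n

def test_parse_rejects_zero():
    # the block passes only if the expected exception is actually raised
    with pytest.raises(ValueError):
        parse_positive("0")
```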
198 | def test_title_present(self):
response = self.get(4)
self.assertContains(response, "Christmas", 3)
|
The page title should appear three times: once in the header, and twice
in the field listing (as the actual title and as the draft title).
| 27 | 8 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_title_present(self):
response = self.get(4)
self.assertContains(response, "Christmas", 3)
```
###Assistant :
The page title should appear three times: once in the header, and twice
in the field listing (as the actual title and as the draft title).
|
199 | def _set_position(self, pos, which='both'):
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
for ax in self._twinned_axes.get_siblings(self):
if which in ('both', 'active'):
ax._position.set(pos)
if which in ('both', 'original'):
ax._originalPosition.set(pos)
self.stale = True
|
Private version of set_position.
Call this internally to get the same functionality as `set_position`,
without taking the axes out of the constrained_layout hierarchy.
| 25 | 30 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _set_position(self, pos, which='both'):
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
for ax in self._twinned_axes.get_siblings(self):
if which in ('both', 'active'):
ax._position.set(pos)
if which in ('both', 'original'):
ax._originalPosition.set(pos)
self.stale = True
```
###Assistant :
Private version of set_position.
Call this internally to get the same functionality as `set_position`,
without taking the axes out of the constrained_layout hierarchy.
|
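Editor's note: the `pos` argument above may be either a `Bbox` or a plain `(left, bottom, width, height)` tuple; the first two lines of the method normalize the latter. A minimal illustration of that conversion using only matplotlib's public transforms API:
```Python
import matplotlib.transforms as mtransforms

pos = (0.1, 0.1, 0.8, 0.8)                 # left, bottom, width, height
if not isinstance(pos, mtransforms.BboxBase):
    pos = mtransforms.Bbox.from_bounds(*pos)
print(pos.bounds)                           # -> (0.1, 0.1, 0.8, 0.8)
```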