repo (string, 1–29 chars) | path (string, 24–332 chars) | code (string, 39–579k chars)
---|---|---
def xyz_rgb(xyz):
    """
    Convert a tuple from the CIE XYZ color space to the sRGB color space.

    Conversion assumes the XYZ input uses the D65 illuminant with a 2°
    observer angle.
    https://en.wikipedia.org/wiki/Illuminant_D65
    sRGB (standard Red Green Blue): https://en.wikipedia.org/wiki/SRGB
    CIE XYZ: https://en.wikipedia.org/wiki/CIE_1931_color_space

    :param xyz: (x, y, z) tuple on the conventional 0-100 scale.
    :return: (r, g, b) tuple of ints, each clamped to [0, 255].
    """
    x = xyz[0] / 100.0
    y = xyz[1] / 100.0
    z = xyz[2] / 100.0
    # Linear RGB via the sRGB/D65 conversion matrix.
    r = x * 3.2404542 + y * -1.5371385 + z * -0.4985314
    g = x * -0.969266 + y * 1.8760108 + z * 0.041556
    b = x * 0.0556434 + y * -0.2040259 + z * 1.0572252

    def _encode(channel):
        # Clamp out-of-gamut values, then apply the sRGB transfer function
        # (gamma companding, IEC 61966-2-1).  The original returned the raw
        # linear values, which does not match the documented sRGB contract,
        # and never clamped, so it could return negatives or values > 255.
        channel = min(max(channel, 0.0), 1.0)
        if channel > 0.0031308:
            channel = 1.055 * channel ** (1.0 / 2.4) - 0.055
        else:
            channel = 12.92 * channel
        return round(channel * 255.0)

    return _encode(r), _encode(g), _encode(b)
|
@classmethod
def _description_validator(cls, description):
    """
    Validates the description.

    Currently a no-op: the body is empty, so any value is accepted.
    Override or extend to enforce constraints on *description*.

    :param description: the description value to validate.
    """
|
def load_kaibab_lidar(npoints=None):
    """Load the sample lidar dataset from the Kaibab Plateau, Arizona, USA.

    Parameters
    ----------
    npoints: int
        Optional number of (random) points to load from file

    Returns
    -------
    A data ndarray

    Raises
    ------
    NotImplementedError
        Always — the dataset has not been published yet.
    """
    raise NotImplementedError('This sample dataset is not available yet!')
|
def p_track_file(t):
    # NOTE: the string below is NOT documentation -- PLY (yacc) parses the
    # docstring of a p_* function as the grammar production for this rule,
    # so it must stay exactly as written.
    """track_flag : FILE STRING start
    | AUDIOFILE STRING start
    | FILE STRING start length
    | AUDIOFILE STRING start length"""
    from audiotools.toc import TOCFlag_FILE
    # len(t) counts the result slot plus the matched symbols:
    # 4 -> the productions without the optional trailing `length` symbol.
    if len(t) == 4:
        t[0] = TOCFlag_FILE(type=t[1], filename=t[2], start=t[3])
    else:
        # 5 symbols -> the `length` variants; t[4] is the parsed length.
        t[0] = TOCFlag_FILE(type=t[1], filename=t[2], start=t[3], length=t[4])
|
def tokens_all(documents):
    """Return a sorted list of the distinct tokens found across *documents*."""
    vocabulary = set()
    for document in documents:
        vocabulary |= set(document)
    return sorted(vocabulary)
|
def strip_extras(requirement):
    """Return a fresh copy of *requirement* with its extras cleared."""
    requirement_cls = type(requirement)
    stripped = requirement_cls.from_line(requirement.as_line())
    stripped.extras = None
    return stripped
|
def previewrange_set():
    """Automatically set Preview Range based on range of keyframes
    """
    # Stub: this fake-API package only mirrors Blender's operator signatures
    # for IDE autocompletion; the real implementation lives inside Blender.
    pass
|
def full_name_natural_split(full_name, middle_initials=True):
    """
    Split *full_name* into a natural first name, middle part and last name.

    :param full_name: the full name string to split.
    :param middle_initials: when True, the middle part is the concatenated
        initials of the middle names; otherwise the middle names themselves,
        joined by spaces.
    :return: ``(first_name, mid_name, last_name)`` tuple of strings.
    """
    parts = full_name.strip().split(' ')
    first_name = ''
    if parts:
        first_name = parts.pop(0)
        # Keep particles such as "El" attached to the first name.
        if first_name.lower() == 'el' and parts:
            first_name += ' ' + parts.pop(0)
    last_name = ''
    if parts:
        last_name = parts.pop()
        # Roman-numeral suffixes (I, II, III) belong with the last name.
        # Bug fix: the original `a or b or c and parts` applied the `parts`
        # guard only to the 'iii' comparison, so "John I" / "John II"
        # popped from an empty list and raised IndexError.
        if last_name.lower() in ('i', 'ii', 'iii') and parts:
            last_name = parts.pop() + ' ' + last_name
    if middle_initials:
        mid_name = ''
        for middle_name in parts:
            if middle_name:
                mid_name += middle_name[0]
    else:
        mid_name = ' '.join(parts)
    return first_name, mid_name, last_name
|
def find_nuc_indel(gapped_seq, indel_seq):
    """
    Return the full indel present in *indel_seq* but missing (gapped with
    '-') in *gapped_seq*.  Both sequences are assumed to start at the first
    position of the gap.
    """
    indel = indel_seq[0]
    position = 1
    while position < len(gapped_seq) and gapped_seq[position] == '-':
        indel += indel_seq[position]
        position += 1
    return indel
|
@staticmethod
def value_title(data):
    """Return a (raw, display) pair for the chapter title.

    The display value falls back to the string 'null' when no truthy
    title is present under data['tags']['title'].
    """
    title = data.get('tags', {}).get('title', None)
    if title:
        return title, title
    return title, 'null'
|
def is_none(s):
    """
    Returns True if the given object is None, False otherwise.

    :param s: object
    :return: bool
    """
    # Identity test replaces the original `type(s).__name__ == 'NoneType'`,
    # which was both unidiomatic and wrong for any user class that happens
    # to be named 'NoneType'.
    return s is None
|
def parse_color_string(color_string):
    """
    Parse a user-typed colour string into an (r, g, b) tuple of ints.

    The string must be a CSS 3- or 6-digit hex code without the '#'
    prefix.  Raises ValueError when the length is wrong or a character
    is not a hexadecimal digit.
    """
    if len(color_string) == 3:
        # 'abc' is shorthand for 'aabbcc'; doubling a hex digit is the
        # same as multiplying its value by 17.
        color_string = ''.join(digit * 2 for digit in color_string)
    elif len(color_string) != 6:
        raise ValueError(
            'Color string must be either 3 or 6 hexadecimal digits long')
    packed = int(color_string, 16)
    return packed >> 16 & 255, packed >> 8 & 255, packed & 255
|
def trainArgs():
    """Arguments for SVM.train
    - ``f=c(<kind>,<number>,[<train_fraction>],[<neg_fraction>])``
    Selects the fold generation method and the number of folds. If < 1.0,
    then the folds for training are generated from a subset with the
    specified size and the remaining samples are used for validation.
    Meaning of specific values: = 1 => each fold is a contiguous block =
    2 => alternating fold assignmend = 3 => random = 4 => stratified
    random = 5 => random subset ( and required)
    Allowed values: : integer between 1 and 5 : integer >= 1 : float >
    0.0 and <= 1.0 : float > 0.0 and < 1.0
    Default values: = 3 = 5 = 1.00
    - ``g=c(<size>,<min_gamma>,<max_gamma>,[<scale>])``
    - ``g=<gamma_list>``
    The first variant sets the size of the gamma grid and its endpoints
    and . The second variant uses for the gamma grid.
    Meaning of specific values: Flag indicating whether and are scaled
    based on the sample size, the dimension, and the diameter.
    Allowed values: : integer >= 1 : float > 0.0 : float > 0.0 : bool
    Default values: = 10 = 0.200 = 5.000 = 1
    - ``GPU=<gpus>``
    Sets the number of GPUs that are going to be used. Currently, there
    is no checking whether your system actually has many GPUs. In
    addition, the number of used threads is reduced to .
    Allowed values: : integer between 0 and ???
    Default values: = 0
    Unfortunately, this option is not activated for the binaries you are
    currently using. Install CUDA and recompile to activate this option.
    - ``h=[<level>]``
    Displays all help messages.
    Meaning of specific values: = 0 => short help messages = 1 =>
    detailed help messages
    Allowed values: : 0 or 1
    Default values: = 0
    - ``i=c(<cold>,<warm>)``
    Selects the cold and warm start initialization methods of the solver.
    In general, this option should only be used in particular situations
    such as the implementation and testing of a new solver or when using
    the kernel cache.
    Meaning of specific values: For values between 0 and 6, both and have
    the same meaning taken from Steinwart et al, 'Training SVMs without
    offset', JMLR 2011. These are: 0 Sets all coefficients to zero. 1
    Sets all coefficients to C. 2 Uses the coefficients of the previous
    solution. 3 Multiplies all coefficients by C\\_new/C\\_old. 4
    Multiplies all unbounded SVs by C\\_new/C\\_old. 5 Multiplies all
    coefficients by C\\_old/C\\_new. 6 Multiplies all unbounded SVs by
    C\\_old/C\\_new.
    Allowed values: Depends on the solver, but the range of is always a
    subset of the range of .
    Default values: Depending on the solver, the (hopefully) most
    efficient method is chosen.
    - ``k=c(<type>,[aux-file],[<Tr_mm_Pr>,[<size_P>],<Tr_mm>,[<size>],<Va_mm_Pr>,<Va_mm>])``
    Selects the type of kernel and optionally the memory model for the
    kernel matrices.
    Meaning of specific values: = 0 => Gaussian RBF = 1 => Poisson = 2 =>
    Experimental hierarchical Gauss kernel => Name of the file that
    contains additional information for the hierarchical Gauss kernel.
    Only this kernel type requires this option. = 0 => not contiguously
    stored matrix = 1 => contiguously stored matrix = 2 => cached matrix
    = 3 => no matrix stored => size of kernel cache in MB Here, X=Tr
    stands for the training matrix and X=Va for the validation matrix. In
    both cases, Y=Pr stands for the pre-kernel matrix, which stores the
    distances between the samples. If is set, then the other three flags
    need to be set, too. The values must only be set if a cache is
    chosen. NOTICE: Not all possible combinations are allowed.
    Allowed values: : integer between 0 and 2 : integer between 0 and 3 :
    integer not smaller than 1
    Default values: = 0 = 1 = 1024 = 512
    - ``l=c(<size>,<min_lambda>,<max_lambda>,[<scale>])``
    - ``l=c(<lambda_list>,[<interpret_as_C>])``
    The first variant sets the size of the lambda grid and its endpoints
    and . The second variant uses , after ordering, for the lambda grid.
    Meaning of specific values: Flag indicating whether is internally
    devided by the average number of samples per fold. Flag indicating
    whether the lambda list should be interpreted as a list of C values
    Allowed values: : integer >= 1 : float > 0.0 : float > 0.0 : bool :
    bool
    Default values: = 10 = 0.001 = 0.100 = 1 = 0
    - ``L=c(<loss>,[<clipp>],[<neg_weight>,<pos_weight>])``
    Sets the loss that is used to compute empirical errors. The optional
    value specifies where the predictions are clipped during validation.
    The optional weights can only be set if specifies a loss that has
    weights.
    Meaning of specific values: = 0 => binary classification loss = 2 =>
    least squares loss = 3 => weighted least squares loss = 4 => pinball
    loss = 5 => your own template loss = -1.0 => clipp at smallest
    possible value (depends on labels) = 0.0 => no clipping is applied
    Allowed values: : values listed above : float >= -1.0 : float > 0.0 :
    float > 0.0
    Default values: = native loss of solver chosen by option -S = -1.000
    = set by option -W = set by option -W
    - ``P=c(1,[<size>])``
    - ``P=c(2,[<number>])``
    - ``P=c(3,[<radius>],[<subset_size>])``
    - ``P=c(4,[<size>],[<reduce>],[<subset_size>])``
    - ``P=c(5,[<size>],[<ignore_fraction>],[<subset_size>],[<covers>])``
    Selects the working set partition method.
    Meaning of specific values: = 0 => do not split the working sets = 1
    => split the working sets in random chunks using maximum of each
    chunk. Default values are: = 2000 = 2 => split the working sets in
    random chunks using of chunks. Default values are: = 10 = 3 => split
    the working sets by Voronoi subsets using . If [subset\\_size] is set,
    a subset of this size is used to faster create the Voronoi partition.
    If subset\\_size == 0, the entire data set is used. Default values
    are: = 1.000 = 0 = 4 => split the working sets by Voronoi subsets
    using . The optional controls whether a heuristic to reduce the
    number of cells is used. If [subset\\_size] is set, a subset of this
    size is used to faster create the Voronoi partition. If subset\\_size
    == 0, the entire data set is used. Default values are: = 2000 = 1 =
    20000 = 5 => devide the working sets into overlapping regions of size
    . The process of creating regions is stopped when \\* samples have not
    been assigned to a region. These samples will then be assigned to the
    closest region. If is set, a subset of this size is used to find the
    regions. If subset\\_size == 0, the entire data set is used. Finally,
    controls the number of times the process of finding regions is
    repeated. Default values are:. = 2000 = 0.5 = 20000 = 1
    Allowed values: : integer between 0 and 5 : positive integer :
    positive integer : positive real : positive integer : bool : positive
    integer
    Default values: = 0
    - ``r=<seed>``
    Initializes the random number generator with .
    Meaning of specific values: = -1 => a random seed based on the
    internal timer is used
    Allowed values: : integer between -1 and 2147483647
    Default values: = -1
    - ``s=c(<clipp>,[<stop_eps>])``
    Sets the value at which the loss is clipped in the solver to . The
    optional parameter sets the threshold in the stopping criterion of
    the solver.
    Meaning of specific values: = -1.0 => Depending on the solver type
    clipp either at the smallest possible value (depends on labels), or
    do not clipp. = 0.0 => no clipping is applied
    Allowed values: : -1.0 or float >= 0.0. In addition, if > 0.0, then
    must not be smaller than the largest absolute value of the samples. :
    float > 0.0
    Default values: = -1.0 = 0.0010
    - ``S=c(<solver>,[<NNs>])``
    Selects the SVM solver and the number of nearest neighbors used in
    the working set selection strategy (2D-solvers only).
    Meaning of specific values: = 0 => kernel rule for classification = 1
    => LS-SVM with 2D-solver = 2 => HINGE-SVM with 2D-solver = 3 =>
    QUANTILE-SVM with 2D-solver = 4 => EXPECTILE-SVM with 2D-solver = 5
    => Your SVM solver implemented in template\\_svm.\\*
    Allowed values: : integer between 0 and 5 : integer between 0 and 100
    Default values: = 2 = depends on the solver
    - ``T=<threads>``
    Sets the number of threads that are going to be used. Each thread is
    assigned to a logical processor on the system, so that the number of
    allowed threads is bounded by the number of logical processors. On
    systems with activated hyperthreading each physical core runs one
    thread, if does not exceed the number of physical cores. Since hyper-
    threads on the same core share resources, using more threads than
    cores does usually not increase the performance significantly, and
    may even decrease it.
    Meaning of specific values: = 0 => 4 threads are used (all physical
    cores run one thread) = -1 => 3 threads are used (all but one of the
    physical cores run one thread)
    Allowed values: : integer between -1 and 4
    Default values: = 0
    - ``w=c(<neg_weight>,<pos_weight>)``
    - ``w=c(<min_weight>,<max_weight>,<size>,[<geometric>,<swap>])``
    - ``w=c(<weight_list>,[<swap>])``
    Sets values for the weights, solvers should be trained with. For
    solvers that do not have weights this option is ignored. The first
    variants sets a pair of values. The second variant computes a
    sequence of weights of length . The third variant takes the list of
    weights.
    Meaning of specific values: = 1 => is the negative weight and is the
    positive weight. > 1 => many pairs are computed, where the positive
    weights are between and and the negative weights are 1 - pos\\_weight.
    Flag indicating whether the intermediate positive weights are
    geometrically or arithmetically distributed. Flag indicating whether
    the role of the positive and negative weights are interchanged.
    Allowed values: <... weight ...>: float > 0.0 and < 1.0 : integer > 0
    : bool : bool
    Default values: = 1.0 = 1.0 = 1 = 0 = 0
    - ``W=<type>``
    Selects the working set selection method.
    Meaning of specific values: = 0 => take the entire data set = 1 =>
    multiclass 'all versus all' = 2 => multiclass 'one versus all' = 3 =>
    bootstrap with resamples of size
    Allowed values: : integer between 0 and 3
    Default values: = 0
    """
    # NOTE(review): this help text appears garbled -- the placeholder names
    # (e.g. <kind>, <number>) seem to have been stripped from the "Meaning of
    # specific values"/"Allowed values" sentences during extraction.  Restore
    # them from the liquidSVM command-line documentation before relying on it.
|
@staticmethod
def has_set_timeout():
    """Tell whether this watchdog implementation can change the timeout.

    The base implementation cannot, so this always returns False.
    """
    return False
|
def _convert_randomnormallike(builder, node, graph, err):
    """
    Convert an ONNX RandomNormalLike node to a CoreML Random Normal Like layer:
    https://github.com/apple/coremltools/blob/655b3be5cc0d42c3c4fa49f0f0e4a93a26b3e492/mlmodel/format/NeuralNetwork.proto#L4434
    """
    attrs = node.attributes
    builder.add_random_normal_like(
        name=node.name,
        input_name=node.inputs[0],
        output_name=node.outputs[0],
        mean=attrs.get('mean', 0.0),
        stddev=attrs.get('scale', 1.0),
        seed=attrs.get('seed', -1),
    )
|
@classmethod
def register(cls, type, reduce):
    """Record *reduce* as the reduction callable used to pickle *type*."""
    reducers = cls._extra_reducers
    reducers[type] = reduce
|
def is_viable_bug(bug_obj):
    """ Check if a bug is viable to attach to an advisory.
    A bug is viable when its status is MODIFIED or VERIFIED.
    :param bug_obj: bug object
    :returns: True if viable
    """
    return bug_obj.status in ('MODIFIED', 'VERIFIED')
|
@classmethod
def add_common_opts(cls, parser):
    """Register on *parser* the options shared by several auth plugins."""
    for name in cls.common_opt_names:
        cls._parser_add_opt(parser, name)
|
def markdown(text, **kwargs):
    """
    Render *text* from Markdown to HTML and return the markup.
    """
    # Imported lazily (and aliased, since this function shadows the module
    # name) so the dependency is only required when Markdown is used.
    import markdown as markdown_module
    return markdown_module.markdown(text, **kwargs)
|
def process_value(value: str) ->str:
    """Return a processed value for an environment variable.

    A single pair of surrounding double quotes is stripped; any other
    value is returned unchanged.
    """
    # Require at least two characters: with the original `len(value) > 0`
    # check, a lone '"' satisfied value[0] == value[-1] == '"' (both index
    # the same character) and was wrongly stripped to the empty string.
    if len(value) > 1 and value[0] == value[-1] == '"':
        return value[1:-1]
    return value
|
def tick_format_function(value, index, tick_count):
    """
    Example of a function that formats tick values for display.
    :param value:
        The value to be formatted.
    :param index:
        The index of the tick.
    :param tick_count:
        The total number of ticks being displayed.
    :returns:
        A stringified tick value for display.
    """
    return str(value)
|
def reverse_graph(y):
    """ Build a dictionary with all edges of y's graph reversed.
    INPUTS
    ======
    y : ADnum
    OUTPUTS
    =======
    A dictionary mapping each child node to a list of (parent, label) pairs.
    """
    forward = y.graph
    reversed_edges = {}
    for parent, children in forward.items():
        for child in children:
            reversed_edges.setdefault(child[0], []).append((parent, child[1]))
    return reversed_edges
|
def df_apply_sns_color_map(df, color='red', **kwargs):
    """Return a pandas Styler shading *df* with a seaborn light palette.
    Thanks to https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
    """
    import seaborn as sns
    colormap = sns.light_palette(color, as_cmap=True, **kwargs)
    return df.copy().style.background_gradient(cmap=colormap)
|
def copy_db_snapshot(SourceDBSnapshotIdentifier=None,
        TargetDBSnapshotIdentifier=None, KmsKeyId=None, Tags=None,
        CopyTags=None, PreSignedUrl=None, SourceRegion=None):
    """
    Copies the specified DB snapshot. The source DB snapshot must be in the "available" state.
    To copy a DB snapshot from a shared manual DB snapshot, SourceDBSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB snapshot.
    You can copy an encrypted DB snapshot from another AWS region. In that case, the region where you call the CopyDBSnapshot action is the destination region for the encrypted DB snapshot to be copied to. To copy an encrypted DB snapshot from another region, you must provide the following values:
    To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process .
    For more information on copying encrypted snapshots from one region to another, see Copying a DB Snapshot in the Amazon RDS User Guide.
    See also: AWS API Documentation
    Examples
    This example copies a DB snapshot.
    Expected Output:
    :example: response = client.copy_db_snapshot(
    SourceDBSnapshotIdentifier='string',
    TargetDBSnapshotIdentifier='string',
    KmsKeyId='string',
    Tags=[
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    CopyTags=True|False,
    SourceRegion='string'
    )
    :type SourceDBSnapshotIdentifier: string
    :param SourceDBSnapshotIdentifier: [REQUIRED]
    The identifier for the source DB snapshot.
    If you are copying from a shared manual DB snapshot, this must be the ARN of the shared DB snapshot.
    You cannot copy an encrypted, shared DB snapshot from one AWS region to another.
    Constraints:
    Must specify a valid system snapshot in the 'available' state.
    If the source snapshot is in the same region as the copy, specify a valid DB snapshot identifier.
    If the source snapshot is in a different region than the copy, specify a valid DB snapshot ARN. For more information, go to Copying a DB Snapshot or DB Cluster Snapshot .
    Example: rds:mydb-2012-04-02-00-01
    Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
    :type TargetDBSnapshotIdentifier: string
    :param TargetDBSnapshotIdentifier: [REQUIRED]
    The identifier for the copied snapshot.
    Constraints:
    Cannot be null, empty, or blank
    Must contain from 1 to 255 alphanumeric characters or hyphens
    First character must be a letter
    Cannot end with a hyphen or contain two consecutive hyphens
    Example: my-db-snapshot
    :type KmsKeyId: string
    :param KmsKeyId: The AWS KMS key ID for an encrypted DB snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.
    If you copy an unencrypted DB snapshot and specify a value for the KmsKeyId parameter, Amazon RDS encrypts the target DB snapshot using the specified KMS encryption key.
    If you copy an encrypted DB snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new KMS encryption key. If you don't specify a value for KmsKeyId , then the copy of the DB snapshot is encrypted with the same KMS key as the source DB snapshot.
    If you copy an encrypted snapshot to a different AWS region, then you must specify a KMS key for the destination AWS region.
    If you copy an encrypted DB snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId .
    To copy an encrypted DB snapshot to another region, you must set KmsKeyId to the KMS key ID used to encrypt the copy of the DB snapshot in the destination region. KMS encryption keys are specific to the region that they are created in, and you cannot use encryption keys from one region in another region.
    :type Tags: list
    :param Tags: A list of tags.
    (dict) --Metadata assigned to an Amazon RDS resource consisting of a key-value pair.
    Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').
    Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with 'aws:' or 'rds:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').
    :type CopyTags: boolean
    :param CopyTags: True to copy all tags from the source DB snapshot to the target DB snapshot; otherwise false. The default is false.
    :type PreSignedUrl: string
    :param PreSignedUrl: The URL that contains a Signature Version 4 signed request for the CopyDBSnapshot API action in the AWS region that contains the source DB snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB snapshot from another AWS region.
    The presigned URL must be a valid request for the CopyDBSnapshot API action that can be executed in the source region that contains the encrypted DB snapshot to be copied. The presigned URL request must contain the following parameter values:
    DestinationRegion - The AWS Region that the encrypted DB snapshot will be copied to. This region is the same one where the CopyDBSnapshot action is called that contains this presigned URL. For example, if you copy an encrypted DB snapshot from the us-west-2 region to the us-east-1 region, then you will call the CopyDBSnapshot action in the us-east-1 region and provide a presigned URL that contains a call to the CopyDBSnapshot action in the us-west-2 region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 region.
    KmsKeyId - The KMS key identifier for the key to use to encrypt the copy of the DB snapshot in the destination region. This is the same identifier for both the CopyDBSnapshot action that is called in the destination region, and the action contained in the presigned URL.
    SourceDBSnapshotIdentifier - The DB snapshot identifier for the encrypted snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source region. For example, if you are copying an encrypted DB snapshot from the us-west-2 region, then your SourceDBSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115 .
    To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process .
    Please note that this parameter is automatically populated if it is not provided. Including this parameter is not required
    :type SourceRegion: string
    :param SourceRegion: The ID of the region that contains the snapshot to be copied.
    :rtype: dict
    :return: {
    'DBSnapshot': {
    'DBSnapshotIdentifier': 'string',
    'DBInstanceIdentifier': 'string',
    'SnapshotCreateTime': datetime(2015, 1, 1),
    'Engine': 'string',
    'AllocatedStorage': 123,
    'Status': 'string',
    'Port': 123,
    'AvailabilityZone': 'string',
    'VpcId': 'string',
    'InstanceCreateTime': datetime(2015, 1, 1),
    'MasterUsername': 'string',
    'EngineVersion': 'string',
    'LicenseModel': 'string',
    'SnapshotType': 'string',
    'Iops': 123,
    'OptionGroupName': 'string',
    'PercentProgress': 123,
    'SourceRegion': 'string',
    'SourceDBSnapshotIdentifier': 'string',
    'StorageType': 'string',
    'TdeCredentialArn': 'string',
    'Encrypted': True|False,
    'KmsKeyId': 'string',
    'DBSnapshotArn': 'string',
    'Timezone': 'string',
    'IAMDatabaseAuthenticationEnabled': True|False
    }
    }
    :returns:
    TargetDBSnapshotIdentifier - The identifier for the new copy of the DB snapshot in the destination region.
    SourceDBSnapshotIdentifier - The DB snapshot identifier for the encrypted snapshot to be copied. This identifier must be in the ARN format for the source region and is the same value as the SourceDBSnapshotIdentifier in the presigned URL.
    """
    # Documentation-only stub generated from the AWS RDS API reference; the
    # working implementation is provided by boto3/botocore at runtime.
    pass
|
def get_principals_and_roles():
    """Get all settings.

    Return every principal/role combination together with the setting
    for that combination, as a sequence of ``(role_id, principal_id,
    setting)`` tuples, in that order.
    """
|
@classmethod
def from_int(cls, x, length, bitorder='big', *, signed=False):
    """Represent the Python int x as a BitString.
    Acts similarly to int.to_bytes.
    :param int x:
    An integer to represent. Negative integers are represented in two's
    complement form, unless the argument signed is False, in which case
    negative integers raise an OverflowError.
    :param int length:
    The length of the resulting BitString. An OverflowError is raised
    if the integer is not representable in this many bits.
    :param bitorder:
    Determines the bit order used in the BitString representation. By
    default, this function uses Postgres conventions for casting ints
    to bits. If bitorder is 'big', the most significant bit is at the
    start of the string (this is the same as the default). If bitorder
    is 'little', the most significant bit is at the end of the string.
    :param bool signed:
    Determines whether two's complement is used in the BitString
    representation. If signed is False and a negative integer is given,
    an OverflowError is raised.
    :return BitString: A BitString representing the input integer, in the
    form specified by the other input args.
    .. versionadded:: 0.18.0
    """
    if length < 0:
        raise ValueError('length argument must be non-negative')
    elif length < x.bit_length():
        raise OverflowError('int too big to convert')
    if x < 0:
        if not signed:
            raise OverflowError("can't convert negative int to unsigned")
        # Reduce x to its `length`-bit two's-complement representation.
        x &= (1 << length) - 1
    if bitorder == 'big':
        # Postgres-style layout: most significant bit first; nothing to do.
        pass
    elif bitorder == 'little':
        # bin(x)[:1:-1] walks the '0b...' string backwards, stopping before
        # index 1, i.e. it reverses the binary digits while skipping the
        # '0b' prefix; ljust pads with '0' for the high-order bits above
        # x.bit_length(), which land at the end of the reversed string.
        x = int(bin(x)[:1:-1].ljust(length, '0'), 2)
    else:
        raise ValueError("bitorder must be either 'big' or 'little'")
    # Left-align the bit pattern within the final byte (right-pad the bit
    # string up to a multiple of 8).
    x <<= -length % 8
    bytes_ = x.to_bytes((length + 7) // 8, byteorder='big')
    return cls.frombytes(bytes_, length)
|
@classmethod
def from_string(cls, rgb_hex_str):
    """
    Return a new instance built from an RGB colour hex string such as
    ``'3C2F80'``.
    """
    red = int(rgb_hex_str[:2], 16)
    green = int(rgb_hex_str[2:4], 16)
    blue = int(rgb_hex_str[4:], 16)
    return cls(red, green, blue)
|
def is_sec_compressed(sec):
    """Return True when *sec* encodes a compressed public key (0x02/0x03 prefix)."""
    prefix = sec[:1]
    return prefix == b'\x02' or prefix == b'\x03'
|
def to_html(df, css=None):
    """
    Return a pandas table HTML representation with the datagrid css.

    :param df: the DataFrame to render.
    :param css: optional CSS rules, injected inside a ``<style>`` tag.
    :return: an ``IPython.display.HTML`` object.

    Examples
    --------
    >>> from IPython.display import HTML
    >>> from pandas import DataFrame
    >>> df = DataFrame({"a": [1, 2]})
    >>> html = to_html(df)
    >>> isinstance(html, HTML)
    True
    """
    from IPython.display import HTML
    style = f'<style>{css}</style>' if css else ''
    # Bug fix: the original built a dict {'style': ..., 'table': df.to_html()}
    # and interpolated that dict's repr into the markup instead of the
    # rendered table.
    return HTML(f'{style}<div class="datagrid">{df.to_html()}</div>')
|
def table_sql():
    """Import the table_sql module on demand (avoids pre-loading it)."""
    from . import table_sql as module
    return module
|
def partitions2(n):
    """
    Generates all partitions of {1,...,n}.
    For n=12, this finishes in 4.48 seconds.
    """
    # NOTE(review): each yielded value is the list a[1:], one label per
    # element -- a restricted-growth string where element i belongs to the
    # block labelled a[i].  The iteration looks like Knuth's Algorithm H
    # (TAOCP 4A, 7.2.1.5); confirm against the reference before relying on
    # the output ordering.  Also note n == 0 yields [[]] while n == 1 yields
    # [[0]] -- the two base cases have different nesting.
    if n == 0:
        yield [[]]
    elif n == 1:
        yield [[0]]
    else:
        a = [0] * (n + 1)  # a[1..n]: current growth string (a[0] unused)
        b = [1] * n        # b[j]: upper bound for a[j] (1 + max label so far)
        m = 1
        while True:
            yield a[1:]
            if a[n] == m:
                # a[n] is maximal: carry into the rightmost position that
                # can still grow.
                j = n - 1
                while a[j] == b[j]:
                    j -= 1
                if j == 1:
                    break  # a[1] can never grow; every string was emitted
                else:
                    a[j] += 1
                    m = b[j]
                    if a[j] == b[j]:
                        m += 1
                    j += 1
                    # Reset the tail to zero and refresh its bounds.
                    while j < n:
                        a[j] = 0
                        b[j] = m
                        j += 1
                    a[n] = 0
            else:
                a[n] += 1
|
def append(component):
    """Add *component* to the collection, mutating the current collection
    in place.

    :param component: the component to add.
    """
|
@staticmethod
def _dummy_coder(word):
    """Identity transliteration: hand *word* back untouched."""
    return word
|
@staticmethod
def remove_host_acknowledgement(host):
    """Clear any acknowledgement set on *host*.

    Triggered by an external command line of the form::
        REMOVE_HOST_ACKNOWLEDGEMENT;<host_name>

    :param host: host to edit
    :type host: alignak.objects.host.Host
    :return: None
    """
    host.unacknowledge_problem()
|
def retry(attempts, *callbacks, **kwargs):
    """
    Repeatedly try callbacks a fixed number of times or until all return True
    """
    succeeded = False
    for attempt in range(attempts):
        if 'bar' in kwargs:
            # Advance the progress bar; the first attempt resets it.
            kwargs['bar'].next(attempt == 0, message=kwargs.get('message'))
        # all() short-circuits on the first falsy callback, matching the
        # original break-out-of-inner-loop behaviour.
        if all(callback() for callback in callbacks):
            succeeded = True
            break
    if not succeeded:
        raise OSError('Retry attempts exceeded')
    return True
|
def ss_bridges(structure, asym_id_1, asym_id_2):
    """
    Iterate over the disulfide bridges in the structure.
    """
    for bridge in structure.ss_bridges:
        (asym1, comp1, seq1), (asym2, comp2, seq2) = bridge
        # Yield sequence numbers oriented so the first matches asym_id_1.
        if (asym1, asym2) == (asym_id_1, asym_id_2):
            yield seq1, seq2
        elif (asym1, asym2) == (asym_id_2, asym_id_1):
            yield seq2, seq1
|
xrayutilities | xrayutilities//simpack/darwin_theory.pyclass:DarwinModelAlGaAs001/poisson_ratio | @staticmethod
def poisson_ratio(x):
"""
calculate the Poisson ratio of the alloy
"""
return 2 * (5.38 + 0.32 * x) / (11.88 + 0.14 * x)
|
def parse_argstring(magic_func, argstring):
    """Parse the string of arguments for the given magic function."""
    parser = magic_func.parser
    return parser.parse_argstring(argstring)
|
def cache_headers():
    """ Set the cache and Last modified settings.
    """
    # Interface stub: implementations set the HTTP caching headers.
|
def generate_copy_id(base_id, collection, suffix='_copy'):
    """Generate a new id that is not present in ``collection``.

    Parameters
    ----------
    base_id: str, original id while copying, or "New" for new entries
    collection: dict or list
    suffix: str, suffix appended to the base id

    Returns
    -------
    str: first of ``base_id+suffix``, ``base_id+suffix+"1"``, ... that is
    not already in ``collection``
    """
    stem = str(base_id) + suffix
    candidate = stem
    counter = 0
    # Probe stem, stem1, stem2, ... until an unused id is found.
    while candidate in collection:
        counter += 1
        candidate = stem + str(counter)
    return candidate
|
wc_utils | wc_utils//util/dict.pyclass:DictUtil/nested_get | @staticmethod
def nested_get(dict, keys, key_delimiter='.'):
""" Get the value of a nested dictionary at the nested key sequence `keys`
Args:
dict (:obj:`dict`): dictionary to retrieve value from
keys (:obj:`str` or :obj:`list`): list of nested keys to retrieve
key_delimiter (:obj:`str`, optional): delimiter for `keys`
Returns:
:obj:`object`: The value of `dict` from the nested keys list
"""
if isinstance(keys, str):
keys = keys.split(key_delimiter)
nested_dict = dict
for key in keys:
nested_dict = nested_dict[key]
return nested_dict
|
def invalidateCache():
    """Invalidate the in-memory configuration cache.

    Forces a re-read the next time any getter or setter is called.
    """
    # Dropping the module-level reference is the whole invalidation.
    global _config
    _config = None
|
def dec_table(cmds):
    """Make a decompilation table from a compilation table"""
    # Invert the name -> command mapping into hex -> name, skipping
    # commands that carry no 'hex' value.
    return {cmd['hex']: name for name, cmd in cmds.items() if 'hex' in cmd}
|
@classmethod
def create(cls, name, password, encrypt_password=None, *options):
    """Query to create a user with a password."""
    with_clause = cls._build_with_clause(password, encrypt_password, *options)
    return 'CREATE USER "{name}" {with_clause}'.format(
        name=name, with_clause=with_clause)
|
def bufFuncD(inputVector, inputVectorD, nodeName, Output):
    """
    buf gate deductive fault simulation.

    :param inputVector: input logic vector
    :param inputVectorD: input stuck-at-fault vector
    :param nodeName: output node name
    :param Output: output node value
    :return: list of stuck-at faults propagated to the output
    """
    # A buffer propagates its input's fault list unchanged, plus the
    # output node stuck at the opposite value.
    faults = list(inputVectorD[0])
    faults.append('{}_{}'.format(nodeName, 1 - Output))
    return faults
|
def disable_cache():
    """
    Disables the internal cache.

    Note that this function will not clear the cache in any case, for that
    functionality see :func:`clear_cache`.
    """
    # Only the enable flag is touched; cached entries stay in memory.
    global _cache_enabled
    _cache_enabled = False
|
def slice_list(in_list, lens):
    """Slice a list into several sub lists by a list of given lengths.

    Args:
        in_list (list): The list to be sliced.
        lens (int or list): The expected length of each out list.  An int
            means every sub list has that same length.

    Returns:
        list: A list of sliced lists.

    Raises:
        TypeError: If ``lens`` is neither an int nor a list.
        ValueError: If the lengths in ``lens`` do not sum to ``len(in_list)``.
    """
    # BUG FIX: the docstring always promised int support, but the original
    # rejected ints; expand an int into a uniform list of chunk lengths
    # (the sum check below rejects any leftover remainder).
    if isinstance(lens, int):
        lens = [lens] * (len(in_list) // lens) if lens > 0 else []
    if not isinstance(lens, list):
        # Message fixed: the original referred to a nonexistent "indices".
        raise TypeError('"lens" must be an int or a list of integers')
    elif sum(lens) != len(in_list):
        raise ValueError('sum of lens and list length does not match: {} != {}'
            .format(sum(lens), len(in_list)))
    out_list = []
    idx = 0
    for step in lens:
        out_list.append(in_list[idx:idx + step])
        idx += step
    return out_list
|
def BasinKeyToJunction(grouped_data_list, thisPointData):
    """Convert grouped basin numbers to their outlet junction numbers.

    Args:
        grouped_data_list (int list): A list of lists of basin numbers
        thisPointData (str): A point data object with the basins

    Returns:
        Junction_grouped_list: the junction numbers of the basins.

    Author: SMM
    """
    junction_data = thisPointData.QueryData('outlet_junction')
    if not grouped_data_list:
        # Preserve the caller's empty container unchanged.
        return grouped_data_list
    junction_grouped_list = [[junction_data[element] for element in group]
                             for group in grouped_data_list]
    print(junction_grouped_list)
    return junction_grouped_list
|
def field_attrs(field_inst, **kwargs):
    """Adds html attributes to django form fields"""
    attrs = field_inst.field.widget.attrs
    for key, value in kwargs.items():
        if value is None:
            # None means "unset": remove the attribute if present.
            attrs.pop(key, None)
        else:
            attrs[key] = value
    return field_inst
|
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    tag = pieces.get('closest-tag', '')
    return '.' if '+' in tag else '+'
|
soupsieve-2.0 | soupsieve-2.0//soupsieve/css_match.pyclass:_DocumentNav/split_namespace | @staticmethod
def split_namespace(el, attr_name):
"""Return namespace and attribute name without the prefix."""
return getattr(attr_name, 'namespace', None), getattr(attr_name, 'name',
None)
|
def rec_to_ndarr(rec_arr, data_type=float):
    """
    Transform a numpy record array into a plain 2-D ndarray
    (one column per record field).
    """
    # Copy first so the view does not alias the caller's memory.
    field_count = len(rec_arr.dtype.names)
    return rec_arr.copy().view((data_type, field_count))
|
def temperature_correction(equ_degC, sst_degC):
    """
    Correction factor for pCO2 measured at the equilibrator temperature
    versus in-situ seawater temperature, using the Takahashi (1993)
    relationship exp(0.0423 * (SST - T_equ)).
    """
    from numpy import exp
    return exp(0.0423 * (sst_degC - equ_degC))
|
def condense_repeats(ll, use_is=True):
    """
    Count the number of consecutive repeats in a list.

    Essentially, a way of doing `uniq -c`

    Parameters
    ----------
    ll : list
        a list
    use_is : bool
        compare elements with ``is`` (identity) when True, with ``==``
        otherwise

    Returns
    -------
    list of tuples
        list of 2-tuples in the format (element, repeats) where element is
        the element from the list, and repeats is the number of consecutive
        times it appeared
    """
    vals = []
    count = 0
    old = None
    for e in ll:
        if (e is old) if use_is else (e == old):
            count += 1
        else:
            # ``old is None`` skips the initial sentinel before any element.
            if old is not None:
                vals.append((old, count))
            count = 1
            old = e
    # BUG FIX: the original unconditionally appended (old, count) after the
    # loop, so an empty input produced [(None, 0)] instead of [].
    if count:
        vals.append((old, count))
    return vals
|
@classmethod
def from_parent(cls, parent, *, name, obj=None):
    """
    The public constructor
    """
    # ``obj`` is accepted for signature compatibility but deliberately not
    # forwarded; the superclass constructor only needs name and parent.
    return super().from_parent(name=name, parent=parent)
|
def _do_periodic():
    """Do some arbitrary periodic processing.

    Called every once in a while both from the runner's main loop, and
    from the runner's hash slice processing loop.  You can do whatever
    special periodic processing you want here.
    """
    # Interface stub: concrete runners hook their periodic work in here.
|
def remember(request, userid, **kw):
    """ Return a set of headers suitable for 'remembering' the
    :term:`userid` named ``userid`` when set in a response. An
    individual authentication policy and its consumers can
    decide on the composition and meaning of ``**kw``.
    """
    # Interface stub: concrete policies build the response headers.
|
def fromqimage(im):
    """Creates an image instance from a QImage image"""
    from . import ImageQt
    # Fail fast with a clear message when no Qt bindings are available.
    if ImageQt.qt_is_installed:
        return ImageQt.fromqimage(im)
    raise ImportError('Qt bindings are not installed')
|
def get_text(original, token, replace):
    """Convenience function for getting the text to use for a match when
    formatting.

    If ``replace`` is False, returns the part of ``original`` between
    ``token.startchar`` and ``token.endchar``.  If ``replace`` is True,
    returns ``token.text``.
    """
    return token.text if replace else original[token.startchar:token.endchar]
|
@staticmethod
def getJavaPackage():
    """ Returns package name String. """
    # Fully-qualified name of the backing JVM transformer class.
    return 'com.microsoft.ml.spark.recommendation.RecommendationIndexer'
|
def get_migration_source(file):
    """Return the migration source code read from ``file``."""
    with open(file, 'r') as handle:
        content = handle.read()
    return content
|
def ekm_set_log_level(level=3):
    """ Set the logging level.

    Args:
        level (int): cutoff level (print at level and below).
    """
    global ekmmeters_log_level
    ekmmeters_log_level = level
    # (dead trailing ``pass`` removed; the assignment is the whole body)
|
@staticmethod
def reset(sample_idx):
    """Reset layer variables."""
    # Intentionally a no-op: this layer keeps no per-sample state, and the
    # method only satisfies the common layer interface.
    pass
|
def close_inspect(obj):
    """Close an open inspector window for object obj. Works on Sound and
    Picture objects."""
    # Delegate to the object's own close_inspect implementation.
    obj.close_inspect()
|
def _from_yaml(stream):
    """Load data form a YAML file or string."""
    from yaml import load
    # Prefer the C-accelerated loader; fall back to the pure-Python one.
    try:
        from yaml import CLoader as Loader
    except ImportError:
        from yaml import Loader
    return load(stream, Loader=Loader)
|
def add(data):
    """Subclasses must implement this method to create an item for
    the given `data` *and* add it to a container, and return it.

    The `data` mapping corresponds to the schema returned by
    `add_schema`.

    May raise zope.schema.ValidationError to indicate that there's
    a problem with the add form data.
    """
    # Interface stub: no implementation here.
|
def jekyllpath(path):
    """Convert an image filepath emitted by the ExportOutputProcessor into a
    URL usable with Jekyll; passed to the exporter as a filter.

    The replacement prefix is taken directly from the Jekyll _config.yml
    conventions.
    """
    site_prefix = '{{site.url}}{{site.baseurl}}/'
    return path.replace('./', site_prefix)
|
def remove_duplicate_surfaces(surfs):
    """This function that detects duplicate surfaces from a surface dictionary,
    removes them and provides a dictionary where the IDs of the deleted
    surfaces are associated with the ID of the surface that replaced them."""
    renumbering = {}
    new_surfs = {}
    tuple_surf_to_id = {}  # (type, params) -> ID of the first surface seen
    n_surfs = len(surfs)
    # Progress-line template; field width matches the widest surface ID.
    fmt_string = ('\rdetecting duplicates for surface {{:{}d}} ({{:3d}}%)'.
        format(len(str(max(surfs)))))
    for i, (key, (surf, aux)) in enumerate(sorted(surfs.items())):
        percent = int(100.0 * i / (n_surfs - 1)) if n_surfs > 1 else 100
        print(fmt_string.format(key, percent), end='', flush=True)
        # Two surfaces are duplicates iff type and parameters match exactly.
        tuple_surf = surf.type_surface, tuple(surf.param_surface)
        if tuple_surf in tuple_surf_to_id:
            renumbering[key] = tuple_surf_to_id[tuple_surf]
        else:
            new_surfs[key] = surf, aux
            renumbering[key] = key
            tuple_surf_to_id[tuple_surf] = key
    print('... done', flush=True)
    n_surfs = len(new_surfs)
    fmt_string = (
        '\rrenumbering auxiliary surfaces for surface {{:{}d}} ({{:3d}}%)'.
        format(len(str(max(new_surfs)))))
    for i, (key, (_, aux)) in enumerate(new_surfs.items()):
        percent = int(100.0 * i / (n_surfs - 1)) if n_surfs > 1 else 100
        print(fmt_string.format(key, percent), end='', flush=True)
        # Remap auxiliary surface references in place; a negative entry
        # encodes an orientation flip, so remap its absolute value.
        # (The inner ``i`` shadows the outer one; harmless, since enumerate
        # rebinds the outer ``i`` on every iteration.)
        for i in range(len(aux)):
            surf = aux[i]
            if surf > 0:
                aux[i] = renumbering[aux[i]]
            else:
                aux[i] = -renumbering[-aux[i]]
    print('... done', flush=True)
    return new_surfs, renumbering
|
def has(cls):
    """
    Check whether *cls* is a class with ``attrs`` attributes.

    :param type cls: Class to introspect.
    :raise TypeError: If *cls* is not a class.

    :rtype: bool
    """
    # attrs-decorated classes carry the generated __attrs_attrs__ marker.
    attrs = getattr(cls, '__attrs_attrs__', None)
    return attrs is not None
|
def show():
    """
    Displays the result view
    """
    # Interface stub: implementations render the search results.
|
def select_circle(x: int=0, y: int=0, radius: int=1, gesture_mode: int=0):
    """Select markers using circle selection

    :param x: X
    :type x: int
    :param y: Y
    :type y: int
    :param radius: Radius
    :type radius: int
    :param gesture_mode: Gesture Mode
    :type gesture_mode: int
    """
    # fake-bpy stub: the real operator is implemented inside Blender.
    pass
|
def get_meterological_equation_case_cdd(t_min, t_max, t_base):
    """Determine the Meteorological Office equation case for CDD, as
    outlined in Day (2006): Degree-days: theory and application.

    Arguments
    ---------
    t_min : float
        Minimum daily temperature
    t_max : float
        Maximum daily temperature
    t_base : float
        Base temperature

    Return
    -------
    case_nr : int
        Case number (1-4)
    """
    above = t_max - t_base
    below = t_base - t_min
    # Break exact symmetry by nudging the base slightly upward.
    if above == below:
        t_base += 0.0001
        above = t_max - t_base
        below = t_base - t_min
    if t_min >= t_base:
        return 1
    if t_max <= t_base:
        return 4
    if t_max > t_base and above > below:
        return 2
    if t_min < t_base and above < below:
        return 3
    raise Exception(
        'Error in calculating methorological office equation case {} {} {}'
        .format(t_min, t_max, t_base))
|
@classmethod
def _get_version(cls):
    """Return VCS program version."""
    # The version is the last whitespace-separated token of the output.
    output = cls._popen(['--version'], merge_err=False)
    return output.split()[-1]
|
def sum_dicts(dicts):
    """
    Merge a sequence of dicts into a single dict.

    On key collisions, later dicts in the sequence win.
    """
    merged = {}
    for mapping in dicts:
        for key, value in mapping.items():
            merged[key] = value
    return merged
|
def count_row(engine, table):
    """
    Return number of rows in a table.

    Example::

        >>> count_row(engine, table_user)
        3
    """
    # Run SELECT COUNT(*) and read the single value from the first row.
    result = engine.execute(table.count())
    first_row = result.fetchone()
    return first_row[0]
|
xrayutilities | xrayutilities//simpack/darwin_theory.pyclass:DarwinModelGaInAs001/poisson_ratio | @staticmethod
def poisson_ratio(x):
"""
calculate the Poisson ratio of the alloy
"""
return 2 * (4.54 + 0.8 * x) / (8.34 + 3.56 * x)
|
def googlenet_params(model_name):
    """ Map GoogLeNet model name to parameter coefficients. """
    # The original docstring said "VGGNet"; this table is for GoogLeNet.
    # Raises KeyError for unknown model names.
    params_dict = {'googlenet': (True, True, None, 224)}
    return params_dict[model_name]
|
def create_bzip2(archive, compression, cmd, verbosity, interactive, filenames):
    """Create a BZIP2 archive.

    Returns the 7z command line as a list of arguments; ``compression``
    and ``verbosity`` are part of the common signature but unused here.
    """
    cmdlist = [cmd, 'a']
    if not interactive:
        # -y answers all prompts automatically in non-interactive mode.
        cmdlist.append('-y')
    cmdlist += ['-tbzip2', '-mx=9', '--', archive, *filenames]
    return cmdlist
|
def Breakpoints(ID, cadence='lc', **kwargs):
    """
    Returns the location of the breakpoints for a given target.

    :param int ID: The target ID number
    :param str cadence: The light curve cadence. Default `lc`

    .. note :: The number corresponding to a given breakpoint is the number of cadences *since the beginning of the campaign*.
    """
    # TESS support is a placeholder; every call fails loudly by design.
    raise NotImplementedError('This mission is not yet supported.')
|
monero_agent-2.0.6 | monero_agent-2.0.6//monero_glue/xmr/core/backend/keccak2.pyclass:KeccakState/lane2bytes | @staticmethod
def lane2bytes(s, w):
"""
Converts the lane s to a sequence of byte values,
assuming a lane is w bits.
"""
o = []
for b in range(0, w, 8):
o.append(s >> b & 255)
return o
|
def format_legend(leg):
    """Sets some formatting options in a matplotlib legend object."""
    # NOTE(review): the body is empty — either a stub awaiting
    # implementation or dead API surface; the legend is left unchanged.
|
def progress_loop(func, items, factory, *args, **kwargs):
    """
    This will iterate over ``items`` and call ``func`` for each.  If a progress
    ``factory`` kwarg is provided, then a progress instance will be created and
    updated along the way.

    ``func`` is invoked as ``func(item, i, *args, **kwargs)`` with ``i``
    1-based.  Returns True when the loop ran to completion, False when it
    was canceled (only possible with ``allow_cancel=True``); raises
    RuntimeError on cancel otherwise.
    """
    # Pop our own kwargs so the rest pass through to ``func`` untouched.
    message = kwargs.pop('message', None)
    count = kwargs.pop('count', None)
    allow_cancel = kwargs.pop('allow_cancel', False)
    if count is None:
        try:
            count = len(items)
        except TypeError:
            # e.g. a query-like object: no len(), but it has count().
            count = items.count()
    if not count:
        return True
    prog = None
    if factory:
        prog = factory(message, count)
    canceled = False
    for i, item in enumerate(items, 1):
        func(item, i, *args, **kwargs)
        # A falsy return from prog.update() signals a cancel request.
        if prog and not prog.update(i):
            canceled = True
            break
    if prog:
        prog.finish()
    if canceled and not allow_cancel:
        raise RuntimeError('Operation was canceled')
    return not canceled
|
def dw_pause(client, database_name, server_name, resource_group_name):
    """
    Pauses a datawarehouse.
    """
    # Start the pause operation and block until the poller completes.
    poller = client.pause(server_name=server_name,
        resource_group_name=resource_group_name,
        database_name=database_name)
    poller.wait()
|
pyvim-3.0.2 | pyvim-3.0.2//pyvim/key_bindings.pyfile:/pyvim/key_bindings.py:function:_current_window_for_event/_current_window_for_event | def _current_window_for_event(event):
"""
Return the `Window` for the currently focussed Buffer.
"""
return event.app.layout.current_window
|
def flip_udgs(udgs, flip=1):
    """Flip a 2D array of UDGs (instances of :class:`~skoolkit.graphics.Udg`).

    :param udgs: The array of UDGs.
    :param flip: 1 to flip horizontally, 2 to flip vertically, or 3 to flip
                 horizontally and vertically.
    """
    if flip:
        # Flip each distinct UDG object exactly once, even when the same
        # object occupies several cells of the grid.
        seen = set()
        for row in udgs:
            for udg in row:
                if id(udg) not in seen:
                    udg.flip(flip)
                    seen.add(id(udg))
    if flip & 1:
        # Horizontal flip: reverse each row in place.
        for row in udgs:
            row.reverse()
    if flip & 2:
        # Vertical flip: reverse the row order in place.
        udgs.reverse()
|
nose | nose//ext/dtcompat.pyfile:/ext/dtcompat.py:function:_comment_line/_comment_line | def _comment_line(line):
"""Return a commented form of the given line"""
line = line.rstrip()
if line:
return '# ' + line
else:
return '#'
|
@staticmethod
def mutate_label(label):
    """
    Most engines support mixed case aliases that can include numbers
    and special characters, like commas, parentheses etc. For engines that
    have restrictions on what types of aliases are supported, this method
    can be overridden to ensure that labels conform to the engine's
    limitations. Mutated labels should be deterministic (input label A always
    yields output label X) and unique (input labels A and B don't yield the same
    output label X).
    """
    # Base implementation: identity — no restrictions to enforce.
    return label
|
def view_selected():
    """Resize view so you can see selected nodes
    """
    # fake-bpy stub: the real operator is implemented inside Blender.
    pass
|
def _np__random_shuffle(x):
    """
    Modify a sequence in-place by shuffling its contents.

    This function only shuffles the array along the first axis of a
    multi-dimensional array. The order of sub-arrays is changed but
    their contents remain the same.

    Parameters
    ----------
    x: ndarray
        The array or list to be shuffled.

    Returns
    -------
    None

    Examples
    --------
    >>> arr = np.arange(10)
    >>> np.random.shuffle(arr)
    >>> arr
    array([5., 1., 0., 6., 7., 3., 9., 8., 4., 2.])  # random

    Multi-dimensional arrays are only shuffled along the first axis:

    >>> arr = np.arange(9).reshape((3, 3))
    >>> np.random.shuffle(arr)
    >>> arr
    array([[6., 7., 8.],  # random
           [3., 4., 5.],
           [0., 1., 2.]])
    """
    # Documentation-only stub: the real operator is registered by the MXNet
    # backend; this Python definition exists solely to carry the docstring.
    pass
|
unofficial-superset-0.34.0 | unofficial-superset-0.34.0//superset/db_engine_specs/presto.pyclass:PrestoEngineSpec/_expand_row_data | @classmethod
def _expand_row_data(cls, datum: dict, column: str, column_hierarchy: dict
) ->None:
"""
Separate out nested fields and its value in a row of data
:param datum: row of data
:param column: row column name
:param column_hierarchy: dictionary tracking structural columns and its
nested fields
"""
if column in datum:
row_data = datum[column]
row_children = column_hierarchy[column]['children']
if row_data and len(row_data) != len(row_children):
raise Exception(
'The number of data values and number of nestedfields are not equal'
)
elif row_data:
for index, data_value in enumerate(row_data):
datum[row_children[index]] = data_value
else:
for row_child in row_children:
datum[row_child] = ''
|
def welcommessage(version):
    """Return the welcome message shown at startup (``version`` is unused)."""
    # The text is reproduced verbatim, including the historical spelling.
    return ('Kaidoku - player, solver and creater of sudoku puzzles.\n'
            ' https://sekika.github.io/kaidoku/\n'
            'Type h for help, c for showing a problem, q for quit.')
|
def clear_cache(method=None):
    """ Clear the cache of the calculator, optionally only for a given method
    """
    # Interface stub: implementations clear their cached results.
|
zopkio-0.2.5 | zopkio-0.2.5//zopkio/runtime.pyfile:/zopkio/runtime.py:function:set_machines/set_machines | def set_machines(machines):
"""
Private function to set the machine mapping should only be called from the main file
:param machines:
:return:
"""
global _machine_names
_machine_names = machines
|
def compareonvar(map_df, colvar='all'):
    """
    Subset a dataframe based on some columns of interest.

    map_df: (dataframe) the dataframe of the mappingfile table
    colvar: (str or list) the column(s) to use for subsetting; None returns
        the dataframe unchanged (outer join), 'all' drops rows with any NaN
        (inner join), a list drops rows with NaN in those columns
    """
    # BUG FIX: the original used identity checks (``is``) for both None
    # (via isinstance(colvar, type(None))) and the string 'all'; string
    # identity is an interning accident, not a contract.
    if colvar is None:
        return map_df
    if colvar == 'all':
        return map_df.dropna()
    return map_df.dropna(subset=colvar)
|
def p_identifier_list_empty(p):
    """
    identifier_list :
    """
    # NOTE: the docstring above is a PLY grammar rule (the empty production
    # for identifier_list) and must not be edited as documentation.
    # An empty production yields an empty list of identifiers.
    p[0] = []
|
def get_num_frames(dur, anal):
    """Given the duration of a track and a dictionary containing analysis
    info, return the number of frames."""
    sample_count = dur * anal['sample_rate']
    return int(sample_count / anal['hop_size'])
|
@classmethod
def get_path(cls):
    """
    Return the value of :py:attr:`~RouteBase.path` attribute.
    """
    # Classmethod so subclasses overriding ``path`` report their own value.
    return cls.path
|
def fixup_int32_as_uint32(value):
    """
    Workaround for use when the ECMWF GRIB API treats a signed, 4-byte
    integer value as an unsigned, 4-byte integer.

    Returns the unsigned integer value which will result in the on-disk
    representation corresponding to the signed, 4-byte integer value.
    """
    value = int(value)
    if not -2147483647 <= value <= 2147483647:
        msg = '{} out of range -2147483647 to 2147483647.'.format(value)
        raise ValueError(msg)
    # Negative values are encoded sign-and-magnitude: set the top bit
    # (2**31) and keep the magnitude, i.e. 2147483648 + |value|.
    return 2147483648 - value if value < 0 else value
|
@classmethod
def showcase_delete_comment(cls, val):
    """
    Create an instance of this class set to the ``showcase_delete_comment``
    tag with value ``val``.

    :param ShowcaseDeleteCommentType val:
    :rtype: EventType
    """
    # Generated tagged-union constructor (Dropbox Stone pattern).
    return cls('showcase_delete_comment', val)
|
@classmethod
def content_manager(cls, val):
    """
    Create an instance of this class set to the ``content_manager`` tag with
    value ``val``.

    :param WebSessionLogInfo val:
    :rtype: AccessMethodLogInfo
    """
    # Generated tagged-union constructor (Dropbox Stone pattern).
    return cls('content_manager', val)
|