repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
pyboto3-1.4.4 | pyboto3-1.4.4//pyboto3/inspector.pyfile:/pyboto3/inspector.py:function:describe_assessment_runs/describe_assessment_runs | def describe_assessment_runs(assessmentRunArns=None):
"""
Describes the assessment runs that are specified by the ARNs of the assessment runs.
See also: AWS API Documentation
Examples
Describes the assessment runs that are specified by the ARNs of the assessment runs.
Expected Output:
:example: response = client.describe_assessment_runs(
assessmentRunArns=[
'string',
]
)
:type assessmentRunArns: list
:param assessmentRunArns: [REQUIRED]
The ARN that specifies the assessment run that you want to describe.
(string) --
:rtype: dict
:return: {
'assessmentRuns': [
{
'arn': 'string',
'name': 'string',
'assessmentTemplateArn': 'string',
'state': 'CREATED'|'START_DATA_COLLECTION_PENDING'|'START_DATA_COLLECTION_IN_PROGRESS'|'COLLECTING_DATA'|'STOP_DATA_COLLECTION_PENDING'|'DATA_COLLECTED'|'START_EVALUATING_RULES_PENDING'|'EVALUATING_RULES'|'FAILED'|'ERROR'|'COMPLETED'|'COMPLETED_WITH_ERRORS',
'durationInSeconds': 123,
'rulesPackageArns': [
'string',
],
'userAttributesForFindings': [
{
'key': 'string',
'value': 'string'
},
],
'createdAt': datetime(2015, 1, 1),
'startedAt': datetime(2015, 1, 1),
'completedAt': datetime(2015, 1, 1),
'stateChangedAt': datetime(2015, 1, 1),
'dataCollected': True|False,
'stateChanges': [
{
'stateChangedAt': datetime(2015, 1, 1),
'state': 'CREATED'|'START_DATA_COLLECTION_PENDING'|'START_DATA_COLLECTION_IN_PROGRESS'|'COLLECTING_DATA'|'STOP_DATA_COLLECTION_PENDING'|'DATA_COLLECTED'|'START_EVALUATING_RULES_PENDING'|'EVALUATING_RULES'|'FAILED'|'ERROR'|'COMPLETED'|'COMPLETED_WITH_ERRORS'
},
],
'notifications': [
{
'date': datetime(2015, 1, 1),
'event': 'ASSESSMENT_RUN_STARTED'|'ASSESSMENT_RUN_COMPLETED'|'ASSESSMENT_RUN_STATE_CHANGED'|'FINDING_REPORTED'|'OTHER',
'message': 'string',
'error': True|False,
'snsTopicArn': 'string',
'snsPublishStatusCode': 'SUCCESS'|'TOPIC_DOES_NOT_EXIST'|'ACCESS_DENIED'|'INTERNAL_ERROR'
},
],
'findingCounts': {
'string': 123
}
},
],
'failedItems': {
'string': {
'failureCode': 'INVALID_ARN'|'DUPLICATE_ARN'|'ITEM_DOES_NOT_EXIST'|'ACCESS_DENIED'|'LIMIT_EXCEEDED'|'INTERNAL_ERROR',
'retryable': True|False
}
}
}
:returns:
(string) --
"""
# Stub: pyboto3 ships signatures/docstrings for IDE support only; the
# real AWS Inspector call is dispatched through boto3 at runtime.
pass
|
mountains-0.7.6 | mountains-0.7.6//mountains/django/model.pyfile:/mountains/django/model.py:function:model_to_select_list/model_to_select_list | def model_to_select_list(model_class, filter_dict=None, q_filter=None):
"""
Select only the ``id`` and ``name`` columns, for building choice lists.
:param model_class: Django model class to query.
:param filter_dict: keyword filters forwarded to ``QuerySet.filter``.
:param q_filter: optional ``Q`` object combined into the same filter call.
:return: list of ``{'id': ..., 'name': ...}`` dicts.
"""
if filter_dict is None:
filter_dict = {}
if q_filter is not None:
filter_list = [q_filter]
else:
filter_list = []
objects = model_class.objects.filter(*filter_list, **filter_dict).values(
'id', 'name')
return list(objects)
|
def is_title_case(line):
    """Determine whether a line is title-case, ignoring short words.

    A line counts as title-case when every word longer than three
    characters starts with an upper-case letter; words of three or fewer
    characters (articles, prepositions, ...) are deliberately ignored.

    :param line: text to inspect; words are split on single spaces.
    :return: True when every long word is capitalized, else False.
    """
    for word in line.split(u' '):
        # len(word) > 3 already excludes the empty string, so the
        # original's extra `len(word) > 0` test was redundant.
        if len(word) > 3 and word[0] != word[0].upper():
            return False
    return True
|
def helloworld():
    """Sample function: write ``Hello, world.`` to stdout."""
    message = 'Hello, world.'
    print(message)
|
pyboto3-1.4.4 | pyboto3-1.4.4//pyboto3/organizations.pyfile:/pyboto3/organizations.py:function:create_organization/create_organization | def create_organization(FeatureSet=None):
"""
Creates an AWS organization. The account whose user is calling the CreateOrganization operation automatically becomes the master account of the new organization.
This operation must be called using credentials from the account that is to become the new organization's master account. The principal must also have the relevant IAM permissions.
By default (or if you set the FeatureSet parameter to ALL ), the new organization is created with all features enabled and service control policies automatically enabled in the root. If you instead choose to create the organization supporting only the consolidated billing features by setting the FeatureSet parameter to CONSOLIDATED_BILLING" , then no policy types are enabled by default and you cannot use organization policies.
See also: AWS API Documentation
:example: response = client.create_organization(
FeatureSet='ALL'|'CONSOLIDATED_BILLING'
)
:type FeatureSet: string
:param FeatureSet: Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality.
CONSOLIDATED_BILLING : All member accounts have their bills consolidated to and paid by the master account. For more information, see Consolidated Billing in the AWS Organizations User Guide .
ALL : In addition to all the features supported by the consolidated billing feature set, the master account can also apply any type of policy to any member account in the organization. For more information, see All features in the AWS Organizations User Guide .
:rtype: dict
:return: {
'Organization': {
'Id': 'string',
'Arn': 'string',
'FeatureSet': 'ALL'|'CONSOLIDATED_BILLING',
'MasterAccountArn': 'string',
'MasterAccountId': 'string',
'MasterAccountEmail': 'string',
'AvailablePolicyTypes': [
{
'Type': 'SERVICE_CONTROL_POLICY',
'Status': 'ENABLED'|'PENDING_ENABLE'|'PENDING_DISABLE'
},
]
}
}
"""
# Stub: pyboto3 ships signatures/docstrings for IDE support only; the
# real AWS Organizations call is dispatched through boto3 at runtime.
pass
|
kipoi-0.6.25 | kipoi-0.6.25//kipoi/data.pyclass:BaseDataLoader/_add_description_factory | @classmethod
def _add_description_factory(cls, descr):
"""Factory method which populates the un-set class variables
Returns:
new dataloader class
"""
for field in ['type', 'defined_as', 'args', 'output_schema', 'info',
'dependencies', 'path', 'postprocessing']:
setattr(cls, field, getattr(descr, field))
return cls
|
def object_from_string(st):
    """Resolve a fully qualified dotted name to a class or function object.

    Used by serialization code: splits ``st`` at its final dot, imports the
    module part and returns the named attribute from that module.

    :param st: A fully qualified name of a class or a function.
    :return: A function or class object.
    :raises ValueError: if ``st`` contains no dot.
    """
    module_name, dot, attr_name = st.rpartition('.')
    if not dot:
        raise ValueError('Invalid function or class name %s' % st)
    module = __import__(module_name, fromlist=[attr_name], level=0)
    return getattr(module, attr_name)
|
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces['closest-tag']
    if tag:
        parts = [tag]
        if pieces['distance']:
            parts.append('-%d-g%s' % (pieces['distance'], pieces['short']))
    else:
        parts = [pieces['short']]
    if pieces['dirty']:
        parts.append('-dirty')
    return ''.join(parts)
|
def take(n, listy):
    """Return the first ``n`` values from a list or a generator.

    Uses :func:`itertools.islice` so that only ``n`` items are consumed
    from a generator; the previous ``list(listy)[:n]`` exhausted the whole
    iterable, which never terminates for infinite generators.

    :param n: number of leading items to return.
    :param listy: any iterable.
    :return: list of at most ``n`` leading items.
    """
    from itertools import islice
    if n < 0:
        # Preserve the historical list-slice semantics for negative n.
        return list(listy)[:n]
    return list(islice(listy, n))
|
nutsml-1.0.43 | nutsml-1.0.43//nutsml/transformer.pyclass:TransformImage/register | @classmethod
def register(cls, name, transformation):
    """Register a transformation function under the given name.

    Once registered, the function is available to
    ``TransformImage(...).by(name, ...)``.

    :param string name: Name of transformation
    :param function transformation: Transformation function.
    """
    cls.transformations[name] = transformation
|
OFS | OFS//interfaces.pyclass:IWriteLock/wl_hasLock | def wl_hasLock(token, killinvalids=0):
    """ Returns true if the lock identified by the token is attached
    to the object.

    Interface declaration only (no body).
    # NOTE(review): presumably a truthy `killinvalids` asks implementations
    # to purge invalid locks while checking -- confirm against implementers.
    """
|
def gen_time_str(t_info):
    """Build the date string naming the log file to access, e.g. ``20150101``.

    :param t_info: ``time.struct_time``-like object providing ``tm_year``,
        ``tm_mon`` and ``tm_mday``.
    :return: ``YYYYMMDD`` string; month and day are zero-padded to two
        digits via format specs instead of the manual ``'0' +`` branches.
    """
    return '{}{:02d}{:02d}'.format(t_info.tm_year, t_info.tm_mon,
                                   t_info.tm_mday)
|
pyembroidery | pyembroidery//EmbThread.pyfile:/EmbThread.py:function:build_unique_palette/build_unique_palette | def build_unique_palette(thread_palette, threadlist):
"""Turns a threadlist into a unique index list with the thread palette"""
# chart[i] holds the distinct thread claimed by palette slot i (None = unclaimed).
chart = [None] * len(thread_palette)
for thread in set(threadlist):
index = thread.find_nearest_color_index(thread_palette)
if index is None:
# NOTE(review): palette exhausted -- remaining distinct threads stay
# unassigned; `break` (not `continue`) stops assigning entirely. Confirm
# this is intended rather than skipping just the unmatched thread.
break
# Claim the slot so no other thread can map to the same palette entry.
# NOTE: this mutates the caller's thread_palette in place.
thread_palette[index] = None
chart[index] = thread
# Second pass: map every thread (duplicates included) to its claimed slot.
palette = []
for thread in threadlist:
palette.append(thread.find_nearest_color_index(chart))
return palette
|
gordon-janitor-0.0.1.dev7 | gordon-janitor-0.0.1.dev7//gordon_janitor/interfaces.pyclass:IPublisher/__init__ | def __init__(config, changes_channel, metrics=None):
    """Initialize a Publisher Plugin client.

    Interface declaration only (no body).  Implementations receive the
    plugin ``config``, a ``changes_channel`` of records to publish, and an
    optional ``metrics`` collector.
    """
|
pyrin-0.3.16 | pyrin-0.3.16//src/pyrin/core/structs.pyclass:CLI/execute | @classmethod
def execute(cls, handler_name, **options):
    """Run the CLI handler registered under ``handler_name``.

    :param str handler_name: handler name to be executed.
    :raises CLIHandlerNotFoundError: cli handler not found error.
    :rtype: int
    """
    result = cls._execute_service(handler_name, **options)
    return result
|
shaolin | shaolin//dashboards/data_transforms.pyclass:DataFrameScaler/is_categorical_series | @staticmethod
def is_categorical_series(data):
    """True if ``data`` is a categorical series.

    Only the first element is inspected: the series is deemed categorical
    when that element is a string.
    """
    first_value = data.values[0]
    return isinstance(first_value, str)
|
def v_num_str(x):
    """Validate an object that can be coerced to an ``int``.

    Returns the coerced integer; the usual ``ValueError``/``TypeError``
    propagates when coercion fails.
    """
    return int(x)
|
awsjar-0.2.11 | awsjar-0.2.11//versioneer.pyfile:/versioneer.py:function:scan_setup_py/scan_setup_py | def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
# `found` records which of the three required versioneer usages were seen;
# `setters` flags legacy module-level configuration that now lives in setup.cfg.
found = set()
setters = False
errors = 0
with open('setup.py', 'r') as f:
for line in f.readlines():
if 'import versioneer' in line:
found.add('import')
if 'versioneer.get_cmdclass()' in line:
found.add('cmdclass')
if 'versioneer.get_version()' in line:
found.add('get_version')
if 'versioneer.VCS' in line:
setters = True
if 'versioneer.versionfile_source' in line:
setters = True
# Any of the three expected usages missing -> print the canonical snippet.
if len(found) != 3:
print('')
print('Your setup.py appears to be missing some important items')
print('(but I might be wrong). Please make sure it has something')
print('roughly like the following:')
print('')
print(' import versioneer')
print(' setup( version=versioneer.get_version(),')
print(' cmdclass=versioneer.get_cmdclass(), ...)')
print('')
errors += 1
# Legacy in-setup.py configuration detected -> advise moving it to setup.cfg.
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print('now lives in setup.cfg, and should be removed from setup.py')
print('')
errors += 1
return errors
|
def ycbcr2rgb(y__, cb_, cr_):
    """Convert the three YCbCr channels to RGB channels.

    Uses luma coefficients Kb=0.114 and Kr=0.299 (the ITU-R BT.601 values).
    """
    weight_b = 0.114
    weight_r = 0.299
    r__ = 2 * cr_ / (1 - weight_r) + y__
    b__ = 2 * cb_ / (1 - weight_b) + y__
    g__ = (y__ - weight_r * r__ - weight_b * b__) / (1 - weight_r - weight_b)
    return r__, g__, b__
|
def vol_numbers(slice_nos):
    """ Calculate volume numbers inferred from slice numbers `slice_nos`

    The volume number for each slice is the number of times this slice
    number has occurred previously in the `slice_nos` sequence.

    Parameters
    ----------
    slice_nos : sequence
        Sequence of slice numbers, e.g. ``[1, 2, 3, 4, 1, 2, 3, 4]``.

    Returns
    -------
    vol_nos : list
        A list, the same length of `slice_nos` giving the volume number for
        each corresponding slice number.
    """
    seen = {}
    result = []
    for slice_no in slice_nos:
        n_previous = seen.get(slice_no, 0)
        result.append(n_previous)
        seen[slice_no] = n_previous + 1
    return result
|
som-0.2.3 | som-0.2.3//som/utils.pyfile:/som/utils.py:function:read_file/read_file | def read_file(filename, mode='r'):
"""read_file opens "filename", reads all of its lines into a list, and
properly closes the file before returning that list.
:param filename: path of the file to read.
:param mode: open mode, defaults to text read ('r').
:return: list of lines as returned by ``readlines`` (newlines kept).
"""
with open(filename, mode) as filey:
content = filey.readlines()
return content
|
def float2Fixed(value):
    """
    Translate a float into a 1/64 pixel fixed unit-value: the value is
    scaled by 64 and rounded to the nearest integer.
    """
    scaled = round(value * 64)
    return int(scaled)
|
cz-urnnbn-api-0.2.6 | cz-urnnbn-api-0.2.6//src/cz_urnnbn_api/xml_composer.pyclass:MonographComposer/_create_path | @staticmethod
def _create_path(root, dict_type, path):
"""
Create nested dicts in `root` using `dict_type` as constructor. Strings
in `path` are used to construct the keys for nested dicts.
Args:
root (dict instance): Root dictionary, where the nested dicts will
be created.
dict_type (dict-like class): Class which will be used to construct
dicts - ``dict_type()`.
path (list/tuple of strings): List of keys for nested
``dict_type``.
Returns:
dict: Return last nested dict (dict at last element at `path`).
"""
for sub_path in path:
if not isinstance(root.get(sub_path, None), dict):
root[sub_path] = dict_type()
root = root[sub_path]
return root
|
def guess_type_value_type(none=True):
    """
    @param      none    if True, the returned list starts with None
                        (used when all values are empty)
    @return             the list of types recognized by guess_type_value
    """
    recognized = [str, int, float]
    if none:
        return [None] + recognized
    return recognized
|
def spec_sum(ph2):
    """Compute total spectral sum of the real spectral quantity ``ph^2``.

    Interior entries along the last axis are counted twice, the first and
    last entries once (presumably the real-FFT conjugate-symmetry
    convention -- confirm with callers).  The input array is not modified.

    Parameters
    ----------
    ph2 : real array
        The field on which to compute the sum

    Returns
    -------
    var_dens : float
        The sum of `ph2`
    """
    doubled = 2.0 * ph2
    doubled[..., 0] = doubled[..., 0] / 2.0
    doubled[..., -1] = doubled[..., -1] / 2.0
    return doubled.sum(axis=(-1, -2))
|
sbpy | sbpy//photometry/core.pyclass:HG12_Pen16/_G12_to_G1 | @staticmethod
def _G12_to_G1(g12):
"""Calculate G1 from G12"""
return 0.84293649 * g12
|
tom_education-1.1.9 | tom_education-1.1.9//tom_education/models/observation_template.pyclass:ObservationTemplate/get_date_fields | @staticmethod
def get_date_fields(facility):
    """
    Return a sequence of field names whose type is datetime.

    Only the 'LCO' facility has known datetime fields; every other
    facility yields an empty list.
    """
    return ['start', 'end'] if facility == 'LCO' else []
|
PyXB-1.2.6 | PyXB-1.2.6//pyxb/binding/basis.pyclass:_DynamicCreate_mixin/_SetSupersedingClass | @classmethod
def _SetSupersedingClass(cls, superseding):
"""Set the class reference attribute.
@param superseding: A Python class that is a subclass of this class.
@return: the `superseding` argument, unchanged.
"""
# NOTE(review): assert-based validation disappears under `python -O`.
assert superseding is None or issubclass(superseding, cls)
if superseding is None:
# Remove the (name-mangled) attribute if present; no error when absent.
cls.__dict__.pop(cls.__SupersedingClassAttribute(), None)
else:
# Store the superseding class under the mangled private attribute name.
setattr(cls, cls.__SupersedingClassAttribute(), superseding)
return superseding
|
def is_prime(number):
    """Check if a number is prime.

    Handles the small cases explicitly, then trial-divides by every odd
    candidate up to the square root.
    """
    if number < 2:
        return False
    if number == 2:
        return True
    if number % 2 == 0:
        return False
    limit = int(number ** 0.5)
    return all(number % divisor for divisor in range(3, limit + 1, 2))
|
dit_cli | dit_cli//parser.pyfile:/parser.py:function:_list_from_data/_list_from_data | def _list_from_data(data):
"""Get a new list of all the items in the list since the last
special token, either an assigner or a list.
Special token denoted by None, which cannot appear otherwise."""
for index, item in enumerate(reversed(data)):
if item is None:
list_ = data[-index:len(data)]
del data[-index:len(data)]
return list_
|
nio | nio//events/room_events.pyclass:DefaultLevels/from_dict | @classmethod
def from_dict(cls, parsed_dict):
    """Create a DefaultLevels object from a dictionary.

    Builds the object from a dict holding an m.room.power_levels event;
    the event structure is not validated here.  Not meant to be called
    directly -- `PowerLevelsEvent` invokes it during construction.
    """
    levels = parsed_dict['content']
    keys = ('ban', 'invite', 'kick', 'redact', 'state_default',
            'events_default', 'users_default')
    return cls(*(levels[key] for key in keys))
|
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo.

    Clears every cache held by the obsstore, but only when the obsstore
    already exists on the repo.  (We could be smarter here given the exact
    event that triggered the clearing.)
    """
    if b'obsstore' not in repo._filecache:
        return
    repo.obsstore.caches.clear()
|
def intersection_ll(ll1, ll2):
    """Count how many inner lists of ``ll1`` also appear in ``ll2``.

    Both arguments must be plain lists (exactly ``list``, not a subclass),
    otherwise an AssertionError is raised.
    """
    assert type(ll1) == list
    assert type(ll2) == list
    return sum(1 for inner in ll1 if inner in ll2)
|
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if not pieces['closest-tag']:
        rendered = pieces['short']
    else:
        rendered = pieces['closest-tag']
        if pieces['distance']:
            rendered = '{0}-{1}-g{2}'.format(
                rendered, pieces['distance'], pieces['short'])
    if pieces['dirty']:
        rendered = rendered + '-dirty'
    return rendered
|
def get_broken_symlink_check_str(variableName):
    """Return bash code that exits 1 when the file in ``$variableName``
    is a broken (orphaned) symlink.

    This seems to help with files not being staged out properly; likely
    unnecessary in bug-fixed versions of Swift.  Do not force an exit on
    success, since other checks may occur after the symlink check.

    Bug fixed: the generated script previously had the branches inverted --
    ``[ ! -e $f ]`` (file missing / symlink broken) echoed "good" and
    continued, while a healthy file echoed "broken" and exited 1.
    """
    return (
        '# See if the symlink is orphaned... if so exit 1\n'
        'if [ ! -e $' + variableName + ' ] ; then\n'
        '\techo "$' + variableName + ' symlink broken" >> $logFile 2>&1\n'
        '\texit 1\n'
        'else\n'
        '\techo "$' + variableName + ' symlink good" >> $logFile 2>&1\n'
        'fi\n'
    )
|
fake-bpy-module-2.79-20200428 | fake-bpy-module-2.79-20200428//bpy/ops/uv.pyfile:/bpy/ops/uv.py:function:circle_select/circle_select | def circle_select(x: int=0, y: int=0, radius: int=1, gesture_mode: int=0):
"""Select UV vertices using circle selection
:param x: X
:type x: int
:param y: Y
:type y: int
:param radius: Radius
:type radius: int
:param gesture_mode: Gesture Mode
:type gesture_mode: int
"""
# Stub: fake-bpy-module provides signatures for IDE autocompletion only;
# the real operator exists inside Blender's bundled Python.
pass
|
haychecker | haychecker//_common/config.pyclass:Config/_freshness_params_check | @staticmethod
def _freshness_params_check(metric, error_msg):
"""
Check the definition of a freshness metric for consistency, this will
rise an assertion error if any error is met.
:param metric: Freshness metric in a dict form.
:type metric: dict
:param error_msg: Error message to return in case of error.
:type error_msg: str
"""
assert metric['metric'] == 'freshness', error_msg
assert len(metric) == 3 and 'columns' in metric and ('dateFormat' in
metric or 'timeFormat' in metric), error_msg
if 'dateFormat' in metric:
assert type(metric['dateFormat']) is str
else:
assert type(metric['timeFormat']) is str
columns = metric['columns']
assert len(columns) > 0, 'Columns list is empty'
assert type(columns) is list, error_msg
for col in columns:
assert type(col) is int or type(col) is str, error_msg
|
Trac-1.4.1 | Trac-1.4.1//trac/resource.pyclass:IResourceManager/get_resource_description | def get_resource_description(resource, format='default', context=None, **kwargs
):
"""Return a string representation of the resource, according to the
`format`.  (Interface declaration only; no body.)
:param resource: the `Resource` to describe
:param format: the kind of description wanted. Typical formats are:
`'default'`, `'compact'` or `'summary'`.
:param context: an optional rendering context to allow rendering rich
output (like markup containing links)
:type context: `ResourceContext`
Additional keyword arguments can be given as extra information for
some formats.
For example, the ticket with the id 123 is represented as:
- `'#123'` in `'compact'` format,
- `'Ticket #123'` for the `default` format,
- `'Ticket #123 (closed defect): This is the summary'` for the
`'summary'` format.
Note that it is also OK to not define this method if there's no
special way to represent the resource, in which case the standard
representations 'realm:id' (in compact mode) or 'Realm id' (in
default mode) will be used.
"""
|
def snip_line(line, max_width, split_at):
    """Shorten a line to a maximum length.

    Lines shorter than ``max_width`` pass through untouched; longer lines
    keep the first ``split_at`` characters and the tail, joined by ' … '.
    """
    if len(line) < max_width:
        return line
    head = line[:split_at]
    tail_len = max_width - split_at - 3
    return head + ' … ' + line[-tail_len:]
|
dgl_cu90-0.4.3.post2.data | dgl_cu90-0.4.3.post2.data//purelib/dgl/backend/backend.pyfile:/purelib/dgl/backend/backend.py:function:sum/sum | def sum(input, dim, keepdims=False):
"""Reduce sum the input tensor along the given dim.
Parameters
----------
input : Tensor
The input tensor.
dim : int
The reduce dim.
keepdims : bool
Whether to keep the summed dimension.
Returns
-------
Tensor
A framework-specific tensor.
"""
# Stub: this backend-interface function is overridden at load time by the
# concrete framework backend (e.g. the PyTorch or MXNet implementation).
pass
|
def enumerate_model_initializers(model, add_node=False):
    """
    Enumerate all the initializers of an ONNX model.

    :param model: ONNX graph
    :param add_node: if False, yields only initializer names; otherwise
        yields ``(name, node)`` tuples
    :return: enumerator
    """
    for initializer in model.graph.initializer:
        if add_node:
            yield initializer.name, initializer
        else:
            yield initializer.name
|
sifter-0.1 | sifter-0.1//sifter/grammar/lexer.pyfile:/sifter/grammar/lexer.py:function:t_QUOTED_STRING/t_QUOTED_STRING | def t_QUOTED_STRING(t):
""""([^"\\\\]|\\\\["\\\\])*\""""
# NOTE: the docstring above is FUNCTIONAL -- PLY uses a token function's
# docstring as the token's regular expression -- so it must never be
# edited or reformatted.
# Strip the surrounding quotes and un-escape the \" and \\ sequences.
t.value = t.value.strip('"').replace('\\"', '"').replace('\\\\', '\\')
return t
|
dgl_cu90-0.4.3.post2.data | dgl_cu90-0.4.3.post2.data//purelib/dgl/runtime/spmv.pyfile:/purelib/dgl/runtime/spmv.py:function:build_gidx_and_mapping_graph/build_gidx_and_mapping_graph | def build_gidx_and_mapping_graph(graph):
"""Build immutable graph index of the whole graph.
Parameters
----------
graph : GraphAdapter
Graph
Returns
-------
graph : utils.CtxCachedObject
Function that generates a immutable graph index on given context
edge_map : utils.CtxCachedObject
Function that generates forward and backward edge mapping on given
context
nbits : int
Number of ints needed to represent the graph
"""
# NOTE(review): edge_map is always None for the whole-graph case --
# presumably callers only need edge mappings for subgraphs; confirm.
return graph.get_immutable_gidx, None, graph.bits_needed()
|
def fmt(docstr):
    """Format a docstring for use as documentation in sample config.

    Newlines become spaces and surrounding whitespace is removed.
    """
    return docstr.replace('\n', ' ').strip()
|
PyProbe-0.1.2 | PyProbe-0.1.2//pyprobe/mediainfoparsers.pyclass:ChapterParser/value_title | @staticmethod
def value_title(data):
    """Return ``(full_text, text_after_first_colon)`` for a chapter entry.

    When the text contains no colon, the full text is returned in both
    positions.
    """
    full_text = data.text
    head, sep, tail = full_text.partition(':')
    return full_text, (tail if sep else full_text)
|
hatemile | hatemile//util/commonfunctions.pyclass:CommonFunctions/set_list_attributes | @staticmethod
def set_list_attributes(element1, element2, attributes):
    """
    Copy a list of attributes from one element onto another.

    :param element1: The element whose attributes are copied.
    :type element1: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param element2: The element that receives the copied attributes.
    :type element2: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param attributes: The list of attribute names to copy.
    :type attributes: list(str)
    """
    for attribute_name in attributes:
        if not element1.has_attribute(attribute_name):
            continue
        value = element1.get_attribute(attribute_name)
        element2.set_attribute(attribute_name, value)
|
bpy_extras | bpy_extras//view3d_utils.pyfile:/view3d_utils.py:function:location_3d_to_region_2d/location_3d_to_region_2d | def location_3d_to_region_2d(region: 'bpy.types.Region', rv3d:
'bpy.types.RegionView3D', coord, default=None) ->'mathutils.Vector':
"""Return the region relative 2d location of a 3d position.
:param region: region of the 3D viewport, typically bpy.context.region.
:type region: 'bpy.types.Region'
:param rv3d: 3D region data, typically bpy.context.space_data.region_3d.
:type rv3d: 'bpy.types.RegionView3D'
:param coord: 3d worldspace location.
:param default: Return this value if coord is behind the origin of a perspective view.
:return: 2d location
"""
# Stub: API-signature module only; the real implementation lives inside
# Blender's bundled bpy_extras package.
pass
|
fmcw-3.2.5 | fmcw-3.2.5//fmcw/postprocessing.pyfile:/fmcw/postprocessing.py:function:find_start_batch/find_start_batch | def find_start_batch(data, s, initial_index=0):
"""
Find the starting index of the first valid batch of sweep data and its corresponding header.
:param data: Batch of data coming from the FPGA via the USB port.
:param s: Settings dictionary
:param initial_index: 0 if reading a new batch, non zero if finding the next valid sweep within a batch
:return: Starting index of a sweep data, header of that sweep
"""
flag_valid_header = False
# Scan for a start byte that is followed, nbytes_sweep + 2 bytes later,
# by another start byte (i.e. two consecutive framed sweeps).
for index in range(initial_index, len(data) - s['nbytes_sweep'] - 2):
current_header = [data[index], data[index + 1]]
if current_header[0] == s['start'] and data[index + s[
'nbytes_sweep'] + 2] == s['start']:
print(
'[INFO] Found start signal {} at position {} (jumped {} byte).'
.format(current_header, index, index - initial_index))
print('[INFO] Next header would read [{}, {}]'.format(data[
index + s['nbytes_sweep'] + 2], data[index + s[
'nbytes_sweep'] + 3]))
flag_valid_header = True
break
if flag_valid_header:
# Skip the 2-byte header so `index` points at the sweep payload.
index += 2
else:
# No valid frame found: reset to a sentinel index/header.
index = 0
current_header = [0, 0]
assert index >= 0
# NOTE(review): the assert below hard-codes 127, implying s['start'] == 127
# -- confirm against the settings dictionary; also note this validation
# disappears under `python -O`.
assert current_header[0] == 127 or current_header[0] == 0
return index, current_header
|
oci | oci//core/models/instance_configuration_instance_details.pyclass:InstanceConfigurationInstanceDetails/get_subtype | @staticmethod
def get_subtype(object_dictionary):
    """
    Given the hash representation of a subtype of this class,
    use the info in the hash to return the class name of the subtype.
    """
    instance_type = object_dictionary['instanceType']
    if instance_type == 'compute':
        return 'ComputeInstanceDetails'
    return 'InstanceConfigurationInstanceDetails'
|
iodata-0.1.3 | iodata-0.1.3//iodata/cp2k.pyfile:/iodata/cp2k.py:function:_fill_orbitals/_fill_orbitals | def _fill_orbitals(orb_coeffs, orb_energies, orb_occupations, oe, coeffs,
shell_types, restricted):
"""Fill in orbital coefficients, energies and occupation numbers in ``orb``.
Parameters
----------
orb : Orbitals
An object to represent the orbitals
oe : list
The orbital occupation numbers and energies read with
``_read_cp2k_occupations_energies``.
coeffs : dict
The orbital coefficients read with ``_read_cp2k_orbital_coeffs``.
shell_types : np.ndarray
The array with shell types of the GOBasis instance.
restricted : bool
Is wavefunction restricted or unrestricted?
"""
# offsets[l] = first basis-function row of the block with angular momentum l;
# each shell of momentum l contributes 2l+1 functions.
offset = 0
offsets = []
ls = abs(shell_types)
for l in sorted(set(ls)):
offsets.append(offset)
offset += (2 * l + 1) * (l == ls).sum()
del offset
iorb = 0
for l, s, occ, ener in oe:
cs = coeffs.get((l, s))
stride = 2 * l + 1
for m in range(-l, l + 1):
im = m + l
orb_energies[iorb] = ener
# NOTE(review): occupation is spread evenly over the 2l+1 m-components,
# halved again for restricted wavefunctions -- confirm against the
# CP2K output conventions.
orb_occupations[iorb] = occ / float((restricted + 1) * (2 * l + 1))
for ic in range(len(cs)):
orb_coeffs[offsets[l] + stride * ic + im, iorb] = cs[ic]
iorb += 1
|
indico_sixpay-2.0.2 | indico_sixpay-2.0.2//indico_sixpay/plugin.pyclass:SixpayPaymentPlugin/get_field_format_map | @staticmethod
def get_field_format_map(registration):
    """Build the dict of registration information used to fill format fields."""
    event_user = 'e{0}u{1}'.format(registration.event_id, registration.user_id)
    return {
        'user_id': registration.user_id,
        'user_name': registration.full_name,
        'user_firstname': registration.first_name,
        'user_lastname': registration.last_name,
        'event_id': registration.event_id,
        'event_title': registration.event.title,
        'eventuser_id': event_user,
        'registration_title': registration.registration_form.title,
    }
|
alignak_webui-0.12.2 | alignak_webui-0.12.2//alignak_webui/objects/element.pyclass:BackendElement/get_cache | @classmethod
def get_cache(cls):
    """Accessor for the protected class-level ``_cache`` member."""
    return cls._cache
|
bayes_opt | bayes_opt//util.pyclass:Colours/red | @classmethod
def red(cls, s):
    """Return ``s`` wrapped in the class's red colour codes."""
    colour = cls.RED
    return cls._wrap_colour(s, colour)
|
dask | dask//base.pyfile:/base.py:function:wait/wait | def wait(x, timeout=None, return_when='ALL_COMPLETED'):
""" Wait until computation has finished
This is a compatibility alias for ``dask.distributed.wait``.
If it is applied onto Dask collections without Dask Futures or if Dask
distributed is not installed then it is a no-op
"""
try:
from distributed import wait
return wait(x, timeout=timeout, return_when=return_when)
except (ImportError, ValueError):
return x
|
AccessControl | AccessControl//interfaces.pyclass:IUser/getRolesInContext | def getRolesInContext(object):
    """Get a sequence of the roles assigned to the user in a context.

    Interface declaration only (no body).  Roles include both global roles
    (ones assigned to the user directly inside the user folder) and local
    roles (assigned in context of the passed in object).
    """
|
def get_key_val_of_max(dict_obj):
    """Return the (key, value) pair whose value is maximal in ``dict_obj``.

    Example:
    --------
    >>> dict_obj = {'a':2, 'b':1}
    >>> print(get_key_val_of_max(dict_obj))
    ('a', 2)
    """
    return max(dict_obj.items(), key=lambda pair: pair[1])
|
jdcloud_cli-1.2.5 | jdcloud_cli-1.2.5//jdcloud_cli/cement/core/log.pyclass:ILog/set_level | def set_level():
    """
    Set the log level.  Must accept at least one of:
    ``['INFO', 'WARNING', 'ERROR', 'DEBUG', or 'FATAL']``.

    Interface declaration only (no body).
    """
|
def as_formatted_lines(lines):
    """
    Return text formatted for a Debian control file, with proper
    continuation markers for multi-line values.

    Each line is indented by one space; blank lines become the ' .'
    continuation placeholder.
    """
    if not lines:
        return ''
    continued = [(' ' + line) if line.strip() else ' .' for line in lines]
    return '\n'.join(continued).strip()
|
md_utils-0.10.0 | md_utils-0.10.0//md_utils/md_common.pyfile:/md_utils/md_common.py:function:print_qm_links/print_qm_links | def print_qm_links(c_alpha_dict, c_beta_dict, f_name, mode='w'):
"""
Write CP2K QM/MM &LINK sections breaking each residue between CA and CB.
Note: this needs to be tested. Only ran once to get the protein residues set up correctly.
@param c_alpha_dict: dict of protein residue to be broken to c_alpha atom id
@param c_beta_dict: as above, but for c_beta
@param f_name: The location of the file to write.
@param mode: default is to write to a new file. Use option to designate to append to existing file.
"""
# Iterate residues via c_beta_dict; c_alpha_dict is assumed to hold the
# same residue keys -- a missing key raises KeyError.
with open(f_name, mode) as m_file:
for resid in c_beta_dict:
# The template below is a CP2K input fragment; its exact text and
# whitespace are significant to CP2K, so leave it byte-identical.
m_file.write(
""" !! Break resid {} between CA and CB, and cap CB with hydrogen
&LINK
MM_INDEX {} !! CA
QM_INDEX {} !! CB
LINK_TYPE IMOMM
ALPHA_IMOMM 1.5
&END LINK
"""
.format(resid, c_alpha_dict[resid], c_beta_dict[resid]))
if mode == 'w':
print('Wrote file: {}'.format(f_name))
|
aghplctools-4.2.2 | aghplctools-4.2.2//aghplctools/data_types.pyclass:DADSignalInfo/create_from_agilent_string | @classmethod
def create_from_agilent_string(cls, string: str, name_override: str=None
) ->'DADSignalInfo':
"""
Creates a class instance from a standard Agilent signal description string (e.g. 'DAD1 A, Sig=210,4 Ref=360,100')
:param string: signal description string
:param name_override: override for name specification
:return: DADSignal object
"""
parsed = cls.get_values_from_agilent_string(string)
if name_override is not None:
parsed['name'] = name_override
return cls(**parsed)
|
sky | sky//crawler/reporting.pyfile:/crawler/reporting.py:function:url_report/url_report | def url_report(stat, stats, file=None):
"""Print a report on the state for this URL.
Also update the Stats instance.
"""
if stat.exception:
stats.add('fail')
stats.add('fail_' + str(stat.exception.__class__.__name__))
print(stat.url, 'error', stat.exception, file=file)
elif stat.next_url:
stats.add('redirect')
print(stat.url, stat.status, 'redirect', stat.next_url, file=file)
elif stat.content_type == 'text/html':
stats.add('html')
stats.add('html_bytes', stat.size)
print(stat.url, stat.status, stat.content_type, stat.encoding, stat
.size, '%d/%d' % (stat.num_new_urls, stat.num_urls), file=file)
else:
if stat.status == 200:
stats.add('other')
stats.add('other_bytes', stat.size)
else:
stats.add('error')
stats.add('error_bytes', stat.size)
stats.add('status_%s' % stat.status)
print(stat.url, stat.status, stat.content_type, stat.encoding, stat
.size, file=file)
|
atom | atom//mock_http_core.pyfile:/mock_http_core.py:function:_scrub_request/_scrub_request | def _scrub_request(http_request):
    """Remove the email address and password from a client login request.

    Since the mock server saves the request and response in plaintext,
    sensitive information like the password should be removed before saving
    the recordings. At the moment only requests sent to a ClientLogin URL
    are scrubbed.
    """
    if (http_request and http_request.uri and http_request.uri.path and
        http_request.uri.path.endswith('ClientLogin')):
        # Replace the credentials with a placeholder form body.
        http_request._body_parts = []
        http_request.add_form_inputs({'form_data':
            'client login request has been scrubbed'})
    else:
        # Non-login requests: drop the body parts entirely.
        # NOTE(review): if http_request is None this branch still
        # dereferences it -- callers appear to always pass a request.
        http_request._body_parts = []
    return http_request
|
marketo | marketo//rfc3339.pyfile:/rfc3339.py:function:_timedelta_to_seconds/_timedelta_to_seconds | def _timedelta_to_seconds(timedelta):
"""
>>> _timedelta_to_seconds(datetime.timedelta(hours=3))
10800
>>> _timedelta_to_seconds(datetime.timedelta(hours=3, minutes=15))
11700
"""
return (timedelta.days * 86400 + timedelta.seconds + timedelta.
microseconds // 1000)
|
patroni | patroni//utils.pyfile:/utils.py:function:parse_bool/parse_bool | def parse_bool(value):
"""
>>> parse_bool(1)
True
>>> parse_bool('off')
False
>>> parse_bool('foo')
"""
value = str(value).lower()
if value in ('on', 'true', 'yes', '1'):
return True
if value in ('off', 'false', 'no', '0'):
return False
|
fake-bpy-module-2.78-20200428 | fake-bpy-module-2.78-20200428//mathutils/geometry.pyfile:/mathutils/geometry.py:function:intersect_point_tri/intersect_point_tri | def intersect_point_tri(pt: 'mathutils.Vector', tri_p1: 'mathutils.Vector',
    tri_p2: 'mathutils.Vector', tri_p3: 'mathutils.Vector'
    ) ->'mathutils.Vector':
    """Takes 4 vectors: one is the point and the next 3 define the triangle.

    :param pt: Point
    :type pt: 'mathutils.Vector'
    :param tri_p1: First point of the triangle
    :type tri_p1: 'mathutils.Vector'
    :param tri_p2: Second point of the triangle
    :type tri_p2: 'mathutils.Vector'
    :param tri_p3: Third point of the triangle
    :type tri_p3: 'mathutils.Vector'
    :return: Point on the triangle's plane, or None if it is outside the
        triangle
    """
    # API stub from fake-bpy-module: the real implementation lives in
    # Blender's compiled mathutils.geometry module.
    pass
|
toptica | toptica//lasersdk/lasersdk_gen.pyfile:/lasersdk/lasersdk_gen.py:function:generate_device_class_methods/generate_device_class_methods | def generate_device_class_methods(use_async: bool) ->str:
    """Generates the default methods for a device class.

    Args:
        use_async (bool): True if asynchronous code should be generated,
            false otherwise.

    Returns:
        str: The Python code for the default methods of a device class:
        context-manager plumbing plus open/close, and additionally
        run/stop/poll for the synchronous variant.
    """
    # NOTE(review): the whitespace inside these templates is emitted
    # verbatim into generated source files -- confirm the indentation
    # matches the generator's surrounding class body.
    if use_async:
        return """
def __enter__(self):
return self
def __exit__(self):
raise RuntimeError()
async def __aenter__(self):
await self.open()
return self
async def __aexit__(self, *args):
await self.close()
def __await__(self):
return self.__aenter__().__await__()
async def open(self) -> None:
await self.__client.open()
async def close(self) -> None:
await self.__client.close()
"""
    else:
        return """
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def open(self) -> None:
self.__client.open()
def close(self) -> None:
self.__client.close()
def run(self, timeout: int = None) -> None:
self.__client.run(timeout)
def stop(self) -> None:
self.__client.stop()
def poll(self) -> None:
self.__client.poll()
"""
|
EasyMIDI | EasyMIDI//midiutil/MidiFile.pyfile:/midiutil/MidiFile.py:function:sort_events/sort_events | def sort_events(event):
"""
.. py:function:: sort_events(event)
The key function used to sort events (both MIDI and Generic)
:param event: An object of type :class:`MIDIEvent` or (a derrivative)
:class:`GenericEvent`
This function should be provided as the ``key`` for both
``list.sort()`` and ``sorted()``. By using it sorting will be as
follows:
* Events are ordered in time. An event that takes place earlier will
appear earlier
* If two events happen at the same time, the secondary sort key is
``ord``. Thus a class of events can be processed earlier than
another. One place this is used in the code is to make sure that note
off events are processed before note on events.
* If time and ordinality are the same, they are sorted in the order in
which they were originally added to the list. Thus, for example, if
one is making an RPN call one can specify the controller change
events in the proper order and be sure that they will end up in the
file that way.
"""
return event.time, event.ord, event.insertion_order
|
pymeasure | pymeasure//instruments/validators.pyfile:/instruments/validators.py:function:truncated_range/truncated_range | def truncated_range(value, values):
""" Provides a validator function that returns the value
if it is in the range. Otherwise it returns the closest
range bound.
:param value: A value to test
:param values: A set of values that are valid
"""
if min(values) <= value <= max(values):
return value
elif value > max(values):
return max(values)
else:
return min(values)
|
dsgutils-0.2.0 | dsgutils-0.2.0//dsgutils/pd/munging.pyfile:/dsgutils/pd/munging.py:function:pivot_by_2_categories/pivot_by_2_categories | def pivot_by_2_categories(df, cat1, cat2):
"""
Create pivot table of df by category 1 and category 2.
param: df: the dataframe
param: cat1: the first category
param: cat2: the second category
return: pivot table
"""
df_for_pivot = df[[cat1, cat2]].groupby([cat1, cat2]).size().reset_index(
name='counts')
df_pivot = df_for_pivot.pivot(index=cat1, columns=cat2, values='counts')
return df_pivot
|
steuer-0.2.2 | steuer-0.2.2//src/steuer/examples/simple/steuer_example.pyfile:/src/steuer/examples/simple/steuer_example.py:function:button_right_pressed/button_right_pressed | def button_right_pressed(controller):
"""
Callback function that is triggered, when the right button was pressed
:param controller: the controller that triggered the action
:type controller: steuer.Controller
"""
print('ACTION:' + str(controller.number) + ':' + controller.name +
':Button Right pressed')
|
dtlpy | dtlpy//entities/annotation.pyclass:Annotation/new | @classmethod
def new(cls, item=None, annotation_definition=None, object_id=None,
    automated=None, metadata=None, frame_num=None, parent_id=None,
    start_time=None, height=None, width=None):
    """
    Create a new annotation object.

    :param item: item to annotate
    :param annotation_definition: annotation type object
    :param object_id: object_id
    :param automated: is automated
    :param metadata: metadata dict; a parent_id is stored under
        metadata['system']['parentId']
    :param frame_num: optional - first frame number if video annotation
    :param parent_id: add parent annotation ID
    :param start_time: start time of the annotation; derived from
        frame_num and the item's fps when omitted
    :param height: annotation item's height
    :param width: annotation item's width
    :return: annotation object
    """
    # Default to the first frame when none is given.
    if frame_num is None:
        frame_num = 0
    if metadata is None:
        metadata = dict()
    # Parent annotation id lives under the 'system' metadata namespace.
    if parent_id is not None:
        if 'system' not in metadata:
            metadata['system'] = dict()
        metadata['system']['parentId'] = parent_id
    frames = dict()
    # fps is only available when annotating an item that carries it.
    if item is not None and item.fps is not None:
        fps = item.fps
    else:
        fps = None
    ann_type = None
    if annotation_definition is not None:
        ann_type = annotation_definition.type
    dataset_url = None
    dataset_id = None
    if item is not None:
        dataset_url = item.dataset_url
        dataset_id = item.datasetId
    # Derive start_time from the frame number when possible; guard the
    # fps == 0 case to avoid division by zero.
    if start_time is None:
        if fps is not None and frame_num is not None:
            start_time = frame_num / fps if fps != 0 else 0
        else:
            start_time = 0
    # NOTE(review): redundant -- frame_num was already defaulted to 0 above.
    if frame_num is None:
        frame_num = 0
    return cls(annotation_definition=annotation_definition, id=None, url=
        None, item_url=None, item=item, item_id=None, creator=None,
        createdAt=None, updatedBy=None, updatedAt=None, object_id=object_id,
        type=ann_type, dataset_url=dataset_url, dataset_id=dataset_id,
        height=height, width=width, metadata=metadata, fps=fps, status=None,
        automated=automated, frames=frames, end_frame=frame_num, end_time=0,
        start_frame=frame_num, start_time=start_time, platform_dict=dict())
|
invenio-records-rest-1.7.1 | invenio-records-rest-1.7.1//invenio_records_rest/utils.pyfile:/invenio_records_rest/utils.py:function:make_comma_list_a_list/make_comma_list_a_list | def make_comma_list_a_list(elements_to_rocess):
"""Process a list with commas to simple list.
For example:
['elem1','elem2,elem3'] => ['elem1', 'elem2', 'elem3']
:param elements_to_rocess: list to process
:return: processed list with elemnts separated
"""
output_list = []
for element in elements_to_rocess:
output_list.extend(element.split(','))
return list(set(output_list))
|
xblock | xblock//fields.pyclass:UserScope/scopes | @classmethod
def scopes(cls):
    """
    Return the list of valid/understood user scopes: NONE, ONE and ALL.
    """
    return [cls.NONE, cls.ONE, cls.ALL]
|
pyboto3-1.4.4 | pyboto3-1.4.4//pyboto3/s3.pyfile:/pyboto3/s3.py:function:can_paginate/can_paginate | def can_paginate(operation_name=None):
    """
    Check if an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name
        as the method name on the client. For example, if the
        method name is create_foo, and you'd normally invoke the
        operation as client.create_foo(**kwargs), if the
        create_foo operation can be paginated, you can use the
        call client.get_paginator('create_foo').
    """
    # Documentation stub (pyboto3): the runtime implementation is
    # provided by the actual boto3/botocore client.
    pass
|
fluids-0.1.78 | fluids-0.1.78//fluids/flow_meter.pyfile:/fluids/flow_meter.py:function:K_to_discharge_coefficient/K_to_discharge_coefficient | def K_to_discharge_coefficient(D, Do, K):
"""Converts a standard loss coefficient to a discharge coefficient.
.. math::
C = \\sqrt{\\frac{1}{2 \\sqrt{K} \\beta^{4} + K \\beta^{4}}
- \\frac{\\beta^{4}}{2 \\sqrt{K} \\beta^{4} + K \\beta^{4}} }
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of orifice at flow conditions, [m]
K : float
Loss coefficient with respect to the velocity and density of the fluid
just upstream of the orifice, [-]
Returns
-------
C : float
Coefficient of discharge of the orifice, [-]
Notes
-----
If expansibility is used in the orifice calculation, the result will not
match with the specified pressure drop formula in [1]_; it can almost
be matched by dividing the calculated mass flow by the expansibility factor
and using that mass flow with the loss coefficient.
This expression was derived with SymPy, and checked numerically. There were
three other, incorrect roots.
Examples
--------
>>> K_to_discharge_coefficient(D=0.07366, Do=0.05, K=5.2314291729754)
0.6151200000000001
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 2: Orifice Plates.
"""
beta = Do / D
beta2 = beta * beta
beta4 = beta2 * beta2
root_K = K ** 0.5
return ((1.0 - beta4) / (2.0 * root_K * beta4 + K * beta4)) ** 0.5
|
Crypro-2.0.3 | Crypro-2.0.3//Crypro/Util/number.pyfile:/Crypro/Util/number.py:function:GCD/GCD | def GCD(x, y):
"""GCD(x:long, y:long): long
Return the GCD of x and y.
"""
x = abs(x)
y = abs(y)
while x > 0:
x, y = y % x, x
return y
|
stock_dataframe | stock_dataframe//dataframe.pyclass:StockDataFrame/_get_smma | @classmethod
def _get_smma(cls, df, column, windows):
""" get smoothed moving average.
:param df: data
:param windows: range
:return: result series
"""
window = cls.get_only_one_positive_int(windows)
column_name = '{}_{}_smma'.format(column, window)
smma = df[column].ewm(ignore_na=False, alpha=1.0 / window, min_periods=
0, adjust=True).mean()
df[column_name] = smma
return smma
|
gemben | gemben//utils/evaluation_util.pyfile:/utils/evaluation_util.py:function:getEdgeListFromAdjMtx/getEdgeListFromAdjMtx | def getEdgeListFromAdjMtx(adj, threshold=0.0, is_undirected=True,
    edge_pairs=None):
    """Build a weighted edge list ``[(u, v, w), ...]`` from an adjacency matrix.

    :param adj: square adjacency matrix, indexable as ``adj[i, j]``
    :param threshold: minimum weight for an edge to be kept
    :param is_undirected: when scanning the full matrix, keep only the
        upper triangle (i < j) so each undirected edge appears once
    :param edge_pairs: optional iterable of (st, ed) pairs; when given,
        only those entries are tested
    """
    result = []
    node_num = adj.shape[0]
    if edge_pairs:
        # NOTE(review): this branch keeps weights >= threshold while the
        # full scan below uses a strict > -- confirm which is intended.
        for st, ed in edge_pairs:
            if adj[st, ed] >= threshold:
                result.append((st, ed, adj[st, ed]))
    else:
        for i in range(node_num):
            for j in range(node_num):
                # Skip self-loops.
                if j == i:
                    continue
                # For undirected graphs visit each unordered pair once.
                if is_undirected and i >= j:
                    continue
                if adj[i, j] > threshold:
                    result.append((i, j, adj[i, j]))
    return result
|
jupyweave | jupyweave//document_processor.pyclass:DocumentProcessor/__ask_for_language | @staticmethod
def __ask_for_language(filename, languages):
    """Interactively ask the user which language an unknown extension maps to.

    :param filename: document whose type could not be inferred
    :param languages: ordered collection of language names to offer
    :return: the chosen language name
    """
    print(
        "Cannot determine document '%s' type by extension. Select correct type:"
        % filename)
    # Present a 1-based menu of the available languages.
    for i, lang in enumerate(languages):
        print(str.format(' {0}) -> {1}', i + 1, lang))
    language = None
    # Re-prompt until a valid menu number is entered.
    while not language:
        dtype = input('Enter type number > ')
        try:
            dtype = int(dtype)
            if 1 <= dtype <= len(languages):
                language = languages[dtype - 1]
        except ValueError:
            # Non-numeric input: fall through and ask again.
            pass
        if not language:
            print('Invalid value')
    return language
|
sciscripttools | sciscripttools//arguments.pyfile:/arguments.py:function:process_arguement_pairs/process_arguement_pairs | def process_arguement_pairs(arguments):
"""
Create list of pairs from the arguments.
Parameters
----------
arguments : Tuple
tuple list of arguments that are passed into a function
"""
if not isinstance(arguments, tuple):
message = 'Expected a tuple of arguments.'
raise Exception(message)
pairs = []
for i in range(0, int(len(arguments) / 2)):
pairs.append((arguments[2 * i], arguments[2 * i + 1]))
return pairs
|
chemistry_tools-0.2.9 | chemistry_tools-0.2.9//chemistry_tools/cas.pyfile:/chemistry_tools/cas.py:function:check_cas_number/check_cas_number | def check_cas_number(cas_no):
    """
    Checks the CAS number to ensure the check digit is valid
    with respect to the rest of the number.

    If the CAS number is valid 0 is returned. If there is a problem the
    difference between the computed check digit and that given as part of
    the CAS number is returned.

    :param cas_no: CAS number as a plain integer (no hyphens)
    :type cas_no: int
    :return: 0 when valid, otherwise computed minus supplied check digit
    :rtype: int
    """
    cas_no = abs(int(cas_no))
    # CAS layout: <block_1>-<block_2 (two digits)>-<check digit>.
    check_digit = cas_no % 10
    main_value = (cas_no - check_digit) // 10
    block_2 = main_value % 100
    block_1 = (main_value - block_2) // 100
    # Weighted sum: each digit is multiplied by its 1-based position
    # counted from the right, excluding the check digit itself.
    last_digit = block_2 % 10
    check_total = last_digit + (block_2 - last_digit) // 10 * 2
    for position, digit in enumerate(str(block_1)[::-1]):
        check_total += int(digit) * (position + 3)
    if check_digit == check_total % 10:
        return 0
    else:
        return check_total % 10 - check_digit
|
perseus-core-library-1.12.8 | perseus-core-library-1.12.8//majormode/perseus/utils/email_util.pyfile:/majormode/perseus/utils/email_util.py:function:__build_author_name_expr/__build_author_name_expr | def __build_author_name_expr(author_name, author_email_address):
"""
Build the name of the author of a message as described in the Internet
Message Format specification: https://tools.ietf.org/html/rfc5322#section-3.6.2
:param author_name: complete name of the originator of the message.
:param author_email_address: address of the mailbox to which the author
of the message suggests that replies be sent.
:return: a string representing the author of the message, that is, the
mailbox of the person or system responsible for the writing of the
message. This string is intended to be used as the "From:" field
of the message.
"""
assert author_name is not None or author_email_address is not None, 'Both arguments MUST NOT be bull'
author_name_expr = author_name or author_email_address[:
author_email_address.find('@')]
if ' ' in author_name_expr:
author_name_expr = f'"{author_name_expr}"'
if author_email_address:
author_name_expr = f'{author_name_expr} <{author_email_address}>'
return author_name_expr
|
mpf | mpf//file_interfaces/yaml_roundtrip.pyclass:YamlRoundtrip/rename_key | @staticmethod
def rename_key(old_key, new_key, commented_map, logger=None):
    """Rename a key in YAML data loaded with the RoundTripLoader (i.e. data
    that contains comments).

    Comments attached to the renamed key are retained and the overall key
    order is maintained.

    Args:
        old_key: The existing key name you want to change.
        new_key: The new key name.
        commented_map: The YAML data CommentedMap class (from yaml.load)
            with the key you want to change.
        logger: Optional logger instance which will be used to log this at
            the debug level.

    Returns the updated CommentedMap YAML dict. (Note that this method does
    not change the dict object -- it's changed in place -- so you most
    likely don't need to do anything with the returned dict.)
    """
    # Nothing to do for a no-op rename or a missing key.
    if old_key == new_key or old_key not in commented_map:
        return commented_map
    key_list = list(commented_map.keys())
    for key in key_list:
        if key == old_key:
            if logger:
                logger.debug('Renaming key: %s: -> %s:', old_key, new_key)
            commented_map[new_key] = commented_map[old_key]
            # Carry the comment metadata over to the new key, if any.
            try:
                commented_map.ca.items[new_key] = commented_map.ca.items.pop(
                    old_key)
            except KeyError:
                pass
            del commented_map[old_key]
        else:
            # Rotate every other key to the end so the original ordering
            # is preserved around the renamed key.
            commented_map.move_to_end(key)
    return commented_map
|
mlagents-0.16.0 | mlagents-0.16.0//mlagents/trainers/tensorflow_to_barracuda.pyfile:/mlagents/trainers/tensorflow_to_barracuda.py:function:remove_duplicates_from_list/remove_duplicates_from_list | def remove_duplicates_from_list(array):
"""Preserves the order of elements in the list"""
output = []
unique = set()
for a in array:
if a not in unique:
unique.add(a)
output.append(a)
return output
|
pytknvim | pytknvim//tk_util.pyclass:Text/unique_int | @staticmethod
def unique_int(values):
    """
    Return the smallest non-negative integer not present in ``values``.

    e.g. for [0, 1, 3] this returns 2; calling repeatedly while adding
    each result back into ``values`` yields 2, 4, 5, ... in turn.
    """
    last = 0
    # ``num`` itself is unused; the loop merely bounds the number of
    # probes (at most len(values) increments are ever needed).
    for num in values:
        if last not in values:
            break
        else:
            last += 1
    return last
|
craterpy | craterpy//plotting.pyfile:/plotting.py:function:plot_ejecta_stats/plot_ejecta_stats | def plot_ejecta_stats():
    """Plot ejecta statistics.

    Placeholder: not yet implemented (body is a bare ``pass``).
    """
    pass
|
pydevd-pycharm-201.7223.92 | pydevd-pycharm-201.7223.92//pydevd_attach_to_process/winappdbg/textio.pyclass:HexDump/hexblock | @classmethod
def hexblock(cls, data, address=None, bits=None, separator=' ', width=8):
    """
    Dump a block of hexadecimal numbers from binary data.
    Also show a printable text version of the data.

    @type data: str
    @param data: Binary data.
    @type address: str
    @param address: Memory address where the data was read from.
    @type bits: int
    @param bits:
        (Optional) Number of bits of the target architecture.
        The default is platform dependent. See: L{HexDump.address_size}
    @type separator: str
    @param separator:
        Separator between the hexadecimal representation of each character.
    @type width: int
    @param width:
        (Optional) Maximum number of characters to convert per text line.
    @rtype: str
    @return: Multiline output text.
    """
    # Delegate to the generic block formatter, rendering each row with
    # cls.hexline and forwarding the row-formatting options.
    return cls.hexblock_cb(cls.hexline, data, address, bits, width,
        cb_kwargs={'width': width, 'separator': separator})
|
SQLAlchemy-1.3.17.data | SQLAlchemy-1.3.17.data//purelib/sqlalchemy/util/_collections.pyfile:/purelib/sqlalchemy/util/_collections.py:function:update_copy/update_copy | def update_copy(d, _new=None, **kw):
"""Copy the given dict and update with the given values."""
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
|
passlib | passlib//ifc.pyclass:DisabledHash/disable | @classmethod
def disable(cls, hash=None):
    """
    Return a string representing a 'disabled' hash; optionally including a
    previously enabled hash (this is up to the individual scheme).
    """
    # Default implementation ignores ``hash`` and hashes the empty string;
    # schemes that embed the old hash are expected to override this.
    return cls.hash('')
|
MDP-3.6 | MDP-3.6//mdp/nodes/recursive_expansion_nodes.pyfile:/mdp/nodes/recursive_expansion_nodes.py:function:recf_chebyshev_poly/recf_chebyshev_poly | def recf_chebyshev_poly(result, x, special, n, cur_var, pos):
"""Implementation of the recursion formula for Chebyshev polynomials
of the first kind.
The recursion formula is
T_n = 2xT_{n-1} - T_{n-2}.
.. note::
The procedure on how an recursion function is built can be found in the
docstring of the recursion function for
standard polynomials *recf_standard_poly*.
:param result: Contains the observations along the first dimension
and the function values of expansion w.r.t. an observation
along the second dimension.
:type result: numpy.ndarray
:param x: The data to be expanded.
:type x: numpy.ndarray
:param special: Index of a special element to be considered in the
recursion formula. The special element is the first
Chebyshev polynomial of the current variable.
:type special: int
:param n: The order of the function to be computed at this step.
:type n: int
:param cur_var: The index of the current variable to be considered in the
recursion. This value will have to be lower than x.shape[1].
:type cur_var: int
:param pos: The index of the element to be computed, along the second
dimension of result.
:type pos: int
:returns: The vectorized result (along the observations) of the
n-th recursion step of the cur_var-th variable.
:rtype: numpy.ndarray
.. admonition:: Reference
https://en.wikipedia.org/wiki/Chebyshev_polynomials
"""
return 2.0 * x[:, (cur_var)] * result[:, (pos - 1)] - result[:, (pos - 2)]
|
nml | nml//generic.pyfile:/generic.py:function:greatest_common_divisor/greatest_common_divisor | def greatest_common_divisor(a, b):
"""
Get the greatest common divisor of two numbers
@param a: First number.
@type a: C{int}
@param b: Second number.
@type b: C{int}
@return: Greatest common divisor.
@rtype: C{int}
"""
while b != 0:
t = b
b = a % b
a = t
return a
|
towel | towel//paginator.pyfile:/paginator.py:function:filter_adjacent/filter_adjacent | def filter_adjacent(iterable):
"""Collapse identical adjacent values"""
current = type(str('Marker'), (object,), {})
for item in iterable:
if item != current:
current = item
yield item
|
monoloco | monoloco//utils/kitti.pyfile:/utils/kitti.py:function:get_translation/get_translation | def get_translation(pp):
"""Separate intrinsic matrix from translation and convert in lists"""
kk = pp[:, :-1]
f_x = kk[0, 0]
f_y = kk[1, 1]
x0, y0 = kk[(2), 0:2]
aa, bb, t3 = pp[0:3, (3)]
t1 = float((aa - x0 * t3) / f_x)
t2 = float((bb - y0 * t3) / f_y)
tt = [t1, t2, float(t3)]
return kk.tolist(), tt
|
mlflow-1.8.0 | mlflow-1.8.0//mlflow/utils/search_utils.pyclass:SearchUtils/_trim_backticks | @classmethod
def _trim_backticks(cls, entity_type):
    """Remove backticks from an identifier like `param`, if they exist."""
    # Quoting detection and end-stripping are delegated to class helpers;
    # unquoted identifiers pass through unchanged.
    if cls._is_quoted(entity_type, '`'):
        return cls._trim_ends(entity_type)
    return entity_type
|
MongoFrames-1.3.5 | MongoFrames-1.3.5//mongoframes/frames.pyclass:_BaseFrame/_path_to_value | @classmethod
def _path_to_value(cls, path, parent_dict):
    """Return the value stored at ``path`` within ``parent_dict``.

    Returns None (implicitly) when any intermediate key is missing.
    """
    keys = cls._path_to_keys(path)
    # Walk down to the dict that should hold the final key.
    child_dict = parent_dict
    for key in keys[:-1]:
        child_dict = child_dict.get(key)
        if child_dict is None:
            return
    return child_dict.get(keys[-1])
|
ploneintranet-1.2.72 | ploneintranet-1.2.72//src/ploneintranet/microblog/interfaces.pyclass:IStatusContainer/mention_items | def mention_items(mentions, min=None, max=None, limit=100):
    """Interface declaration: filter (key, IStatusUpdate) items by mentions.

    ``min`` and ``max`` are longint IStatusUpdate.id keys.
    ``limit`` returns the [:limit] most recent items.
    """
|
mxnet-1.6.0.data | mxnet-1.6.0.data//purelib/mxnet/symbol/gen_op.pyfile:/purelib/mxnet/symbol/gen_op.py:function:Flatten/Flatten | def Flatten(data=None, name=None, attr=None, out=None, **kwargs):
    """Flattens the input array into a 2-D array by collapsing the higher dimensions.

    .. note:: `Flatten` is deprecated. Use `flatten` instead.

    For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation
    reshapes the input array into an output array of shape ``(d1, d2*...*dk)``.
    Note that the behavior of this function is different from
    numpy.ndarray.flatten, which behaves similar to
    mxnet.ndarray.reshape((-1,)).

    Example::
        x = [[
        [1,2,3],
        [4,5,6],
        [7,8,9]
        ],
        [ [1,2,3],
        [4,5,6],
        [7,8,9]
        ]],
        flatten(x) = [[ 1., 2., 3., 4., 5., 6., 7., 8., 9.],
        [ 1., 2., 3., 4., 5., 6., 7., 8., 9.]]

    Defined in src/operator/tensor/matrix_op.cc:L250

    Parameters
    ----------
    data : Symbol
        Input array.
    name : string, optional.
        Name of the resulting symbol.

    Returns
    -------
    Symbol
        The result symbol.

    Examples
    --------
    Flatten is usually applied before `FullyConnected`, to reshape the 4D
    tensor produced by convolutional layers to 2D matrix:

    >>> data = Variable('data') # say this is 4D from some conv/pool
    >>> flatten = Flatten(data=data, name='flat') # now this is 2D
    >>> SymbolDoc.get_output_shape(flatten, data=(2, 3, 4, 5))
    {'flat_output': (2L, 60L)}
    >>> test_dims = [(2, 3, 4, 5), (2, 3), (2,)]
    >>> op = Flatten(name='flat')
    >>> for dims in test_dims:
    ...     x = test_utils.random_arrays(dims)
    ...     y = test_utils.simple_forward(op, flat_data=x)
    ...     y_np = x.reshape((dims[0], np.prod(dims[1:]).astype('int32')))
    ...     print('%s: %s' % (dims, test_utils.almost_equal(y, y_np)))
    (2, 3, 4, 5): True
    (2, 3): True
    (2,): True
    """
    # Generated operator stub: the runtime implementation is registered
    # natively by MXNet; this placeholder only mirrors the documented
    # signature.
    return 0,
|
GSAS-II-WONDER_osx-1.0.4 | GSAS-II-WONDER_osx-1.0.4//GSAS-II-WONDER/GSASIIspc.pyfile:/GSAS-II-WONDER/GSASIIspc.py:function:SGErrors/SGErrors | def SGErrors(IErr):
"""
Interprets the error message code from SpcGroup. Used in SpaceGroup.
:param IErr: see SGError in :func:`SpcGroup`
:returns:
ErrString - a string with the error message or "Unknown error"
"""
ErrString = [' ', 'Less than 2 operator fields were found',
'Illegal Lattice type, not P, A, B, C, I, F or R',
'Rhombohedral lattice requires a 3-axis',
'Minus sign does not preceed 1, 2, 3, 4 or 6',
'Either a 5-axis anywhere or a 3-axis in field not allowed', ' ',
'I for COMPUTED GO TO out of range.',
'An a-glide mirror normal to A not allowed',
'A b-glide mirror normal to B not allowed',
'A c-glide mirror normal to C not allowed',
'D-glide in a primitive lattice not allowed',
'A 4-axis not allowed in the 2nd operator field',
'A 6-axis not allowed in the 2nd operator field',
'More than 24 matrices needed to define group', ' ',
'Improper construction of a rotation operator',
'Mirror following a / not allowed',
'A translation conflict between operators',
'The 2bar operator is not allowed',
'3 fields are legal only in R & m3 cubic groups',
'Syntax error. Expected I -4 3 d at this point', ' ',
'A or B centered tetragonal not allowed', ' ',
'unknown error in sgroup', ' ', ' ', ' ',
'Illegal character in the space group symbol']
try:
return ErrString[IErr]
except:
return 'Unknown error'
|
haanna-0.14.3 | haanna-0.14.3//haanna/haanna.pyclass:Haanna/get_active_name | @staticmethod
def get_active_name(root, schema_ids):
"""Get the active schema from a (list of) rule id(s)."""
active = None
for schema_id in schema_ids:
locator = root.find("rule[@id='" + schema_id + "']/active")
if locator.text == 'true':
active = root.find("rule[@id='" + schema_id + "']/name").text
return active
|
pybindingcurve | pybindingcurve//pybindingcurve.pyclass:Readout/fraction_l | @staticmethod
def fraction_l(system_parameters: dict, y):
""" Readout as fraction ligand bound """
return 'Fraction l bound', y / system_parameters['l']
|