repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
def evaluate_media_query(query_list, device_media_type):
    """Evaluate a media query list against a device media type.

    :param query_list: an iterable of media type strings
        (e.g. a cssutils stylesheets MediaList).
    :param device_media_type: a media type string (for now).
    :return: True if the list contains ``'all'`` or the device media type.
    """
    return any(media in query_list for media in ('all', device_media_type))
|
@staticmethod
def check_compatibility(a, b):
    """
    Simple comparison of cubes. If X, Y, Z dims are the same, and
    voxel sizes as well, then they are compatible. (Duck typed)
    See also the function is_compatible().
    :params Cube a: the first cube to be compared with the second (b).
    :params Cube b: the second cube to be compared with the first (a).
    :return: True when the cubes are compatible, False otherwise.
    """
    eps = 1e-05
    if a.dimx != b.dimx:
        return False
    if a.dimy != b.dimy:
        return False
    if a.dimz != b.dimz:
        return False
    # BUG FIX: the original compared the signed difference
    # (a.pixel_size - b.pixel_size > eps), so any cube with a *smaller*
    # pixel size than b always passed. Compare the absolute difference.
    if abs(a.pixel_size - b.pixel_size) > eps:
        return False
    if a.slice_distance != b.slice_distance:
        return False
    return True
|
uos.cli-1.6.1 | uos.cli-1.6.1//openstackclient/image/v1/image.pyfile:/openstackclient/image/v1/image.py:function:_format_visibility/_format_visibility | def _format_visibility(data):
"""Return a formatted visibility string
:param data:
The server's visibility (is_public) status value: True, False
:rtype:
A string formatted to public/private
"""
if data:
return 'public'
else:
return 'private'
|
def categorical_excess_cardinality_flagger_and_reducer(df, useful_cols,
    cardinality_fraction_threshold=0.1,
    cardinality_max_categories_threshold=50, **kwargs):
    """
    Inspect categorical features of ``df`` for excess cardinality (too many
    unique values) and return the features exceeding the thresholds, plus
    Series detailing cardinality in absolute and fractional terms.

    Parameters
    ----------
    df : pandas.DataFrame
        The input dataframe
    useful_cols : pandas.DataFrame
        Look-up dataframe with 'feature' and 'dtype' columns; rows whose
        dtype is 'nominal' are treated as categorical features.
    cardinality_fraction_threshold : float or None
        Ratio of unique values to total samples beyond which a feature is
        deemed to have excessive cardinality (None disables the check).
    cardinality_max_categories_threshold : int or None
        Absolute number of unique values beyond which a feature is deemed
        to have excessive cardinality (None disables the check).

    Keyword Arguments
    -----------------
    reducer : bool
        When True, offending features are reduced in-place: all but the
        top ``reducer_max_categories`` values become 'SMALL_CATEGORY'.
    reducer_max_categories : int
        Number of categories to keep per reduced feature.

    Returns
    -------
    df_cardinality : pandas.Series
        Cardinality of each categorical feature.
    df_cardinality_fraction : pandas.Series
        Cardinality fraction of each categorical feature.
    excess_cardinality : list
        Features which exceed one or more of the thresholds.
    """
    categorical_features = list(useful_cols.loc[useful_cols['dtype'] ==
        'nominal', 'feature'])
    categorical_data = df[categorical_features]
    m_samples = categorical_data.shape[0]
    df_cardinality = categorical_data.nunique()
    df_cardinality_fraction = df_cardinality / m_samples
    excess_cardinality_fraction = set(df_cardinality_fraction[
        df_cardinality_fraction > cardinality_fraction_threshold].index)
    print(
        f'Following features exceed cardiality fraction threshold of {cardinality_fraction_threshold}: {excess_cardinality_fraction}'
    )
    excess_cardinality_absolute = set(df_cardinality[df_cardinality >
        cardinality_max_categories_threshold].index)
    print(
        f'Following features exceed cardiality absolute number threshold of {cardinality_max_categories_threshold}: {excess_cardinality_absolute}'
    )
    excess_cardinality = list(set.union(excess_cardinality_fraction,
        excess_cardinality_absolute))
    # BUG FIX: the original did kwargs['reducer'], raising KeyError whenever
    # the caller did not pass reducer=...; default to a no-op instead.
    if kwargs.get('reducer'):
        n_max_categories = kwargs['reducer_max_categories']
        for feature in excess_cardinality:
            value_counts = df[feature].value_counts()
            # Everything past the first n_max_categories entries (sorted by
            # frequency) is folded into a single bucket.
            small_categories = list(value_counts[n_max_categories:].index)
            df.loc[df[feature].isin(small_categories), feature
                ] = 'SMALL_CATEGORY'
        print(
            f'Features {excess_cardinality} exceeded thresholds and have each been reduced down to a maximum {n_max_categories} categories per feature'
        )
    return df_cardinality, df_cardinality_fraction, excess_cardinality
|
@classmethod
def list(cls, context, limit=None, marker=None, sort_key=None, sort_dir=
    None, filters=None):
    """Return a list of Node objects.
    :param cls: the :class:`Node`
    :param context: Security context.
    :param limit: maximum number of resources to return in a single result.
    :param marker: pagination marker for large data sets.
    :param sort_key: column to sort results by.
    :param sort_dir: direction to sort. "asc" or "desc".
    :param filters: Filters to apply.
    :returns: a list of :class:`Node` object.
    """
    # Delegate filtering/pagination to the DB API, then wrap each DB row
    # in a Node object bound to the given security context.
    db_nodes = cls.dbapi.get_node_list(filters=filters, limit=limit, marker
        =marker, sort_key=sort_key, sort_dir=sort_dir)
    return cls._from_db_object_list(context, db_nodes)
|
def pop_trigger(data):
    """Pop the trigger name and its arguments out of ``data``.

    Removes the ``'trigger'`` key plus every key that belongs to that
    trigger type, and returns ``(trigger_name, trigger_args)``.
    Raises ``Exception`` for unsupported trigger names.
    """
    trigger_name = data.pop('trigger')
    known_arg_names = {
        'date': ('run_date', 'timezone'),
        'interval': ('weeks', 'days', 'hours', 'minutes', 'seconds',
            'start_date', 'end_date', 'timezone'),
        'cron': ('year', 'month', 'day', 'week', 'day_of_week',
            'hour', 'minute', 'second', 'start_date', 'end_date', 'timezone'),
    }
    if trigger_name not in known_arg_names:
        raise Exception('Trigger %s is not supported.' % trigger_name)
    trigger_args = {name: data.pop(name)
                    for name in known_arg_names[trigger_name]
                    if name in data}
    return trigger_name, trigger_args
|
@classmethod
def remove_attribute(cls, name):
    """Remove the attribute called ``name`` from the class.

    Mirrors ``delattr`` and, like it, returns None.
    """
    result = delattr(cls, name)
    return result
|
@classmethod
def decode(cls, value):
    """
    Convert the raw bytes read from redis back into a float.

    :param value: bytes, or None when the key was absent
    :return: float, or None when ``value`` is None
    """
    if value is None:
        return None
    return float(value)
|
def update_checksums():
    """Calculate and update all checksums.

    Interface method (no body): implementations are expected to recompute
    the checksum for every item this manager covers and persist the new
    values.
    """
|
def module_to_class(module):
    """Transform a module-style name into class style: 'fooBar' -> 'FooBar'.

    :param module: the module name; an empty string is returned unchanged
        (the original raised IndexError on '').
    :return: the name with its first character upper-cased.
    """
    if not module:
        return module
    return module[0].upper() + module[1:]
|
def not_zero(x):
    """Return None when ``x`` equals zero, otherwise ``x`` itself."""
    return None if x == 0 else x
|
firestore_odm-0.0.1.dev5 | firestore_odm-0.0.1.dev5//firestore_odm/context.pyclass:Context/_reload_testing_flag | @classmethod
def _reload_testing_flag(cls, testing):
"""
When testing is set to True, all authenticate decorators in services returns uid as "testuid1"
:param testing:
:return:
"""
cls.testing = testing
|
def duplicate_move(ACTION_OT_duplicate=None, TRANSFORM_OT_transform=None):
    """Make a copy of all selected keyframes and move them
    :param ACTION_OT_duplicate: Duplicate Keyframes, Make a copy of all selected keyframes
    :param TRANSFORM_OT_transform: Transform, Transform selected items by mode type
    """
    # Stub: this fake bpy API module only mirrors the operator signature;
    # the real implementation runs inside Blender.
    pass
|
@classmethod
def download_facebook_data(cls, request, ppm_id):
    """
    Downloads the Facebook dataset for the passed user
    :param request: The original Django request object
    :param ppm_id: The PPM ID of the requesting user
    :return: The requested dataset as raw response bytes, or None
    """
    # raw=True presumably skips response parsing so .content is the
    # unmodified body -- verify against cls.get's implementation.
    response = cls.get(request, f'/sources/api/facebook/{ppm_id}/download',
        raw=True)
    if response:
        return response.content
    # Falsy response (failed request) -> no data.
    return None
|
def jaccard_index(nodes_a, nodes_b):
    """Jaccard index of a bipartite graph.

    :param nodes_a: a set of nodes
    :param nodes_b: a set of nodes
    :return: |A & B| / |A | B|. Defined as 1.0 when both sets are empty
        (the original raised ZeroDivisionError in that case).
    """
    union = nodes_a | nodes_b
    if not union:
        return 1.0
    return len(nodes_a & nodes_b) / len(union)
|
def track_field(field):
    """
    Returns whether the given field should be tracked by Auditlog.
    Untracked fields are many-to-many relations and relations to the Auditlog LogEntry model.
    :param field: The field to check.
    :type field: Field
    :return: Whether the given field should be tracked.
    :rtype: bool
    """
    # Imported here, presumably to avoid a circular import at module load
    # time -- verify before hoisting.
    from auditlog.models import LogEntry
    # Many-to-many relations are never tracked.
    if field.many_to_many:
        return False
    # Relations pointing at LogEntry itself are skipped (would audit the
    # audit log). `remote_field` is the newer Django relation API ...
    if getattr(field, 'remote_field', None
        ) is not None and field.remote_field.model == LogEntry:
        return False
    # ... and `rel`/`rel.to` the legacy one -- same check for old Django.
    elif getattr(field, 'rel', None) is not None and field.rel.to == LogEntry:
        return False
    return True
|
dcos | dcos//cosmos.pyfile:/cosmos.py:function:_matches_expected_response_header/_matches_expected_response_header | def _matches_expected_response_header(request_headers, response_headers):
"""
Returns true if the Content-Type value of the response header matches the
Accept value of the request header, false otherwise
:param request_headers: the headers for a cosmos request
:type request_headers: dict[str, str]
:param response_headers: the headers for a cosmos response
:type response_headers: dict[str, str]
:return: true if the Content-Type value of the response header matches the
Accept value of the request header, false otherwise
:rtype: bool
"""
return request_headers.get('Accept') in response_headers.get('Content-Type'
)
|
@staticmethod
def previousChild(hotmap, index):
    """ Return the previous sibling of the node indicated by index.

    Clamped at the first child: index 0 yields the first node itself.
    """
    if index > 1:
        prev_index = index - 1
    else:
        prev_index = 0
    return hotmap[prev_index][1]
|
def no_intersections(nodes1, degree1, nodes2, degree2):
    """Determine if one triangle is in the other.
    Helper for :func:`combine_intersections` that handles the case
    of no points of intersection. In this case, either the triangles
    are disjoint or one is fully contained in the other.
    To check containment, it's enough to check if one of the corners
    is contained in the other triangle.
    Args:
        nodes1 (numpy.ndarray): The nodes defining the first triangle in
            the intersection (assumed in :math:\\mathbf{R}^2`).
        degree1 (int): The degree of the triangle given by ``nodes1``.
        nodes2 (numpy.ndarray): The nodes defining the second triangle in
            the intersection (assumed in :math:\\mathbf{R}^2`).
        degree2 (int): The degree of the triangle given by ``nodes2``.
    Returns:
        Tuple[Optional[list], Optional[bool]]: Pair (2-tuple) of
        * Edges info list; will be empty or :data:`None`
        * "Contained" boolean. If not :data:`None`, indicates
          that one of the triangles is contained in the other.
    """
    # Local import, presumably to dodge a circular dependency -- verify.
    from bezier import _py_triangle_intersection
    # Is the first corner (column 0) of triangle 1 inside triangle 2?
    located = _py_triangle_intersection.locate_point(nodes2, degree2,
        nodes1[0, 0], nodes1[1, 0])
    if located is not None:
        # Triangle 1 is contained in triangle 2.
        return None, True
    # Symmetric check: first corner of triangle 2 against triangle 1.
    located = _py_triangle_intersection.locate_point(nodes1, degree1,
        nodes2[0, 0], nodes2[1, 0])
    if located is not None:
        return None, False
    # Neither corner contained: the triangles are disjoint.
    return [], None
|
@staticmethod
def convert_csv_to_json(csv_report: str):
    """
    Convert a CSV string into a list of dicts, parsing the first row as
    keys and subsequent rows as values.

    :param csv_report: a csv string, delimited with "," and rows split
        with newlines. Cells equal to 'N/A' are omitted from the dicts.
    :return: the csv as a json-style array of dicts
    """
    json_report = []
    rows = csv_report.split('\n')
    headers = rows[0].split(',')
    for row in rows[1:]:
        # BUG FIX: a trailing newline used to yield a spurious entry
        # {header0: ''}; skip blank rows entirely.
        if not row.strip():
            continue
        values = row.split(',')
        entity = {}
        for i in range(len(values)):
            if values[i] != 'N/A':
                entity[headers[i]] = values[i]
        json_report.append(entity)
    return json_report
|
def replace_bigrams(textdict, bigrams):
    """
    Join known bigrams in every text with an underscore.

    Input:
        - textdict: a dict with {docid: preprocessed_text}
        - bigrams: iterable of bigram strings ("word1 word2")
    Returns:
        - textdict: the same texts but with all bigrams joined as "word1_word2"
    """
    for doc_id in set(textdict.keys()):
        doc_text = textdict[doc_id]
        for bigram in bigrams:
            if bigram in doc_text:
                doc_text = doc_text.replace(bigram,
                    '%s_%s' % tuple(bigram.split()))
        textdict[doc_id] = doc_text
    return textdict
|
def get_query_string(arg):
    """
    Based on given options, generate query string from Genbank.

    :param arg: parsed command-line options; reads .group, .query, .gene,
        .molecular, .taxon, .organelle, .refseq, .min_len, .max_len,
        .exclude. Returns None when no condition at all was given.
    """
    condition = []
    if arg.group is not None:
        condition.append('{}[filter]'.format(arg.group))
    if arg.query is not None:
        condition.append(arg.query)
    if arg.gene is not None:
        # Multi-word gene names are quoted so Entrez treats them as a phrase.
        if ' ' in arg.gene:
            condition.append('"{}"[gene]'.format(arg.gene))
        else:
            condition.append('{}[gene]'.format(arg.gene))
    if arg.molecular is not None:
        # Raises KeyError for values other than 'DNA'/'RNA'; presumably the
        # CLI restricts choices upstream -- verify.
        d = {'DNA': 'biomol_genomic[PROP]', 'RNA': 'biomol_mrna[PROP]'}
        condition.append(d[arg.molecular])
    if arg.taxon is not None:
        condition.append('{}[ORGANISM]'.format(arg.taxon))
    if arg.organelle is not None:
        # 'mt'/'mitochondrion' select the mitochondrion filter; any other
        # value falls back to plastid OR chloroplast.
        if arg.organelle in ('mt', 'mitochondrion'):
            condition.append('{mitochondrion}[filter]')
        else:
            condition.append('(plastid[filter] OR chloroplast[filter])')
    if arg.refseq:
        condition.append('refseq[filter]')
    # The sequence-length range is only appended when at least one other
    # condition exists and both bounds are set.
    if len(condition) > 0 and (arg.min_len is not None and arg.max_len is not
        None):
        condition.append('("{}"[SLEN] : "{}"[SLEN])'.format(arg.min_len,
            arg.max_len))
    if arg.exclude is not None:
        condition.append('NOT ({})'.format(arg.exclude))
    if not condition:
        return None
    else:
        string = ' AND '.join(condition)
        # Entrez syntax: 'AND NOT' collapses to 'NOT'.
        string = string.replace('AND NOT', 'NOT')
        return string
|
@classmethod
def axesNames(cls):
    """ The names of the axes that this inspector visualizes.

    The data under inspection is zero-dimensional, so there are no axes
    and an empty tuple is returned. See the parent class documentation
    for a more detailed explanation.
    """
    return ()
|
@classmethod
def to_yaml(cls, representer, data):
    """Required by :mod:`ruamel.yaml.constructor`.

    Serializes ``data`` as a YAML scalar tagged with '!<class name>'.
    """
    tag = '!' + cls.__name__
    return representer.represent_scalar(tag, str(data))
|
@classmethod
def where(cls, *expressions):
    """
    Creates and returns a query object, used for further chaining functions like sorting and pagination;
    :param expressions: the query filter expressions used to narrow the result-set
    :return: a query object preconfigured with the expressions
    :rtype: Query
    :raises NotImplementedError: always; subclasses must override.
    """
    # BUG FIX: the original wrote `raise NotImplemented('abstract method')`.
    # NotImplemented is a constant (not an exception type), so that line
    # actually raised TypeError. Use NotImplementedError.
    raise NotImplementedError('abstract method')
|
@classmethod
def disable(cls):
    """Disables console domain, prevents further console messages from being reported to the client.

    :return: tuple of the send payload for the 'disable' command and None.
    """
    # The trailing None presumably stands in for a response-parsing hook
    # used by other protocol commands -- verify against build_send_payload.
    return cls.build_send_payload('disable', {}), None
|
def piggify(fspec, target_fspec, opts, data):
    """Sample file processor.

    Intentionally a no-op: serves as a template showing the callback
    signature expected by the cmd_walker machinery. Parameter names
    suggest (source spec, target spec, options, shared walker data) --
    verify against the walker before relying on this.
    """
    pass
|
def __call__(event, bubbled=False, rule_filter=None):
    """Execute all rules applicable in the current context.

    ``event`` is the triggering event. ``bubbled`` should be True if the
    rules are being executed as part of a bubbling up of events (i.e. this
    is a parent of the context where the event was triggered).
    ``rule_filter``, if given, is a callable that will be passed each rule
    in turn and can vote on whether it should be executed by returning
    True or False. It should take the arguments (context, rule, event).
    """
|
numba-0.49.0 | numba-0.49.0//numba/core/inline_closurecall.pyfile:/numba/core/inline_closurecall.py:function:_get_all_scopes/_get_all_scopes | def _get_all_scopes(blocks):
"""Get all block-local scopes from an IR.
"""
all_scopes = []
for label, block in blocks.items():
if not block.scope in all_scopes:
all_scopes.append(block.scope)
return all_scopes
|
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces['closest-tag']
    if not tag:
        # No tag reachable: fall back to the bare short hash.
        rendered = pieces['short']
    else:
        rendered = tag
        distance = pieces['distance']
        if distance:
            rendered += '-%d-g%s' % (distance, pieces['short'])
    if pieces['dirty']:
        rendered += '-dirty'
    return rendered
|
def get_pyfile_pair(filename):
    """ Given the filename of a Python source or byte compiled filename,
    returns a pair of the source and byte compiled filename.

    Raises ValueError for any other extension. """
    for suffix in ('.py', '.pyc'):
        if filename.endswith(suffix):
            base = filename[:-len(suffix)]
            return base + '.py', base + '.pyc'
    raise ValueError('filename does not end with .py or .pyc')
|
@classmethod
def get(cls, project_id, rating_table_id):
    """Retrieve a single rating table
    Parameters
    ----------
    project_id : str
        The ID of the project the rating table is associated with.
    rating_table_id : str
        The ID of the rating table
    Returns
    -------
    rating_table : RatingTable
        The queried instance
    """
    # Build the REST resource path and hydrate a RatingTable from it.
    path = 'projects/{}/ratingTables/{}/'.format(project_id, rating_table_id)
    rating_table = cls.from_location(path)
    return rating_table
|
def get_minimum_role():
    """Get the minimum role needed to access the content here.

    Interface method (no body): implementations return the minimum role --
    presumably a Zope role identifier, verify against implementations.
    """
|
sanic-openapi-0.6.1 | sanic-openapi-0.6.1//sanic_openapi/api.pyclass:API/_add_decorators | @classmethod
def _add_decorators(cls, func, get_attribute):
"""
Adds the custom route decorators from the `decorators` class attribute to the route.
Arguments:
func: The decorated request handler function.
get_attribute: Attribute getter function to use.
"""
decorators = get_attribute(cls, 'decorators', None)
if decorators is not None:
for decorator in reversed(decorators):
func = decorator(func)
return func
|
def getPeer():
    """
    Return the address of the connected peer.

    Returns L{UNIXAddress}.
    """
|
def is_float(s):
    """Test if a string represents a float."""
    try:
        float(s)
    except ValueError:
        return False
    return True
|
def pre_solve(instance, outputs_dir=None):
    """
    Call pre-solve function (if present) in all modules used to compose this model.
    This function can be used to adjust the instance after it is created and before it is solved.

    :param instance: the composed model instance; must expose get_modules().
    :param outputs_dir: accepted for interface compatibility; unused here.
    """
    for module in instance.get_modules():
        # Only modules that define a pre_solve hook participate.
        if hasattr(module, 'pre_solve'):
            module.pre_solve(instance)
|
flatlibfr-0.0.1.dev4 | flatlibfr-0.0.1.dev4//flatlibfr/angle.pyfile:/flatlibfr/angle.py:function:_fixSlist/_fixSlist | def _fixSlist(slist):
""" Guarantees that a signed list has exactly four elements. """
slist.extend([0] * (4 - len(slist)))
return slist[:4]
|
@classmethod
def get_task_family(cls):
    """
    The task family for the given class.
    If ``task_namespace`` is not set, then it's simply the name of the
    class. Otherwise, ``<task_namespace>.`` is prefixed to the class name.
    Note: You normally don't want to override this.
    """
    # An empty or None namespace means the bare class name is the family.
    if not cls.get_task_namespace():
        return cls.__name__
    else:
        return '{}.{}'.format(cls.get_task_namespace(), cls.__name__)
|
@classmethod
def convert_days_to_time_unit(cls, days, time_unit=None):
    """
    Converts an amount of days to the specified time unit, as the ORM returns differences in days.

    :param days: the number of days to convert.
    :param time_unit: a TimeUnitType member or its string name; when None,
        the DEFAULT_TIME_UNIT system parameter is used.
    :raises ValueError: when the resolved time unit is not handled.
    """
    from ..models import SystemParameter
    from ..constants import TimeUnitType, FieldDataType
    from django.utils.translation import ugettext as _
    # BUG FIX: the original called isinstance(str, time_unit) with the
    # arguments swapped, which raised TypeError for any non-type argument
    # and never converted string names into enum members.
    if isinstance(time_unit, str):
        time_unit = TimeUnitType.to_enum(time_unit)
    if time_unit is None:
        time_unit = TimeUnitType.to_enum(SystemParameter.find_one(
            'DEFAULT_TIME_UNIT', FieldDataType.STRING, TimeUnitType.HOUR))
    # NOTE(review): year/month use the 365-day / 30-day approximations.
    if time_unit == TimeUnitType.YEAR:
        return days / 365
    if time_unit == TimeUnitType.MONTH:
        return days / 30
    if time_unit == TimeUnitType.DAY:
        return days
    if time_unit == TimeUnitType.HOUR:
        return days * 24
    if time_unit == TimeUnitType.MINUTE:
        return days * 24 * 60
    if time_unit == TimeUnitType.SECOND:
        return days * 24 * 60 * 60
    raise ValueError(_(__name__ + '.exceptions.invalid_time_unit'))
|
def extract_entities(seq: list, x=None) ->list:
    """Extract entities from a tag sequence (BIO/BIOES style).
    ---
    input: ['B', 'I', 'I', 'O', 'B', 'I']
    output: [(0, 3, ''), (4, 6, '')]
    ---
    input: ['B-loc', 'I-loc', 'I-loc', 'O', 'B-per', 'I-per']
    output: [(0, 3, '-loc'), (4, 6, '-per')]
    ---
    When ``x`` is given, the matching slice of ``x`` is appended to each
    entity tuple.
    """
    entities = []
    begin, entity_type = -1, None
    for idx, tag in enumerate(seq):
        if tag.startswith('S'):
            # Single-token entity: emit immediately, drop any open span.
            if x is None:
                entities.append((idx, idx + 1, tag[1:]))
            else:
                entities.append((idx, idx + 1, tag[1:], x[idx:idx + 1]))
            begin, entity_type = -1, None
        if tag.startswith(('B', 'O')):
            # B or O closes any span currently open.
            if begin >= 0:
                if x is None:
                    entities.append((begin, idx, entity_type))
                else:
                    entities.append((begin, idx, entity_type, x[begin:idx]))
                begin, entity_type = -1, None
            if tag.startswith('B'):
                begin, entity_type = idx, tag[1:]
    # Flush a span still open at the end of the sequence.
    if begin >= 0:
        if x is None:
            entities.append((begin, len(seq), entity_type))
        else:
            entities.append((begin, len(seq), entity_type, x[begin:]))
    return entities
|
def watch_for_pystol_timeouts(stop):
    """
    Watch for actions with timeouts.
    This method will listen for custom objects that time out.

    The current implementation returns True immediately (the original
    `while True` loop exited on its first iteration).
    """
    return True
|
def mule_msg(message, mule_farm=None):
    """Sends a message to a mule(s)/farm.
    :param str|unicode message:
    :param mule_farm: Mule ID, or farm name.
    :rtype: bool
    :raises ValueError: If no mules, or mule ID or farm name is not recognized.
    """
    # Stub: outside a running uWSGI server there are no mules, so sending
    # always reports failure.
    return False
|
def preparse_address(addr_spec):
    """
    Preparses email addresses. Used to handle odd behavior by ESPs.

    Splits on '@' and strips dots from the local part for gmail.com /
    googlemail.com addresses. Returns the parts list, or None when there
    is no '@' at all.
    """
    parts = addr_spec.split('@')
    if len(parts) < 2:
        return None
    local, domain = parts[0], parts[1]
    # Gmail ignores dots in the local part, so normalize them away.
    if domain in ('gmail.com', 'googlemail.com'):
        parts[0] = local.replace('.', '')
    return parts
|
def with_metaclass(meta, *bases):
    """Create a base class ('NewBase') constructed with the given metaclass."""
    name = 'NewBase'
    return meta(name, bases, {})
|
def normalize_slice(e, n):
    """
    Return the slice tuple normalized for an ``n``-element object.
    :param e: a slice object representing a selector
    :param n: number of elements in a sequence to which ``e`` is applied
    :returns: tuple ``(start, count, step)`` derived from ``e``.
    """
    # Empty sequence: nothing to select.
    if n == 0:
        return 0, 0, 1
    step = e.step
    if step is None:
        step = 1
    # Non-standard case: step == 0 encodes a "repeat" selector where
    # e.start is the starting index and e.stop is treated as a count.
    if step == 0:
        start = e.start
        count = e.stop
        if isinstance(start, int) and isinstance(count, int) and count >= 0:
            # Negative start counts from the end, as in normal slicing.
            if start < 0:
                start += n
            if start < 0:
                return 0, 0, 0
            return start, count, 0
        else:
            raise ValueError('Invalid slice %r' % e)
    assert isinstance(step, int) and step != 0
    if e.start is None:
        # Default start depends on direction: 0 forward, n-1 backward.
        start = 0 if step > 0 else n - 1
    else:
        start = e.start
        if start < 0:
            start += n
        # Start lies entirely outside the traversal: empty selection.
        if start < 0 and step < 0 or start >= n and step > 0:
            return 0, 0, 0
        start = min(max(0, start), n - 1)
    assert isinstance(start, int
        ) and 0 <= start < n, 'Invalid start: %r' % start
    if e.stop is None:
        # Open-ended: count elements from start to the boundary.
        if step > 0:
            count = (n - 1 - start) // step + 1
        else:
            count = start // -step + 1
    else:
        stop = e.stop
        if stop < 0:
            stop += n
        if step > 0:
            if stop > start:
                count = (min(n, stop) - 1 - start) // step + 1
            else:
                count = 0
        elif stop < start:
            # Backward slice: -1 acts as the exclusive lower bound.
            count = (start - max(stop, -1) - 1) // -step + 1
        else:
            count = 0
    # Sanity checks: the triple must only address valid indices.
    assert isinstance(count, int) and count >= 0
    assert count == 0 or 0 <= start + step * (count - 1
        ) < n, 'Wrong tuple: (%d, %d, %d)' % (start, count, step)
    return start, count, step
|
yapdfminer-1.2.2 | yapdfminer-1.2.2//pdfminer/utils.pyfile:/pdfminer/utils.py:function:drange/drange | def drange(v0, v1, d):
"""Returns a discrete range."""
assert v0 < v1, str((v0, v1, d))
return list(range(int(v0) // d, int(v1 + d) // d))
|
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Reads ./setup.py, prints guidance for anything missing or obsolete,
    and returns the number of problems found (0 when everything looks OK).
    """
    found = set()
    setters = False
    errors = 0
    # Scan line-by-line for the three required versioneer usages and for
    # legacy setter-style configuration that should now live in setup.cfg.
    with open('setup.py', 'r') as f:
        for line in f.readlines():
            if 'import versioneer' in line:
                found.add('import')
            if 'versioneer.get_cmdclass()' in line:
                found.add('cmdclass')
            if 'versioneer.get_version()' in line:
                found.add('get_version')
            if 'versioneer.VCS' in line:
                setters = True
            if 'versioneer.versionfile_source' in line:
                setters = True
    # All three markers must be present; otherwise print the template.
    if len(found) != 3:
        print('')
        print('Your setup.py appears to be missing some important items')
        print('(but I might be wrong). Please make sure it has something')
        print('roughly like the following:')
        print('')
        print(' import versioneer')
        print(' setup( version=versioneer.get_version(),')
        print(' cmdclass=versioneer.get_cmdclass(), ...)')
        print('')
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print('now lives in setup.cfg, and should be removed from setup.py')
        print('')
        errors += 1
    return errors
|
def delete_references(content, name=None):
    """Lookup and remove multiple references.

    Interface method (no body). ``name``, when given, presumably restricts
    removal to references of that name -- verify against implementations.
    """
|
def jaccard_similarity(s1, s2):
    """Compute Jaccard similarity of two sets.

    :param s1: an iterable (coerced to a set)
    :param s2: an iterable (coerced to a set)
    :return: |A & B| / |A | B|. Defined as 1.0 when both inputs are empty
        (the original raised ZeroDivisionError in that case).
    """
    s1 = set(s1)
    s2 = set(s2)
    union = s1 | s2
    if not union:
        return 1.0
    return len(s1 & s2) / len(union)
|
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .
    The ".dev0" means dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = tag
        if pieces['distance'] or pieces['dirty']:
            rendered += '.post%d' % pieces['distance']
            if pieces['dirty']:
                rendered += '.dev0'
    else:
        # No tag reachable: synthesize a 0-based post release.
        rendered = '0.post%d' % pieces['distance']
        if pieces['dirty']:
            rendered += '.dev0'
    return rendered
|
def np_reshape_safe(A, shape, just_warn=False):
    """
    Reshape without silently copying.

    Assigning to ``.shape`` on a view raises when numpy would need a copy.
    With ``just_warn=True`` a copying reshape is performed instead and a
    warning is issued; otherwise the exception propagates.
    """
    import warnings
    result = A.view()
    if not just_warn:
        result.shape = shape
        return result
    try:
        result.shape = shape
    except AttributeError:
        warnings.warn(
            'Reshape made a copy rather than just returning a view',
            stacklevel=2)
        result = A.reshape(shape)
    return result
|
def hide_view_clear():
    """Reveal the object by setting the hide flag
    """
    # Stub: the fake bpy module only mirrors the operator signature; the
    # real implementation runs inside Blender.
    pass
|
mars | mars//tensor/einsum/einsumfunc.pyfile:/tensor/einsum/einsumfunc.py:function:_find_contraction/_find_contraction | def _find_contraction(positions, input_sets, output_set):
"""
Finds the contraction for a given set of input and output sets.
Parameters
----------
positions : iterable
Integer positions of terms used in the contraction.
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
Returns
-------
new_result : set
The indices of the resulting contraction
remaining : list
List of sets that have not been contracted, the new set is appended to
the end of this list
idx_removed : set
Indices removed from the entire contraction
idx_contraction : set
The indices used in the current contraction
Examples
--------
# A simple dot product test case
>>> pos = (0, 1)
>>> isets = [set('ab'), set('bc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
# A more complex case with additional terms in the contraction
>>> pos = (0, 2)
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
"""
idx_contract = set()
idx_remain = output_set.copy()
remaining = []
for ind, value in enumerate(input_sets):
if ind in positions:
idx_contract |= value
else:
remaining.append(value)
idx_remain |= value
new_result = idx_remain & idx_contract
idx_removed = idx_contract - new_result
remaining.append(new_result)
return new_result, remaining, idx_removed, idx_contract
|
@staticmethod
def add_space(window):
    """
    Shortcut for adding a single space to a window at the current position
    """
    row, col = window.getyx()
    _, max_cols = window.getmaxyx()
    # Cells remaining on this row, keeping one cell spare -- presumably to
    # avoid curses' error when writing the last cell of a line; verify.
    n_cols = max_cols - col - 1
    if n_cols <= 0:
        # No room left: silently skip rather than raise.
        return
    window.addstr(row, col, ' ')
|
def deregister_volume(VolumeId=None):
    """
    Deregisters an Amazon EBS volume so it can be registered by another stack
    (AWS OpsWorks Stacks ``DeregisterVolume``).

    :type VolumeId: string
    :param VolumeId: [REQUIRED] The AWS OpsWorks Stacks volume ID -- the GUID
        assigned when the volume was registered with the stack, not the
        Amazon EC2 volume ID.
    """
    pass
|
def m2m_clean_unchanged_fields(dict_diff):
    """Return a list of dicts containing only the changed m2m entries.

    Each inner entry is a two-element sequence ``[old, new]``; entries whose
    values are equal are dropped. Changed entries are re-keyed as
    ``"<outer>.<inner>"``. NOTE: ``dict_diff`` is consumed -- every outer key
    is deleted from it as it is processed.
    """
    changed = []
    for outer_key in sorted(dict_diff):
        inner = dict_diff.get(outer_key)
        kept = {}
        for inner_key in sorted(inner):
            pair = inner[inner_key]
            if pair[0] == pair[1]:
                # Unchanged: also prune it from the inner dict.
                del inner[inner_key]
            else:
                kept['%s.%s' % (outer_key, inner_key)] = pair
        del dict_diff[outer_key]
        if kept:
            changed.append(kept)
    return changed
|
def fromargskw(argskw, argspecs, slf_or_clsm=False):
    """Turns a linearized list of args into (args, keywords) form
    according to given argspecs (like inspect module provides).

    ``argskw`` layout assumed: positional args first, then (if *varargs*)
    one sequence holding the varargs, then one value per keyword-only arg,
    then (if **kwargs) a trailing dict.  ``slf_or_clsm`` signals that the
    first entry of ``argspecs.args`` is self/cls and is not part of
    ``argskw``.
    """
    res_args = argskw
    # Py2-style argspecs expose 'keywords'; Py3 FullArgSpec uses 'varkw'.
    try:
        kwds = argspecs.keywords
    except AttributeError:
        kwds = argspecs.varkw
    if not kwds is None:
        # A **kwargs dict is linearized as the trailing element.
        res_kw = argskw[-1]
        res_args = argskw[:-1]
    else:
        res_kw = None
    if not argspecs.varargs is None:
        # Position where the *args sequence sits; shifted by one when the
        # leading self/cls slot is not present in argskw.
        vargs_pos = len(argspecs.args) - 1 if slf_or_clsm else len(argspecs
            .args)
        if vargs_pos > 0:
            # Splice the varargs sequence back into a flat tuple.
            res_lst = list(argskw[:vargs_pos])
            res_lst.extend(argskw[vargs_pos])
            res_args = tuple(res_lst)
        else:
            # No named positionals: the varargs sequence IS the args.
            res_args = argskw[0]
    try:
        if len(argspecs.kwonlyargs) > 0:
            # Keyword-only values sit just before the trailing **kwargs dict
            # (if any); fold them into the keyword dict.
            res_kw = {} if res_kw is None else dict(res_kw)
            ipos = -len(argspecs.kwonlyargs) - (0 if kwds is None else 1)
            for name in argspecs.kwonlyargs:
                res_kw[name] = argskw[ipos]
                ipos += 1
    except AttributeError:
        # Py2 argspecs have no 'kwonlyargs' attribute.
        pass
    if res_kw is None:
        res_kw = {}
    return res_args, res_kw
|
def Confinement(D, rhol, rhog, sigma, g=g):
    """Calculate the Confinement number ``Co`` for a fluid in a channel.

    .. math::
        \\text{Co}=\\frac{\\left[\\frac{\\sigma}{g(\\rho_l-\\rho_g)}\\right]^{0.5}}{D}

    Parameters
    ----------
    D : float
        Diameter of channel, [m]
    rhol : float
        Density of liquid phase, [kg/m^3]
    rhog : float
        Density of gas phase, [kg/m^3]
    sigma : float
        Surface tension between liquid-gas phase, [N/m]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    Co : float
        Confinement number [-]

    Notes
    -----
    Ratio of the capillary length scale (surface tension vs. buoyancy) to
    the channel diameter.  Used in two-phase pressure drop and heat transfer
    correlations; first used in Cornwell & Kew (1993) according to Tran et
    al. (2000).  See also Kandlikar (2006).

    Examples
    --------
    >>> Confinement(0.001, 1077, 76.5, 4.27E-3)
    0.6596978265315191
    """
    capillary_length = (sigma / (g * (rhol - rhog))) ** 0.5
    return capillary_length / D
|
openstack-heat-13.0.1 | openstack-heat-13.0.1//heat/engine/api.pyfile:/heat/engine/api.py:function:_parse_object_status/_parse_object_status | def _parse_object_status(status):
"""Parse input status into action and status if possible.
This function parses a given string (or list of strings) and see if it
contains the action part. The action part is exacted if found.
:param status: A string or a list of strings where each string contains
a status to be checked.
:returns: (actions, statuses) tuple, where actions is a set of actions
extracted from the input status and statuses is a set of pure
object status.
"""
if not isinstance(status, list):
status = [status]
status_set = set()
action_set = set()
for val in status:
for s in ('COMPLETE', 'FAILED', 'IN_PROGRESS'):
index = val.rfind(s)
if index != -1:
status_set.add(val[index:])
if index > 1:
action_set.add(val[:index - 1])
break
return action_set, status_set
|
def _set_config(c):
    """Set gl configuration for the template backend.

    :param c: GL configuration to apply.
    :raises NotImplementedError: always -- the template backend is a skeleton
        for real backends to fill in.
    """
    raise NotImplementedError
|
def cache(path):
    """
    Set the directory used to cache requests for the session.

    Example:
        >>> from faux import requests
        >>> requests.cache('/path/to/local/cache')
        >>> requests.get('http://localhost')

    :param path: cache directory path; must be non-empty.
    :raises AssertionError: if *path* is falsy.
    """
    global CACHE
    if not path:
        raise AssertionError(
            'Invalid path specified for cache directory ("{}")!'.format(path))
    CACHE = path
    return
|
pactools-0.2.0b0 | pactools-0.2.0b0//pactools/mne_api.pyfile:/pactools/mne_api.py:function:_check_mne/_check_mne | def _check_mne(name):
"""Helper to check if h5py is installed"""
try:
import mne
except ImportError:
raise ImportError('Please install MNE-python to use %s.' % name)
return mne
|
def isarray(x):
    """Duck-typed check: True for indexable objects other than strings."""
    if isinstance(x, str):
        return False
    return hasattr(x, '__getitem__')
|
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces['closest-tag']
    distance = pieces['distance']
    if not tag:
        return '0.post.dev%d' % distance
    if distance:
        return '%s.post.dev%d' % (tag, distance)
    return tag
|
def add_navigation(app):
    """Add the navigation feature to the given app.

    :param app: application instance passed through to the feature hook
        (presumably a Flask app -- confirm with callers).
    """
    # Imported lazily, inside the function, at registration time.
    from boiler.feature.navigation import navigation_feature
    navigation_feature(app)
|
def extract_ini_from_doc(doc):
    """Return the text after the last ``INI`` marker in *doc* (whole string if absent)."""
    return doc.rpartition('INI')[2]
|
def select_component_mass(gx_component):
    """Select the Galactic component mass according to McMillan (2011).

    Parameters
    ----------
    gx_component : str
        Choose from: 'ThinDisk', 'Bulge', 'ThickDisk'

    Returns
    -------
    gx_component_mass : float
        Galactic component mass [Msun]

    Raises
    ------
    ValueError
        If *gx_component* is not one of the supported component names.
        (Previously an unknown name crashed with ``UnboundLocalError``.)
    """
    masses = {'ThinDisk': 43200000000.0, 'Bulge': 8900000000.0,
        'ThickDisk': 14400000000.0}
    try:
        return masses[gx_component]
    except KeyError:
        raise ValueError(
            "gx_component must be one of 'ThinDisk', 'Bulge', 'ThickDisk'; got %r"
             % (gx_component,))
|
def roteParser(line, frame, time, value, ref1, ref2, ref3, extra):
    """Parse one R-OTE sample line and append its fields to the given lists.

    Sample line:
        1.00,[17:25:39.3415],2737.8,3897.32,675.3,892.12

    The first three fields (frame, time, value) are mandatory; the three
    reference readings are optional and skipped when empty.  ``extra`` is
    unused, kept for signature parity with the other parsers.
    """
    fields = line.split(',')
    frame.append(fields[0])
    time.append(fields[1])
    value.append(fields[2])
    for idx, target in ((3, ref1), (4, ref2), (5, ref3)):
        if len(fields) > idx and fields[idx]:
            target.append(fields[idx])
|
def meta_separate():
    """Put the contents of a metastrip back in the sequencer.

    Stub for the fake Blender API (``sequencer.meta_separate`` operator);
    the body intentionally does nothing.
    """
    pass
|
def set_bit(v, index, x):
    """Return *v* with its index:th bit set to 1 if *x* is truthy, else 0."""
    bit = 1 << index
    if x:
        return v | bit
    return v & ~bit
|
@classmethod
def get_report_model(cls):
    """Return the Model class this report view operates on.

    Resolved lazily via the class attribute rather than at module import
    time: during tests ``override_settings`` can swap the report model, and
    a module-level lookup would capture the original (unswapped) model and
    raise errors.  Always call this method instead of reading the model at
    module level.

    :return: the Model to use
    """
    return cls.report_model
|
stackinawsgi-0.1 | stackinawsgi-0.1//stackinawsgi/wsgi/app.pyclass:App/response_for_status | def response_for_status(cls, status):
"""
Generate a status string for the status code
:param int status: the status code to look-up
:returns: string for the value or an appropriate Unknown value
"""
if status in cls.status_values:
return cls.status_values[status]
elif status >= 100 and status < 200:
return 'Unknown Informational Status'
elif status >= 200 and status < 300:
return 'Unknown Success Status'
elif status >= 300 and status < 400:
return 'Unknown Redirection Status'
elif status >= 400 and status < 500:
return 'Unknown Client Error'
elif status >= 500 and status < 600:
return 'Unknown Server Error'
else:
return 'Unknown Status'
|
def get_resource(restApiId=None, resourceId=None, embed=None):
    """
    Lists information about a resource (Amazon API Gateway ``GetResource``).

    Example::

        response = client.get_resource(
            restApiId='string',
            resourceId='string',
            embed=['string'],
        )

    :type restApiId: string
    :param restApiId: [REQUIRED] The RestApi identifier for the resource.
    :type resourceId: string
    :param resourceId: [REQUIRED] The identifier for the Resource resource.
    :type embed: list of string
    :param embed: Embedded resources to include in the returned Resource
        representation.  Currently only retrieval of the embedded Method
        resources is supported, via the single-valued list ``['methods']``,
        e.g. ``GET /restapis/{restapi_id}/resources/{resource_id}?embed=methods``.
    :rtype: dict
    :return: A Resource representation containing ``id``, ``parentId``,
        ``pathPart``, ``path`` and ``resourceMethods`` -- a mapping from HTTP
        method to its settings: authorization (type, authorizer, API-key
        requirement), request parameters/models/validator, method responses
        (status code, response parameters/models), and the method integration
        (type HTTP|AWS|MOCK|HTTP_PROXY|AWS_PROXY, uri, credentials,
        request parameters/templates, passthrough behavior, content handling,
        cache namespace/key parameters, and integration responses).
    """
    pass
|
barman-2.10 | barman-2.10//barman/output.pyfile:/barman/output.py:function:_format_message/_format_message | def _format_message(message, args):
"""
Format a message using the args list. The result will be equivalent to
message % args
If args list contains a dictionary as its only element the result will be
message % args[0]
:param str message: the template string to be formatted
:param tuple args: a list of arguments
:return: the formatted message
:rtype: str
"""
if len(args) == 1 and isinstance(args[0], dict):
return message % args[0]
elif len(args) > 0:
return message % args
else:
return message
|
def get_project_milestones(gl, project_id):
    """ Get Project's Milestones
    :param gl: GitLab Object Instance
    :param project_id: Project Identifier (int)
    :return: Milestones (List)
    """
    # NOTE(review): stub -- always returns False instead of the documented
    # list of milestones; confirm intended behavior with callers.
    return False
|
def get_getmodel_itm_cmd(number_of_samples, gul_threshold,
    use_random_number_file, gul_alloc_rule, item_output, process_id,
    max_process_id, correlated_output, **kwargs):
    """
    Build the getmodel ktools command (3.1.0+) for a gulcalc item stream.

    :param number_of_samples: The number of samples to run
    :type number_of_samples: int
    :param gul_threshold: The GUL threshold to use
    :type gul_threshold: float
    :param use_random_number_file: flag to use the random number file
    :type use_random_number_file: bool
    :param gul_alloc_rule: back allocation rule for gulcalc
    :type gul_alloc_rule: int
    :param item_output: The item output
    :type item_output: str
    :param correlated_output: correlated output target; '' disables ``-j``
    :return: The generated getmodel command
    """
    parts = ['eve {} {} | getmodel | gulcalc -S{} -L{}'.format(process_id,
        max_process_id, number_of_samples, gul_threshold)]
    if use_random_number_file:
        parts.append('-r')
    if correlated_output != '':
        parts.append('-j {}'.format(correlated_output))
    parts.append('-a{} -i {}'.format(gul_alloc_rule, item_output))
    return ' '.join(parts)
|
def p_w_from_rh_p_and_ws(rh, p_ws):
    """
    Water vapor pressure from relative humidity and saturation pressure.
    Eq(6) in "CHAPTER 6 - PSYCHROMETRICS", 2001 ASHRAE Fundamentals
    Handbook (SI).

    :param rh: relative humidity [-]
    :type rh: double
    :param p_ws: water vapor saturation pressure [Pa]
    :type p_ws: double
    :return: water vapor pressure [Pa]
    :rtype: double
    """
    partial_pressure = rh * p_ws
    return partial_pressure
|
@classmethod
def team_merge_request_canceled_shown_to_primary_team_details(cls, val):
    """
    Create an instance of this class set to the
    ``team_merge_request_canceled_shown_to_primary_team_details`` tag with
    value ``val``.

    :param TeamMergeRequestCanceledShownToPrimaryTeamDetails val: payload
        for the tagged-union variant.
    :rtype: EventDetails
    """
    return cls('team_merge_request_canceled_shown_to_primary_team_details', val)
|
def neg_edges(data):
    """
    Negate edge half (non-middle) of spectra.

    The outer quarters along the last axis are negated in place; the
    (modified) input array is returned.
    """
    npts = data.shape[-1]
    lo = int(npts * 1.0 / 4)
    hi = int(npts * 3.0 / 4)
    data[..., :lo] = -data[..., :lo]
    data[..., hi:] = -data[..., hi:]
    return data
|
orthopy | orthopy//line_segment/jacobi_relations.pyfile:/line_segment/jacobi_relations.py:function:_n3/_n3 | def _n3(n, a, b, x):
"""Recurrence coefficients for n-1, n, n+11 (a, b all equal).
"""
return {(n - 1, a, b): +2 * (n + a) * (n + b) * (2 * n + a + b + 2), (n,
a, b): -(2 * n + a + b + 1) * (a ** 2 - b ** 2 + x * (2 * n + a + b
) * (2 * n + a + b + 2)), (n + 1, a, b): +2 * (n + 1) * (n + a + b +
1) * (2 * n + a + b)}
|
def create_file_with_license_notice(path: str, file_extension: str) ->None:
    """Create a new source file at *path* pre-populated with the license notice.

    Reads the notice from a ``LICENSE_NOTICE`` file in the current working
    directory, picks a language-specific template module based on
    *file_extension* (c/cpp, java, js; anything else falls back to the
    Python template), substitutes the notice for the ``[NOTICE]``
    placeholder, and writes the result to *path* (overwriting it).

    :param path: destination path of the file to create
    :param file_extension: extension without the dot, e.g. ``"cpp"``
    """
    with open('LICENSE_NOTICE', 'r') as file:
        license_notice: str = file.read()
    # Template modules are imported lazily so only the needed one loads.
    if 'c' == file_extension or 'cpp' == file_extension:
        from data.new_file.cpp import FILE_CONTENT
    elif 'java' == file_extension:
        from data.new_file.java import FILE_CONTENT
    elif 'js' == file_extension:
        from data.new_file.js import FILE_CONTENT
    else:
        from data.new_file.py import FILE_CONTENT
    FILE_CONTENT = FILE_CONTENT.replace('[NOTICE]', license_notice)
    with open(path, 'w') as file:
        file.write(FILE_CONTENT)
|
def GetTargetCBVs(model):
    """
    Returns the design matrix of CBVs for the given target.

    :param model: An instance of the :py:obj:`everest` model for the target
    :raises NotImplementedError: always -- this mission is not yet supported.
    """
    raise NotImplementedError('This mission is not yet supported.')
|
redpipe | redpipe//fields.pyclass:BooleanField/decode | @classmethod
def decode(cls, value):
"""
convert from redis bytes into a boolean value
:param value: bytes
:return: bool
"""
return None if value is None else bool(value)
|
pyWireGuard_proto-2019.12.0 | pyWireGuard_proto-2019.12.0//pywireguard/peer.pyclass:Peer/import_ | @classmethod
def import_(cls, config):
"""
Imports peer from string.
:param config:
:return:
"""
instnace = cls()
for line in config:
line = line.strip()
setattr(instnace, line.split(':')[0].replace(' ', '_'), line.split(
':')[1])
return instnace
|
def is_multitarget_regressor(clf):
    """
    Return True if the fitted regressor predicts multiple targets,
    False if it predicts a single target (judged from ``coef_``'s shape).
    """
    coef_shape = clf.coef_.shape
    return len(coef_shape) > 1 and coef_shape[0] > 1
|
def vector_dot(xyz, vector):
    """
    Take a dot product between an array of vectors, xyz and a vector [x, y, z]

    **Required**

    :param numpy.ndarray xyz: grid (npoints x 3)
    :param numpy.ndarray vector: vector (1 x 3)

    **Returns**

    :returns: dot product between the grid and the (1 x 3) vector, returns an
        (npoints x 1) array
    :rtype: numpy.ndarray

    :raises ValueError: if *vector* does not have exactly 3 components.
        (Previously raised a bare ``Exception``; ``ValueError`` is a
        subclass of ``Exception`` so existing handlers still match.)
    """
    if len(vector) != 3:
        raise ValueError(
            'vector should be length 3, the provided length is {}'.format(
            len(vector)))
    return vector[0] * xyz[:, 0] + vector[1] * xyz[:, 1] + vector[2] * xyz[
        :, 2]
|
def luma(p):
    """
    Relative luminance of a pixel, per ITU-R BT.709 coefficients.
    Note that human eyes are most sensitive to green light.

    :param p: A tuple of (R,G,B) values
    :return: The relative luminance (in the range [0, 255]).
    """
    r, g, b = p[0], p[1], p[2]
    return 0.2126 * r + 0.7152 * g + 0.0722 * b
|
@classmethod
def parse(cls, data):
    """Parse a JSON object into a model instance.

    :param data: decoded JSON payload
    :raises NotImplementedError: always -- subclasses must override this hook.
    """
    raise NotImplementedError
|
@classmethod
def fromkeys(cls, *args, verify=True, mutable=True, **kwargs):
    """Initialise a header from keyword values.

    Like fromvalues, but without any interpretation of keywords.

    Note that this just passes kwargs to the class initializer as a dict
    (for compatibility with fits.Header). It is present for compatibility
    with other header classes only.

    :param verify: whether to verify the resulting header.
    :param mutable: whether the resulting header allows modification.
    """
    return cls(kwargs, *args, verify=verify, mutable=mutable)
|
def in_p3(t: float) ->float:
    """
    Cubic ease-in function.

    :param t: input value in range [0,1]
    :return: t cubed
    """
    # t * t * t (not t ** 3) keeps bit-identical float results.
    cubed = t * t * t
    return cubed
|
@classmethod
def get_buffer_size_in_pages(cls, address, size):
    """
    Get the number of pages in use by the given buffer.

    @type address: int
    @param address: Aligned memory address.
    @type size: int
    @param size: Buffer size.  A negative size is treated as a buffer that
        extends *backwards* from ``address``.
    @rtype: int
    @return: Buffer size in number of pages.
    """
    if size < 0:
        # Normalize to a forward range starting 'size' bytes earlier.
        size = -size
        address = address - size
    # Expand the range to page boundaries, then count whole pages.
    begin, end = cls.align_address_range(address, address + size)
    return int(float(end - begin) / float(cls.pageSize))
|
mbed-devices-1.0.0 | mbed-devices-1.0.0//mbed_devices/_internal/darwin/device_detector.pyfile:/mbed_devices/_internal/darwin/device_detector.py:function:_format_vendor_id/_format_vendor_id | def _format_vendor_id(vendor_id: str) ->str:
"""Strips vendor name from vendor_id field.
Example:
>>> _format_vendor_id("0x1234 (Nice Vendor Inc.)") # "0x1234"
"""
return vendor_id.split(maxsplit=1)[0]
|
def request_echo(request):
    """Return the request args unchanged (see utils.get_args) - endpoint for testing."""
    return request.args
|
@classmethod
def enabled(cls, val):
    """
    Create an instance of this class set to the ``enabled`` tag with value
    ``val``.

    :param bool val: value carried by the ``enabled`` variant.
    :rtype: PaperAsFilesValue
    """
    return cls('enabled', val)
|
@classmethod
def send_to(cls, host, port, data, **kwargs):
    """
    Initializes a SocketConnector and sends the packet. Deletes the
    SocketConnector afterwards. Alias for:
        SocketConnector(host, port, **kwargs).send(data)

    :return: whatever ``send`` returns for the transmitted payload.
    """
    return cls(host, port, **kwargs).send(data)
|
def update(obj, values):
    """For given object, add all values in iterable values.

    Object and each value must be adaptable to IKeyReference. Identical
    values (as determined by IKeyReference) are collapsed to a single
    instance (so, for instance, a set of [A, B, B, C, B] will be
    collapsed to a logical set of A, B, C).
    """
|
def bump_version_tail(oldver):
    """
    Increment the rightmost dot-separated field of a version string
    (the field is expected to be an integer).
    """
    head, tail = oldver.rsplit('.', 1)
    return '{}.{}'.format(head, int(tail) + 1)
|
def get_listfirst(item, group):
    """Return item's value for the first key of *group* found in *item*.

    ``group`` may be a single key or a list of keys; None is returned when
    no key matches.
    """
    candidates = group if isinstance(group, list) else [group]
    for key in candidates:
        if key in item:
            return item[key]
    return None
|
def describe_hapg(HapgArn=None):
    """
    Retrieves information about a high-availability partition group
    (AWS CloudHSM ``DescribeHapg``).

    Example::

        response = client.describe_hapg(HapgArn='string')

    :type HapgArn: string
    :param HapgArn: [REQUIRED] The ARN of the high-availability partition
        group to describe.
    :rtype: dict
    :return: Description of the group: ``HapgArn``, ``HapgSerial``,
        ``Label``, ``LastModifiedTimestamp``, ``PartitionSerialList``,
        the HSM lists (``HsmsLastActionFailed``, ``HsmsPendingDeletion``,
        ``HsmsPendingRegistration``) and ``State``
        ('READY'|'UPDATING'|'DEGRADED').
    """
    pass
|