repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
wmcore-1.1.19.2 | wmcore-1.1.19.2//src/python/WMCore/WMSpec/WMWorkloadTools.pyfile:/src/python/WMCore/WMSpec/WMWorkloadTools.py:function:_datasetExists/_datasetExists | def _datasetExists(dbsInst, inputData):
"""
__datasetExists_
Check if dataset exists in DBS. Exception is raised in case it does not exist.
"""
if inputData is None:
return
dbsInst.checkDatasetPath(inputData)
return
|
skmultiflow | skmultiflow//data/agrawal_generator.pyclass:AGRAWALGenerator/_classification_function_one | @staticmethod
def _classification_function_one(salary, commission, age, elevel, car,
zipcode, hvalue, hyears, loan):
""" classification_function_one
Parameters
----------
salary: float
Numeric feature: Salary.
commission: float
Numeric feature: Commission.
age: int
Numeric feature: Age.
elevel: int
Categorical feature: Education level.
car: int
Categorical feature: Car maker.
zipcode; int
Categorical feature: Zipcode.
hvalue: flaot
Numeric feature: Value of the house.
hyears: float
Numeric feature: Years house owned.
loan: float
Numeric feature: Total amount of loan.
Returns
-------
int
Returns the sample class label, either 0 or 1.
"""
if age < 40:
return 0 if 50000 <= salary and salary <= 100000 else 1
elif age < 60:
return 0 if 75000 <= salary and salary <= 125000 else 1
else:
return 0 if 25000 <= salary and salary <= 75000 else 1
|
pyFTS | pyFTS//common/FuzzySet.pyfile:/common/FuzzySet.py:function:__binary_search/__binary_search | def __binary_search(x, fuzzy_sets, ordered_sets):
    """
    Search for elegible fuzzy sets to fuzzyfy x
    :param x: input value to be fuzzyfied
    :param fuzzy_sets: a dictionary where the key is the fuzzy set name and the value is the fuzzy set object.
    :param ordered_sets: a list with the fuzzy sets names ordered by their centroids.
    :return: A list with the best fuzzy sets that may contain x
    """
    # Binary search over the centroid-ordered fuzzy-set names.
    max_len = len(fuzzy_sets) - 1
    first = 0
    last = max_len
    while first <= last:
        midpoint = (first + last) // 2
        fs = ordered_sets[midpoint]
        # Neighbouring set names, clamped at the ends of the list.
        fs1 = ordered_sets[midpoint - 1] if midpoint > 0 else ordered_sets[0]
        fs2 = ordered_sets[midpoint + 1
            ] if midpoint < max_len else ordered_sets[max_len]
        # Hit: transform(x) lies between the neighbours' centroids, so the
        # midpoint set and both neighbours are candidate sets for x.
        # NOTE(review): this branch returns a 3-tuple while the boundary
        # branches below return 1-element lists — callers apparently accept
        # either; confirm before unifying the return type.
        if fuzzy_sets[fs1].centroid <= fuzzy_sets[fs].transform(x
            ) <= fuzzy_sets[fs2].centroid:
            return midpoint - 1, midpoint, midpoint + 1
        elif midpoint <= 1:
            # Ran off the low end: first set is the best candidate.
            return [0]
        elif midpoint >= max_len:
            # Ran off the high end: last set is the best candidate.
            return [max_len]
        elif fuzzy_sets[fs].transform(x) < fuzzy_sets[fs].centroid:
            last = midpoint - 1
        else:
            first = midpoint + 1
|
def bucket_to_tablename(prefix, bucket):
    """Convert bucket to SQLAlchemy tablename by prepending *prefix*."""
    return '%s%s' % (prefix, bucket)
|
zun-4.0.0 | zun-4.0.0//zun/pci/utils.pyfile:/zun/pci/utils.py:function:_get_sysfs_netdev_path/_get_sysfs_netdev_path | def _get_sysfs_netdev_path(pci_addr, pf_interface):
"""Get the sysfs path based on the PCI address of the device.
Assumes a networking device - will not check for the existence of the path.
"""
if pf_interface:
return '/sys/bus/pci/devices/%s/physfn/net' % pci_addr
return '/sys/bus/pci/devices/%s/net' % pci_addr
|
@classmethod
def print_forest(cls, root_node, detailed=False, file=None, show_scores=
    False, show_ids=False, visit_all=True, skip_duplicates=False):
    """ Print a parse forest to the given file, or stdout if none """
    printer = cls(detailed, file, show_scores, show_ids, visit_all,
                  skip_duplicates=skip_duplicates)
    printer.go(root_node)
|
def is_doscar(filename):
    """Return True when the file looks like a DOSCAR (fourth line is 'CAR')."""
    with open(filename, 'r') as f:
        # Skip the first three header lines; the signature is on line 4.
        for _ in range(3):
            f.readline()
        return f.readline().strip() == 'CAR'
|
def of_object(obj: object, name: str, parent: str, *, content: [str]=()):
    """SpaceEngine script representation of given star or planet.
    Delegates to the object's own ``se_repr`` method.
    """
    return obj.se_repr(name, parent, content=content)
|
@classmethod
def bind(cls, target):
    """Bind a copy of the collection to the class, modified per our class' settings.
    The given target (and eventual collection returned) must be safe within the
    context the document subclass being bound is constructed within, e.g. at
    module scope this binding must be thread-safe.  Idempotent: a second call
    keeps the existing binding.
    """
    if cls.__bound__ is None:
        cls.__bound__ = cls.get_collection(target)
    return cls
|
def view_selected(use_all_regions: bool=False):
    """Move the view to the selection center (stub for bpy.ops.view3d).
    :param use_all_regions: All Regions, View selected for all regions
    :type use_all_regions: bool
    """
    pass
|
def driverconclude(repo, ms, wctx, labels=None):
    """Run the conclude step of the merge driver, if any.
    Currently an extension point: the default implementation does nothing
    and reports success.
    """
    return True
|
def frequency_to_probability(counts_table):
    """
    Convert a contingency table expressed in frequencies into one expressed
    in probabilities (each cell divided by the grand total).
    """
    grand_total = float(counts_table.sum())
    return counts_table / grand_total
|
def return_row_action(row, action_dict):
    """
    Default action function for Tables.  Returns the whole row of data,
    tag first.  Used by the **TABLE_RETURN_ROW** action.
    :param List row: the data associated with the selected row
    :param Dict action_dict: values associated with the action - ignored here
    :return: A list with the tag followed by every data value of the row.
    :rtype: List
    """
    return [row.tag, *row.values]
|
def dict_diff(prv, nxt):
    """Return a dict of keys that differ with another config object.
    Each differing key maps to the pair ``(prv_value, nxt_value)``;
    a missing key contributes ``None`` on its side.
    """
    # BUGFIX: the original used ``prv.keys() + nxt.keys()`` which is
    # Python-2-only — dict views do not support ``+`` on Python 3.
    # The view union operator works on both the intent and Python 3.
    keys = prv.keys() | nxt.keys()
    result = {}
    for k in keys:
        if prv.get(k) != nxt.get(k):
            result[k] = prv.get(k), nxt.get(k)
    return result
|
metpy | metpy//plots/skewt.pyclass:Hodograph/_form_line_args | @staticmethod
def _form_line_args(kwargs):
"""Simplify taking the default line style and extending with kwargs."""
def_args = {'linewidth': 3}
def_args.update(kwargs)
return def_args
|
def fully_normalize_name(name):
    """Return a case- and whitespace-normalized name."""
    words = name.lower().split()
    return ' '.join(words)
|
def grid3d(width, xScale=1.0, yScale=1.0, zScale=1.0, x0=0.0, y0=0.0, z0=0.0):
    """Returns a 3D grid between (0, 0, 0) and (1, 1, 1)
    :param width: The number of rows/columns the grid has
    :type width: int
    :param xScale: Scales the grid along the x axis
    :type xScale: float
    :param yScale: Scales the grid along the y axis
    :type yScale: float
    :param zScale: Scales the grid along the z axis
    :type zScale: float
    :param x0: Translates the grid along the x axis
    :type x0: float
    :param y0: Translates the grid along the y axis
    :type y0: float
    :param z0: Translates the grid along the z axis
    :type z0: float
    :return: A callable grid that returns 3d positions when given an index"""
    xScale /= width
    yScale /= width
    zScale /= width
    # BUGFIX: use floor division for the row/layer indices.  Under Python 3
    # the original ``/`` produced fractional indices, destroying the grid.
    g = lambda i: (x0 + xScale * (i % width),
                   y0 + yScale * ((i % (width * width)) // width),
                   z0 + zScale * (i // (width * width)))
    g.type = 'grid3d'
    g.width = width
    g.xScale = xScale
    g.yScale = yScale
    g.zScale = zScale
    g.x0 = x0
    g.y0 = y0
    g.z0 = z0
    # BUGFIX: invert translate-then-scale as (coord - offset) / scale; the
    # original divided before subtracting the offset, which was only correct
    # for the default offsets of 0.
    g.inverse = lambda x, y, z: (int(round((x - x0) / xScale)) +
        width * int(round((y - y0) / yScale)) +
        width * width * int(round((z - z0) / zScale)))
    return g
|
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    return '.' if '+' in pieces.get('closest-tag', '') else '+'
|
def colorbar_extension(colour_min, colour_max, data_min, data_max):
    """
    Decide the ``extend`` keyword for a ``matplotlib.pyplot.colorbar`` call.
    Compares the data range (`data_min`/`data_max`) against the colour bar
    limits (`colour_min`/`colour_max`).
    Parameters
    ----------
    colour_min, colour_max : float
        Minimum and maximum value of the current colour bar limits.
    data_min, data_max : float
        Minimum and maximum value of the data limits.
    Returns
    -------
    extension : str
        'both' when the data exceed the limits on both sides, 'min'/'max'
        when only the low/high side is exceeded, 'neither' otherwise.
    """
    exceeds_low = data_min < colour_min
    exceeds_high = data_max > colour_max
    if exceeds_low and exceeds_high:
        return 'both'
    if exceeds_low:
        return 'min'
    if exceeds_high:
        return 'max'
    return 'neither'
|
def convertbits(data, frombits, tobits, pad=True):
    """General power-of-2 base conversion.
    Regroups the *frombits*-wide integers in *data* into *tobits*-wide
    integers.  With ``pad`` a non-empty remainder is zero-padded into one
    final group; without it, leftover bits must be a valid zero remainder.
    Returns ``None`` on invalid input.
    """
    accum = 0
    nbits = 0
    out = []
    max_out = (1 << tobits) - 1
    max_accum = (1 << (frombits + tobits - 1)) - 1
    for value in data:
        # Reject negatives and values wider than *frombits*.
        if value < 0 or (value >> frombits):
            return None
        accum = ((accum << frombits) | value) & max_accum
        nbits += frombits
        while nbits >= tobits:
            nbits -= tobits
            out.append((accum >> nbits) & max_out)
    if pad:
        if nbits:
            out.append((accum << (tobits - nbits)) & max_out)
    elif nbits >= frombits or ((accum << (tobits - nbits)) & max_out):
        return None
    return out
|
def show_list(site, titles):
    """Print a numbered list of the latest article titles for *site*."""
    print(u'The latest tutorials from {}'.format(site))
    for idx, title in enumerate(titles):
        print(u'{:>3} {}'.format(idx, title))
|
@classmethod
def match_header(cls, header):
    """
    Abstract hook: _ImageBaseHDU is effectively an abstract base for HDUs
    holding image (not table) data and must never be used directly, so
    header matching is left to concrete subclasses.
    """
    raise NotImplementedError
|
def get_all(isamAppliance, check_mode=False, force=False):
    """
    Retrieve all logdb objects from the appliance.
    """
    return isamAppliance.invoke_get('Get all logdb objects',
                                    '/core/rsp_logdb_objs')
|
@classmethod
def _collect_pre_conditions(cls):
    """Collect every pre-condition registered through the decorator API."""
    registry_key = '_OPERATION_PRE_CONDITIONS'
    return cls._collect_conditions(registry_key)
|
def setter(cont, tag, data):
    """Store *data* under the key *tag* in a primitive container (a dict)."""
    cont[tag] = data
|
def udid_to_scsi_mapping(vios_w, udid, lpar_id, ignore_orphan=True):
    """Find the client SCSI mapping (if any) for a backing-storage udid.
    Walks the SCSI mappings of a VIOS wrapper (retrieved with
    pypowervm.const.XAG.VIO_SMAP) looking for the mapping whose backing
    storage element (LU, PV, LV, VOpt) matches.
    :param vios_w: Virtual I/O Server wrapper with Storage and SCSI mapping
                   XAGs associated.
    :param udid: The volume's udid.
    :param lpar_id: The LPAR's 'short' id.
    :param ignore_orphan: (Optional, Default: True) when True, mappings with
                          no client adapter are skipped.
    :return: The first matching SCSI mapping, or None.
    """
    for mapping in vios_w.scsi_mappings:
        if not mapping.backing_storage:
            continue
        if ignore_orphan and not mapping.client_adapter:
            continue
        if mapping.server_adapter.lpar_id != lpar_id:
            continue
        if mapping.backing_storage.udid == udid:
            return mapping
    return None
|
def iterateFolderContent(segments):
    """
    Interface stub: return an iterator over the name of each direct child
    of the folder located at *segments*.
    """
|
def denormalize_db_spectrogram(spec, high=20.0, low=-100.0):
    """
    Denormalize a normalized spectrum back into a dB spectrum.
    Args:
        spec: normalized spectrum
        high (float): maximum db level
        low (float): minimum db level
    Returns:
        denormalized db spectrum (affine map of *spec* onto [low, high])
    """
    midpoint = 0.5 * (low + high)
    return spec * (high - midpoint) + midpoint
|
def smooth_tilt():
    """Interpolate tilt of selected points (stub for bpy.ops.curve)."""
    pass
|
def contextfilter(f):
    """Decorator marking *f* as a context dependent filter; the current
    :class:`Context` will be passed as first argument.
    """
    setattr(f, 'contextfilter', True)
    return f
|
def to_tuple(key):
    """Wrap *key* in a one-element tuple unless it already is a tuple."""
    return key if isinstance(key, tuple) else (key,)
|
def cli_cosmosdb_network_rule_list(client, resource_group_name, account_name):
    """ Lists the virtual network accounts associated with a Cosmos DB account """
    account = client.get(resource_group_name, account_name)
    return account.virtual_network_rules
|
@classmethod
def mongo_find_pagenum(cls, collection, skip_num=0, page_size=10,
    sort_filter='c', sorttype=-1, find_filter=None, select_filter=None):
    """Paginated MongoDB ``find`` helper.
    [required]
    :collection: callable returning the collection object
    [optional]
    :skip_num: number of documents to skip
    :page_size: documents per page (default 10)
    :sort_filter: field to sort on (default 'c')
    :sorttype: 1 ascending, -1 descending
    :find_filter: query dict, mongodb format, e.g. {'s_id': 123456}
    :select_filter: projection dict, e.g. {'or_id': 1, 'or_num': 1}
    :returns: (cursor, total) where total is the unpaginated match count
    """
    # BUGFIX: the original used mutable default arguments
    # (``find_filter={}``) which are shared across calls.
    if find_filter is None:
        find_filter = {}
    if select_filter is None:
        select_filter = {}
    total = collection().find(find_filter).count()
    if select_filter:
        cursor = collection().find(find_filter, select_filter).sort(
            sort_filter, sorttype).skip(skip_num).limit(page_size)
    else:
        cursor = collection().find(find_filter).sort(
            sort_filter, sorttype).skip(skip_num).limit(page_size)
    return cursor, total
|
def set_number_of_leds(leds=128):
    """
    Define the number of leds on your strip; must be called when the strip
    has a different number than the default 128.  Also reallocates the
    RGB buffer (three channel slots per led).
    """
    global __NUMBER_LEDS, __rgb_leds
    __NUMBER_LEDS = leds
    __rgb_leds = [0] * (3 * __NUMBER_LEDS)
|
def getCurrencySymbol(id=None):
    """Interface stub: return the symbol of the currency with the given id."""
|
def countdown_iter(start_at, decr=1):
    """Generator that decrements after each generation until <= zero.
    NOTE(harlowja): we can likely remove this when we can use an
    ``itertools.count`` that takes a step (on py2.6 which we still support
    that step parameter does **not** exist and therefore can't be used).
    """
    if decr <= 0:
        raise ValueError(
            'Decrement value must be greater than zero and not %s' % decr)
    current = start_at
    while current > 0:
        yield current
        current -= decr
|
def query(args):
    """Query the existing index; always answers with an empty JSON list."""
    import ruido
    ruido.query('.index', 'find {} return .')
    return '[]'
|
@classmethod
def from_json(cls, data, **transmute_options):
    """Create a new model instance from a JSON string via ``transmute_from``."""
    return cls.transmute_from(data, **transmute_options)
|
@classmethod
def ordered(cls, objs):
    """
    Return the iterable as a list ordered the correct way (newest latest
    message first); a class method because the iterable's type is unknown.
    """
    return sorted(objs, key=lambda o: o.latest_message.sent_at, reverse=True)
|
@classmethod
def get_connection(cls, connection_name=None, pymongo=False):
    """
    Get a connection; return None when the named connection hasn't been
    created.
    :Parameters:
      - `connection_name(optional)`: connection name; defaults to the
        current (default) connection.
      - `pymongo(optional)`: when true return the pymongo client
        (:class:`~pymongo.MongoClient`/`MongoReplicaSetClient`), otherwise
        the motor client (:class:`~motor.MotorClient`/`MotorReplicaSetClient`).
    """
    name = cls._default_connection if connection_name is None else connection_name
    index = 1 if pymongo else 0
    for entry in cls._connections:
        if name in entry:
            return entry[name][index]
    return None
|
def lambd_NH3_CH3COO_CB89(T, P):
    """n-a: ammonia ethanoate [CB89]."""
    # Constant interaction coefficient, only valid at exactly 298.15 K.
    return 0.036, T == 298.15
|
def mean(distribution):
    """Compute the mean of a distribution in the [[point, instances]] syntax.
    Returns NaN for an empty (zero-instance) distribution.
    """
    weighted_sum = 0.0
    total_instances = 0.0
    for point, instances in distribution:
        weighted_sum += point * instances
        total_instances += instances
    return weighted_sum / total_instances if total_instances > 0 else float('nan')
|
PyFMI-2.5 | PyFMI-2.5//src/common/xmlparser.pyfile:/src/common/xmlparser.py:function:_translate_xmlbool/_translate_xmlbool | def _translate_xmlbool(xmlbool):
"""
Helper function which translates strings 'true' and 'false' to bool types.
"""
if xmlbool == 'false':
return False
elif xmlbool == 'true':
return True
else:
raise Exception('The xml boolean ' + str(xmlbool) +
' does not have a valid value.')
|
def is_even(n):
    """Return whether ``n`` is an even number."""
    return n % 2 == 0
|
pyotf | pyotf//phaseretrieval.pyfile:/phaseretrieval.py:function:_calc_mse/_calc_mse | def _calc_mse(data1, data2):
"""utility to calculate mean square error"""
return ((data1 - data2) ** 2).mean()
|
Diofant-0.11.0 | Diofant-0.11.0//diofant/plotting/plot.pyfile:/diofant/plotting/plot.py:function:_matplotlib_list/_matplotlib_list | def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals.
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start, intervalx.end,
intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end, intervaly.end,
intervaly.start, None])
else:
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
|
def envoi_msg(exp, dst, objet, texte, serveur='smtp.safege.net', pwd=None):
    """Sends a simple plain-text message by smtp.
    :param exp: sender address (also used as the login when *pwd* is given)
    :param dst: recipient address, or a list/tuple of addresses
    :param objet: message subject
    :param texte: message body (sent as UTF-8 plain text)
    :param serveur: SMTP server host
    :param pwd: when given, STARTTLS + login are performed before sending
    :return: True on success, False on any SMTP/network failure
    """
    import smtplib
    from email.mime.text import MIMEText
    from email.mime.multipart import MIMEMultipart
    msg = MIMEMultipart()
    msg['Subject'] = objet
    msg['From'] = exp
    if isinstance(dst, (list, tuple)):
        msg['To'] = ';'.join(dst)
    else:
        msg['To'] = dst
    msg.preamble = 'SUEZ Consulting - DCS 2019'
    msg.attach(MIMEText(texte, 'plain', 'utf-8'))
    s = smtplib.SMTP(timeout=3)
    try:
        s.connect(serveur)
        if pwd is not None:
            s.starttls()
            s.login(exp, pwd)
        s.sendmail(exp, dst, msg.as_string())
        return True
    # FIX: the original bare ``except:`` swallowed everything including
    # KeyboardInterrupt; catch only SMTP/network failures.
    except (smtplib.SMTPException, OSError):
        return False
    finally:
        # FIX: the original leaked the socket on failure; always close.
        try:
            s.close()
        except Exception:
            pass
|
@classmethod
def read(cls, source, *args, **kwargs):
    """Read data into a `TimeSeries`.
    Arguments and keywords depend on the output format; see the online
    documentation for full details for each format.  Parameters common to
    most formats:
    Parameters
    ----------
    source : `str`, `list`
        source of data: path of a single data file, path of a LAL-format
        cache file, or a `list` of paths.
    name : `str`, `~gwpy.detector.Channel`
        name of the channel to read, or a `Channel` object.
    start, end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
        GPS bounds of required data (default: full extent of data found);
        any input parseable by `~gwpy.time.to_gps` is fine.
    format : `str`, optional
        source format identifier, auto-detected when possible.
    nproc : `int`, optional
        number of parallel processes to use (serial by default).
    pad : `float`, optional
        value with which to fill gaps (a gap raises `ValueError` otherwise).
    Raises
    ------
    IndexError
        if ``source`` is an empty list
    """
    # Delegate to the unified-I/O reader registered for this class.
    from .io.core import read as timeseries_reader
    return timeseries_reader(cls, source, *args, **kwargs)
|
def delete_elements(fhandle, element_set):
    """Yield PDB lines, dropping coordinate records whose element symbol
    (columns 77-78) is in *element_set*.
    """
    coordinate_records = ('ATOM', 'HETATM', 'ANISOU')
    for line in fhandle:
        if line.startswith(coordinate_records) and \
                line[76:78].strip() in element_set:
            continue
        yield line
|
def ordered_node_weighted_astar(weight):
    """
    Create an ordered-search-node factory for weighted A* search
    (order: g + weight*h).
    @param weight The weight to be used for h
    @returns A function (node, h, node_tiebreaker) -> tuple suitable for
             insertion into priority queues; the tiebreaker (an increasing
             value) prefers the node inserted first on equal ordering.
    Example: ``ordered_node_weighted_astar(42)(node, h, tie)`` builds an
    ordered node with weighted A* ordering and a weight of 42.
    """
    def make_entry(node, h, node_tiebreaker):
        return (node.g + weight * h, h, node_tiebreaker, node)
    return make_entry
|
def unregister(id):
    """Interface stub: unregister a before-save / after-retrieve modifier,
    identified either by its id or by its position.
    """
|
@classmethod
def set_postmortem_debugger(cls, cmdline, auto=None, hotkey=None, bits=None):
    """
    Set the postmortem debugging settings in the Registry.
    @warning: This method requires administrative rights.
    @see: L{get_postmortem_debugger}
    @type cmdline: str
    @param cmdline: Command line to the new postmortem debugger; the first
        "%ld" is replaced with the process ID, the second with the event
        handle.  Quote the program path if it contains spaces.
    @type auto: bool
    @param auto: C{True} for no user interaction, C{False} to prompt before
        attaching; C{None} leaves the value unchanged.
    @type hotkey: int
    @param hotkey: Virtual key scan code for the user defined hotkey
        (C{0} disables it); C{None} leaves the value unchanged.
    @type bits: int
    @param bits: C{32} or C{64} to pick the debugger; {None} means the
        default (L{System.bits}).
    @raise WindowsError: Raises an exception on registry access error.
    """
    if bits is None:
        bits = cls.bits
    elif bits not in (32, 64):
        raise NotImplementedError('Unknown architecture (%r bits)' % bits)
    # 32-bit settings live under Wow6432Node when running on a 64-bit host.
    if bits == 32 and cls.bits == 64:
        keyname = (
            'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug'
            )
    else:
        keyname = (
            'HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug')
    key = cls.registry[keyname]
    # Only update the values the caller actually supplied.
    if cmdline is not None:
        key['Debugger'] = cmdline
    if auto is not None:
        key['Auto'] = int(bool(auto))
    if hotkey is not None:
        key['UserDebuggerHotkey'] = int(hotkey)
|
def pad(size, padding):
    """Apply padding to width and height.
    :param size: two-tuple of width and height
    :param padding: padding object with left/right/top/bottom attributes
    :returns: two-tuple of width and height with padding applied
    """
    return (size[0] + padding.left + padding.right,
            size[1] + padding.top + padding.bottom)
|
def brush_colors_flip():
    """Toggle foreground and background brush colors (stub for bpy.ops.paint)."""
    pass
|
django-print-settings-0.1.2 | django-print-settings-0.1.2//django_print_settings/management/commands/print_settings.pyclass:Command/print_simple | @staticmethod
def print_simple(a_dict):
"""A very simple output format"""
for key, value in a_dict.items():
print('%-40s = %r' % (key, value))
|
def get_total_count(data):
    """
    Retrieve the total count (``totalSize``) from a Salesforce SOQL query.
    :param dict data: data from the Salesforce API
    :rtype: int
    """
    return data['totalSize']
|
def format_t_into_dhms_format(timestamp):
    """ Convert an amount of seconds into days, hours, minutes and seconds
    :param timestamp: seconds
    :type timestamp: int
    :return: 'Ad Bh Cm Ds'
    :rtype: str
    >>> format_t_into_dhms_format(456189)
    '5d 6h 43m 9s'
    >>> format_t_into_dhms_format(3600)
    '0d 1h 0m 0s'
    """
    minutes, seconds = divmod(timestamp, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return '%sd %sh %sm %ss' % (days, hours, minutes, seconds)
|
@classmethod
def keys(cls, name, range_key):
    """ Create an index that projects only key attributes """
    projection = cls.KEYS
    return cls(projection, name, range_key)
|
def get_health_check_status(HealthCheckId=None):
    """
    Gets the status of a specified health check (stub mirroring the Route 53
    ``get_health_check_status`` API; see the AWS API Documentation).
    :type HealthCheckId: string
    :param HealthCheckId: [REQUIRED] The ID for the health check you want the
        current status for, as returned by CreateHealthCheck.
        Note: calculated health checks are not supported by this operation;
        use the Amazon Route 53 console or the CloudWatch console instead.
    :rtype: dict
    :return: mapping with a ``HealthCheckObservations`` list, each entry
        holding ``Region``, ``IPAddress`` and a ``StatusReport`` with
        ``Status`` and ``CheckedTime``.
    """
    pass
|
gabbi | gabbi//case.pyfile:/case.py:function:_is_complex_type/_is_complex_type | def _is_complex_type(data):
"""If data is a list or dict return True."""
return isinstance(data, list) or isinstance(data, dict)
|
def set_user_profile(profile):
    """Used in doctests: install *profile* as the module-level default."""
    global _for_user_profile
    _for_user_profile = profile
|
def raise_error(error):
    """Re-raise *error*; used to pass errors from a child thread to the parent."""
    print('raising da error')
    raise error
|
onc-2.3.5 | onc-2.3.5//onc/modules/_util.pyfile:/onc/modules/_util.py:function:_messageForError/_messageForError | def _messageForError(status: int):
"""
Return a description string for an HTTP error code
"""
errors = {(500): 'Internal server error', (503):
'Service temporarily unavailable', (598): 'Network read timeout error'}
return errors[status]
|
itinerum-tripkit-0.0.22 | itinerum-tripkit-0.0.22//tripkit/process/complete_days/canue/counter.pyfile:/tripkit/process/complete_days/canue/counter.py:function:add_inactivity_periods/add_inactivity_periods | def add_inactivity_periods(daily_summaries):
    """
    Iterate over the daily summaries once forwards and once backwards to supply inactivity information
    to adjacent days to the start and end dates.
    """
    # Forward pass: count consecutive inactive days (no trips and not
    # complete) and remember where each streak started; also record whether
    # the previous day was complete.
    inactive_days, summary_before, first_inactive_day = None, None, None
    for date, summary in sorted(daily_summaries.items()):
        no_trip_data = not any([summary['has_trips'], summary['is_complete']])
        if no_trip_data:
            # NOTE(review): ``if not inactive_days`` also resets a streak
            # counter of 0 — harmless here since 0 never persists, but the
            # falsy check is deliberate-looking; confirm before changing.
            if not inactive_days:
                inactive_days = 0
            inactive_days += 1
            if not first_inactive_day:
                first_inactive_day = date
        else:
            # Active day: break the streak.
            inactive_days = None
            first_inactive_day = None
        summary['consecutive_inactive_days'] = inactive_days
        summary['first_inactive_day'] = first_inactive_day
        if summary_before is not None:
            summary['before_is_complete'] = summary_before['is_complete'
                ] is True
        else:
            summary['before_is_complete'] = False
        summary_before = summary
    # Backward pass: propagate the maximum streak length seen so far (from
    # the end of the period) and whether the following day was complete.
    summary_after = None
    latest_streak_max = 0
    for date, summary in sorted(daily_summaries.items(), reverse=True):
        if summary['consecutive_inactive_days']:
            latest_streak_max = max(latest_streak_max, summary[
                'consecutive_inactive_days'])
        summary['inactive_day_streak'] = latest_streak_max
        if summary_after is not None:
            summary['after_is_complete'] = summary_after['is_complete'] is True
        else:
            summary['after_is_complete'] = False
        summary_after = summary
    # Final pass: stamp the overall maximum streak (or None) on every day.
    max_inactivity_streak = latest_streak_max if latest_streak_max else None
    for date, summary in daily_summaries.items():
        summary['max_inactivity_streak'] = max_inactivity_streak
    return daily_summaries
|
def is_list_of_dict(iterator):
    """Return True if the first non-false item is a dict."""
    for element in iterator:
        if not element:
            continue
        return isinstance(element, dict)
    return False
|
def compile_Livneh2013_locations(maptable):
    """
    Compile a list of file URLs for Livneh et al. 2013 (CIG)
    maptable: (dataframe) rows containing FID, LAT, LONG_ and ELEV for each
    interpolated data file
    """
    base = 'http://www.cses.washington.edu/rocinante/Livneh/Livneh_WWA_2013/forcs_dhsvm/'
    locations = []
    for _, row in maptable.iterrows():
        fname = '_'.join(['data', str(row['LAT']), str(row['LONG_'])])
        locations.append(base + fname)
    return locations
|
def def_default_whitespace(parser, space_label='k_space', space_regex=
    '[ \\t]+', newline_label='k_newline', newline_regex='[\\n\\f\\r\\v]+',
    matcher_options=None):
    """Define the standard whitespace tokens (space and newline) on *parser*,
    registering both as ignored tokens."""
    define_ignored = parser.def_ignored_token
    define_ignored(space_label, space_regex, matcher_options=matcher_options)
    define_ignored(newline_label, newline_regex, matcher_options=matcher_options)
|
PyMeeus-0.3.7 | PyMeeus-0.3.7//pymeeus/Epoch.pyclass:Epoch/tt2ut | @staticmethod
def tt2ut(year, month):
"""This method provides an approximation of the difference, in seconds,
between Terrestrial Time and Universal Time, denoted **DeltaT**, where:
DeltaT = TT - UT.
Here we depart from Meeus book and use the polynomial expressions from:
https://eclipse.gsfc.nasa.gov/LEcat5/deltatpoly.html
Which are regarded as more elaborate and precise than Meeus'.
Please note that, by definition, the UTC time used internally in this
Epoch class by default is kept within 0.9 seconds from UT. Therefore,
UTC is in itself a quite good approximation to UT, arguably better than
some of the results provided by this method.
:param year: Year we want to compute DeltaT for.
:type year: int, float
:param month: Month we want to compute DeltaT for.
:type month: int, float
:returns: DeltaT, in seconds
:rtype: float
>>> round(Epoch.tt2ut(1642, 1), 1)
62.1
>>> round(Epoch.tt2ut(1680, 1), 1)
15.3
>>> round(Epoch.tt2ut(1700, 1), 1)
8.8
>>> round(Epoch.tt2ut(1726, 1), 1)
10.9
>>> round(Epoch.tt2ut(1750, 1), 1)
13.4
>>> round(Epoch.tt2ut(1774, 1), 1)
16.7
>>> round(Epoch.tt2ut(1800, 1), 1)
13.7
>>> round(Epoch.tt2ut(1820, 1), 1)
11.9
>>> round(Epoch.tt2ut(1890, 1), 1)
-6.1
>>> round(Epoch.tt2ut(1928, 2), 1)
24.2
>>> round(Epoch.tt2ut(1977, 2), 1)
47.7
>>> round(Epoch.tt2ut(1998, 1), 1)
63.0
>>> round(Epoch.tt2ut(2015, 7), 1)
69.3
"""
y = year + (month - 0.5) / 12.0
if year < -500:
u = (year - 1820.0) / 100.0
dt = -20.0 + 32.0 * u * u
elif year >= -500 and year < 500:
u = y / 100.0
dt = 10583.6 + u * (-1014.41 + u * (33.78311 + u * (-5.952053 + u *
(-0.1798452 + u * (0.022174192 + 0.0090316521 * u)))))
elif year >= 500 and year < 1600:
dt = 1574.2 + u * (-556.01 + u * (71.23472 + u * (0.319781 + u * (-
0.8503463 + u * (-0.005050998 + 0.0083572073 * u)))))
elif year >= 1600 and year < 1700:
t = y - 1600.0
dt = 120.0 + t * (-0.9808 + t * (-0.01532 + t / 7129.0))
elif year >= 1700 and year < 1800:
t = y - 1700.0
dt = 8.83 + t * (0.1603 + t * (-0.0059285 + t * (0.00013336 - t /
1174000.0)))
elif year >= 1800 and year < 1860:
t = y - 1800.0
dt = 13.72 + t * (-0.332447 + t * (0.0068612 + t * (0.0041116 + t *
(-0.00037436 + t * (1.21272e-05 + t * (-1.699e-07 + 8.75e-10 *
t))))))
elif year >= 1860 and year < 1900:
t = y - 1860.0
dt = 7.62 + t * (0.5737 + t * (-0.251754 + t * (0.01680668 + t * (-
0.0004473624 + t / 233174.0))))
elif year >= 1900 and year < 1920:
t = y - 1900.0
dt = -2.79 + t * (1.494119 + t * (-0.0598939 + t * (0.0061966 -
0.000197 * t)))
elif year >= 1920 and year < 1941:
t = y - 1920.0
dt = 21.2 + t * (0.84493 + t * (-0.0761 + 0.0020936 * t))
elif year >= 1941 and year < 1961:
t = y - 1950.0
dt = 29.07 + t * (0.407 + t * (-1.0 / 233.0 + t / 2547.0))
elif year >= 1961 and year < 1986:
t = y - 1975.0
dt = 45.45 + t * (1.067 + t * (-1.0 / 260.0 - t / 718.0))
elif year >= 1986 and year < 2005:
t = y - 2000.0
dt = 63.86 + t * (0.3345 + t * (-0.060374 + t * (0.0017275 + t * (
0.000651814 + 2.373599e-05 * t))))
elif year >= 2005 and year < 2050:
t = y - 2000.0
dt = 62.92 + t * (0.32217 + 0.005589 * t)
elif year >= 2050 and year < 2150:
dt = -20.0 + 32.0 * ((y - 1820.0) / 100.0) ** 2 - 0.5628 * (2150.0 - y)
else:
u = (year - 1820.0) / 100.0
dt = -20.0 + 32.0 * u * u
return dt
|
def set_remote_address(address):
    """ Enables the user to set the address of the GMQL remote service

    Stores the value in the module-level global ``__remote_address``.

    :param address: a string representing the URL of GMQL remote service
    :return: None
    """
    global __remote_address
    __remote_address = address
|
@classmethod
def get_computation_wallclock_time(cls, calc, **kwargs):
    """Return the computation wall-clock time in seconds, or ``None`` when
    the calculation's output parameters do not record it."""
    params = calc.outputs.output_parameters
    if 'wall_time_seconds' in params.attrs():
        return params.get_attr('wall_time_seconds')
    return None
|
def demean_image(in_file, in_mask=None, out_file=None):
    """
    Demean image data inside mask.

    Subtracts the median (a robust center — despite the function name and the
    ``mean`` variable below) of the in-mask voxels from those voxels, and
    writes the result to a new NIfTI file.

    :param in_file: path to the input NIfTI image
    :param in_mask: optional path to a mask image; voxels > 0 are "inside".
        When omitted, the whole image is used.
    :param out_file: optional output path; defaults to ``./<name>_demean.nii.gz``
    :return: absolute path of the written output file
    """
    import numpy as np
    import nibabel as nb
    import os.path as op
    from nipype.utils import NUMPY_MMAP
    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            # Strip the second extension of ``.nii.gz`` inputs.
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_demean.nii.gz' % fname)
    im = nb.load(in_file, mmap=NUMPY_MMAP)
    data = im.get_data().astype(np.float32)
    msk = np.ones_like(data)
    if in_mask is not None:
        msk = nb.load(in_mask, mmap=NUMPY_MMAP).get_data().astype(np.float32)
        # Binarize: anything positive is inside the mask.
        msk[msk > 0] = 1.0
        msk[msk < 1] = 0.0
    # NOTE(review): variable is named "mean" but holds the median.
    mean = np.median(data[msk == 1].reshape(-1))
    data[msk == 1] = data[msk == 1] - mean
    nb.Nifti1Image(data, im.affine, im.header).to_filename(out_file)
    return out_file
|
@classmethod
def get_template_name(cls):
    """Return the template path of this provider's JS counter code."""
    return 'sitemetrics/{}.html'.format(cls.alias)
|
@classmethod
def where(cls, **filters):
    """
    Shortcut for the smart_query() method: keyword arguments are packed
    into a dict and forwarded unchanged; returns whatever query object
    smart_query() produces.

    Example 1:
        Product.where(subject_ids__in=[1,2], grade_from_id=2).all()
    Example 2:
        filters = {'subject_ids__in': [1,2], 'grade_from_id': 2}
        Product.where(**filters).all()
    Example 3 (with joins):
        Post.where(public=True, user___name__startswith='Bi').all()
    """
    return cls.smart_query(filters)
|
def plot_diagram(figure, points=(), marked_points=(), colored_points=()):
    """
    Plot a single diagram on *figure*'s first axes.

    Parameters
    ----------
    figure : matplotlib.figure.Figure
        A single figure whose first axes is drawn on.
    points : tuple
        Two masked arrays with the x, y coordinates of every point.
    marked_points : tuple
        Two subtuples with the x, y coordinates of highlighted points.
    colored_points : tuple
        Subtuples of (x, y, color-name); coordinates are masked arrays.
    """
    ax = figure.axes[0]
    # Background layer: every point in gray.
    ax.scatter(points[0], points[1], 60, c='gray', alpha=0.4, zorder=1)
    if marked_points != ():
        # Highlighted points on the topmost layer.
        ax.scatter(marked_points[0], marked_points[1], 100, c='red',
                   alpha=1.0, zorder=3)
    if colored_points != ():
        for entry in colored_points:
            ax.scatter(entry[0], entry[1], 60, c=entry[2], alpha=0.6, zorder=2)
    # Invisible scatter that only enables pick events (3-point radius).
    ax.scatter(points[0], points[1], 50, alpha=0.0, picker=3)
    figure.canvas.draw_idle()
|
irida_uploader_cl | irida_uploader_cl//parsers/miniseq/sample_parser.pyfile:/parsers/miniseq/sample_parser.py:function:_parse_out_sequence_file/_parse_out_sequence_file | def _parse_out_sequence_file(sample):
"""
Removes keys in argument sample that are not in sample_keys and
stores them in sequence_file_dict
arguments:
sample -- Sample object
the dictionary inside the Sample object is changed
returns a dictionary containing keys not in sample_keys to be used to
create a SequenceFile object
"""
sample_keys = ['sequencer_sample_name', 'sampleName', 'sample_project']
sequence_file_dict = {}
sample_dict = sample.get_uploadable_dict()
for key in list(sample_dict.keys()):
if key not in sample_keys:
sequence_file_dict[key] = sample_dict[key]
return sequence_file_dict
|
def calcium_C65(Sal):
    """Total calcium in mol/kg-sw following C65.

    Assumes calcium scales conservatively with salinity:
    0.01026 mol/kg-sw at a reference salinity of 35.
    """
    return 0.01026 * Sal / 35
|
def manifest_header(type_name, version='1.0'):
    """Returns a header, suitable for use in a manifest.

    >>> manifest_header("signature")
    'Signature-Version: 1.0'

    :param type_name: The kind of manifest which needs a header:
        "manifest", "signature". It is title-cased in the output.
    :param version: The manifest version to encode in the header
        (default: '1.0')
    """
    # Doctest fixed: the previous expected output used double quotes, which
    # is not the repr doctest compares against.
    return '{}-Version: {}'.format(type_name.title(), version)
|
def check_compatibility(seq1, seq2):
    """Return True when the two DNA sequences share at least one symbol.

    Guards against an old edlib bug that required the two sequences'
    alphabets to overlap.

    :param seq1: first sequence (iterable of single characters)
    :param seq2: second sequence
    :return: True if any character appears in both, else False
    """
    # Set disjointness test is O(len(seq1) + len(seq2)) instead of the
    # original O(n*m) nested scan.
    return not set(seq1).isdisjoint(seq2)
|
@classmethod
def speed_function(cls, location1, location2):
    """Return average speed between two locations in miles per hour.

    This function currently ignores both locations and just returns the
    class property ``average_speed``, but is kept as a hook so it could be
    extended in the future to account for 'real life' — for example some
    roads could be faster than others.

    :param location1: origin location (currently unused)
    :param location2: destination location (currently unused)
    """
    return cls.average_speed
|
def get_access_path(key, parts):
    """Build the final access path (e.g. ``a.b.c[0][1]``) from a list of
    format specifiers.

    Each element of *parts* is an ``(is_attribute, specifier)`` pair:
    attributes render as ``.name``, items as ``[repr(index)]``.
    """
    suffix = ''.join(
        '.{}'.format(spec) if is_attribute else '[{!r}]'.format(spec)
        for is_attribute, spec in parts
    )
    return str(key) + suffix
|
def serialize(tokens):
    """
    Serialize tokens into one space-joined string:
    * whitespace-containing tokens are wrapped in double quotes
    * semicolons are escaped as ``\\;`` (after quoting, so a quoted token's
      semicolons are escaped too)
    """
    rendered = []
    for token in tokens:
        if ' ' in token:
            token = '"{}"'.format(token)
        if ';' in token:
            token = token.replace(';', '\\;')
        rendered.append(token)
    return ' '.join(rendered)
|
def __init__(config, rrset_channel, metrics=None):
    """Initialize an Authority Plugin client.

    Interface method (declared zope-style, without ``self``); concrete
    plugins provide the implementation.

    :param config: plugin configuration
    :param rrset_channel: channel for resource record set messages
    :param metrics: optional metrics collector
    """
|
def get_rule_strings(config_obj, section):
    """
    Extract SLA rule strings from a config section.

    Keys ending in ``.sla`` are moved (suffix stripped) into the returned
    rule dict; all other options remain in the kwargs dict.

    :param config_obj: ConfigParser object (anything exposing ``items(section)``)
    :param section: Section name
    :return: tuple of (rule strings dict, remaining kwargs dict)
    """
    rule_strings = {}
    kwargs = dict(config_obj.items(section))
    # Iterate over a snapshot of the keys: deleting from the dict while
    # iterating its live keys() view raises RuntimeError on Python 3.
    for key in list(kwargs):
        if key.endswith('.sla'):
            # Strip only the trailing suffix; str.replace would also remove
            # any '.sla' occurring in the middle of the key.
            rule_strings[key[:-len('.sla')]] = kwargs.pop(key)
    return rule_strings, kwargs
|
def _estimate_fringe_contrast_statistical(holo):
    """
    Estimates average fringe contrast of a hologram using statistical
    definition: V = STD / MEAN, evaluated over the signal axes.

    Parameters
    ----------
    holo: hologram signal
        A HyperSpy signal (exposes ``axes_manager``, ``std`` and ``mean``),
        not a bare ndarray as the original docstring suggested.

    Returns
    -------
    Fringe contrast (std divided by mean over the signal axes).
    """
    axes = holo.axes_manager.signal_axes
    return holo.std(axes) / holo.mean(axes)
|
def tensor_to_im(tensor):
    """Reshape a tensor as a grayscale image stack.

    Collapses the leading two dimensions of an
    :math:`N\\times K\\times H\\times W` tensor into
    :math:`(NK)\\times 1\\times H\\times W`.

    Args:
        tensor (:class:`torch.Tensor`): tensor to rearrange.

    Returns:
        :class:`torch.Tensor`: Reshaped tensor.
    """
    stacked = tensor.reshape(-1, *tensor.shape[2:])
    # Insert a singleton channel axis after the collapsed batch axis.
    return stacked[:, None, :, :]
|
def remove_quotes(string):
    """
    Remove one pair of matching surrounding quotes from string.
    Ex.:
      "foo" -> foo
      'foo' -> foo
      'foo  -> 'foo

    Only a *matching* pair is stripped: mismatched quotes (e.g. ``"foo'``)
    and a lone quote character are returned unchanged (the previous
    start/end check accepted mismatched pairs and mangled single quotes).
    """
    for quote in ('"', "'"):
        if len(string) >= 2 and string.startswith(quote) and string.endswith(quote):
            return string[1:-1]
    return string
|
def compute_args(request, update=None):
    """Merge all view_args and request args then apply user overrides.

    Precedence (lowest to highest): query args, view_args, *update*.

    :param request: Flask request object.
    :param update: Optional dict of user args (replaces the former mutable
        ``{}`` default argument).
    :return: merged dict of arguments.
    """
    args = request.view_args.copy()
    args = dict(request.args.to_dict(flat=True), **args)
    if update:
        args = dict(args, **update)
    return args
|
adblockparser | adblockparser//parser.pyfile:/parser.py:function:_domain_variants/_domain_variants | def _domain_variants(domain):
"""
>>> list(_domain_variants("foo.bar.example.com"))
['foo.bar.example.com', 'bar.example.com', 'example.com']
>>> list(_domain_variants("example.com"))
['example.com']
>>> list(_domain_variants("localhost"))
['localhost']
"""
parts = domain.split('.')
if len(parts) == 1:
yield parts[0]
else:
for i in range(len(parts), 1, -1):
yield '.'.join(parts[-i:])
|
def poly_midpoint_distance(poly_a, poly_b):
    """Polygon distance function: the euclidean distance between the two
    polygons' midpoints."""
    delta = poly_a.get_centerpoint() - poly_b.get_centerpoint()
    return delta.length
|
papers | papers//encoding.pyfile:/encoding.py:function:_parse_file/_parse_file | def _parse_file(file):
""" parse a single file entry
"""
sfile = file.split(':')
if len(sfile) == 1:
path, type = file, ''
elif len(sfile) == 2:
path, type = sfile
elif len(sfile) == 3:
basename, path, type = sfile
else:
raise ValueError('unknown `file` format: ' + repr(file))
return path
|
scrapy_warcio | scrapy_warcio//warcio.pyfile:/warcio.py:function:_str/_str | def _str(val):
"""
always returns string from str or bytes
"""
return val if isinstance(val, str) else str(val, 'utf-8')
|
@staticmethod
def connect():
    """Connect the transport class. This function must be overridden.

    The base implementation is a stub that always reports failure.

    :return: True-like value when connection successful,
             False-like value otherwise."""
    return False
|
def codecs_error_ascii_to_hex(exception):
    """Codecs error handler: on a unicode decode error (bytes -> unicode),
    replace the invalid bytes with their ``\\xNN`` hex notation.

    Follows the codecs error-handler protocol: returns a tuple of
    (replacement text, resume position). Any exception other than a
    UnicodeDecodeError is re-raised unchanged.
    """
    if isinstance(exception, UnicodeDecodeError):
        obj = exception.object          # the bytes object being decoded
        start = exception.start         # index of the first invalid byte
        end = exception.end             # one past the last invalid byte
        invalid_part = obj[start:end]
        result = []
        for character in invalid_part:
            if isinstance(character, str):
                # Python 2 branch: iterating a str yields 1-char strings;
                # the 'hex' codec only exists there.
                result.append(u'\\x{}'.format(character.encode('hex')))
            elif isinstance(character, int):
                # Python 3 branch: iterating bytes yields ints.
                # hex(0x41) == '0x41', so [1:] leaves 'x41' and the prefix
                # supplies the backslash.
                # NOTE(review): values < 0x10 render with a single hex digit
                # (e.g. '\\x9') — confirm this is the intended notation.
                result.append(u'\\{}'.format(hex(character)[1:]))
            else:
                raise exception
        result = ''.join(result), end
        return result
    raise exception
|
def emitrevisions(nodes, nodesorder=None, revisiondata=False,
    assumehaveparentrevisions=False):
    """Produce ``irevisiondelta`` describing revisions.

    Interface declaration only (no body); implementations yield one delta
    per requested node. See the documentation for ``ifiledata`` for the
    full contract, including the meaning of each keyword argument.
    """
|
def paths_calculate(start_frame: int=1, end_frame: int=250):
    """Calculate motion paths for the selected objects.

    Auto-generated stub mirroring ``bpy.ops.object.paths_calculate``; the
    real operator lives inside Blender, so this body is a no-op used only
    for IDE completion and type checking.

    :param start_frame: Start, First frame to calculate object paths on
    :param end_frame: End, Last frame to calculate object paths on
    """
    pass
|
xt-training-1.8.4 | xt-training-1.8.4//xt_training/metrics.pyfile:/xt_training/metrics.py:function:_auc/_auc | def _auc(fpr, tpr):
"""Calculate AUC given FPR and TPR values.
Note that this function assumes the FPR and TPR values are sorted according to monotonically
increasing probability thresholds.
"""
widths = fpr[:-1] - fpr[1:]
heights = (tpr[:-1] + tpr[1:]) / 2
return (widths * heights).sum()
|
pyutm | pyutm//locate.pyclass:Point/reduce_to_100k | @staticmethod
def reduce_to_100k(number):
"""
Reduces the given coordinate to the nearest 100,000 meters.
Examples:
100,000.0 -> 100,000
123,456.7 -> 100,000
199,999.9 -> 100,000
1,123,456.7 -> 1,100,000
:param number: float, UTM coordinate
:return: int, simplified UTM coordinate
"""
return int(number / 100000) * 100000
|
easyjwt | easyjwt//easyjwt.pyclass:EasyJWT/_is_claim | @classmethod
def _is_claim(cls, instance_var: str) ->bool:
"""
Determine if a given instance variable is part of the token's claim set.
An instance variable will be considered to be a claim if:
* it is listed in :attr:`_private_claims`, or
* it does not start with an underscore, but
* is not listed in :attr:`_public_non_claims'.
:param instance_var: The name of the instance variable to check.
:return: `True` if the instance variable is part of the claim set, `False` otherwise.
"""
if instance_var in cls._private_claims:
return True
if instance_var.startswith('_'):
return False
if instance_var in cls._public_non_claims:
return False
return True
|
def inter_cluster_variance(clusters, graph):
    """
    Within-cluster sum of squares: for every cluster, sum the squared
    distances from each member to the cluster centroid, then add those sums
    together. Distances and centroids come from the given graph's distance
    object (callable, with a ``mean`` method).

    :param clusters: The clusters to determine the squared sum for
    :param graph: The underlying graph that offers a distance function
    :return: Sum over all clusters, or ``inf`` when no clusters are given
    """
    if not clusters:
        return float('inf')
    total = 0
    for cluster in clusters:
        centroid = graph.distance.mean(list(cluster))
        total += sum(graph.distance(centroid, member) ** 2
                     for member in cluster)
    return total
|
cloudmesh-common-4.3.8 | cloudmesh-common-4.3.8//cloudmesh/common/parameter.pyclass:Parameter/find | @staticmethod
def find(name, *dicts):
"""
Finds the value for the key name in multiple dicts
:param name: the key to find
:param dicts: the list of dicts
:return:
"""
for d in dicts:
if type(d) == str:
return d
elif name in d and d[name] is not None:
return d[name]
return None
|