repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
def KK_RC56_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC- (56 RC elements in series with Rs).

    Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)

    Parameters
    ----------
    params : mapping
        Fit parameters; must provide 'Rs' and 'R1' .. 'R56'.
    w : number or ndarray
        Angular frequency (or array of frequencies).
    t_values : sequence
        Time constants, one per RC element (at least 56 entries).

    Returns
    -------
    Complex impedance Rs + sum_k Rk / (1 + j*w*tau_k).
    """
    # The original spelled out 56 identical assignments and terms; the loop
    # below computes exactly the same sum.
    total = params['Rs']
    for k in range(56):
        total = total + params['R%d' % (k + 1)] / (1 + w * 1.0j * t_values[k])
    return total
|
def find_first(ol, test_func, *args):
    """Return the first element of *ol* that satisfies *test_func*.

    ``test_func`` is called as ``test_func(element, *args)``.  Returns a
    dict ``{'index': i, 'value': element}`` for the first match, or
    ``{'index': None, 'value': None}`` when no element matches.

    Example: ``find_first([1, 2, 3, 4], lambda e, x: e > x, 3)`` gives
    ``{'index': 3, 'value': 4}``.  ``find`` is the same as ``find_first``.
    """
    for position, element in enumerate(ol):
        if test_func(element, *args):
            return {'index': position, 'value': element}
    return {'index': None, 'value': None}
|
def pfxlen2mask_int(pfxlen):
    """
    Converts the given prefix length to an IP mask value.

    :type pfxlen: int
    :param pfxlen: A prefix length (0..32).
    :rtype: int
    :return: The mask, as an integer value.
    """
    # BUG FIX: the shift must be truncated to 32 bits, otherwise e.g.
    # pfxlen=24 yields 0xFFFFFFFF00 (40 bits) instead of 0xFFFFFF00.
    return 0xFFFFFFFF << 32 - int(pfxlen) & 0xFFFFFFFF
|
def statevector(circuit):
    """Return the qubit state vector of *circuit*.

    Args:
        circuit (renom_q.QuantumCircuit): the circuit whose quantum
            register holds the state.

    Returns:
        (array): the qubit state vector, e.g. ``array([0.+0.j, 1.+0.j])``
        for a single qubit after an X gate.
    """
    quantum_register = circuit.Qr
    return quantum_register.qubit
|
def modify_instance_fleet(ClusterId=None, InstanceFleet=None):
    """
    Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.
    See also: AWS API Documentation
    :example: response = client.modify_instance_fleet(
    ClusterId='string',
    InstanceFleet={
    'InstanceFleetId': 'string',
    'TargetOnDemandCapacity': 123,
    'TargetSpotCapacity': 123
    }
    )
    :type ClusterId: string
    :param ClusterId: [REQUIRED]
    The unique identifier of the cluster.
    :type InstanceFleet: dict
    :param InstanceFleet: [REQUIRED]
    The unique identifier of the instance fleet.
    InstanceFleetId (string) -- [REQUIRED]A unique identifier for the instance fleet.
    TargetOnDemandCapacity (integer) --The target capacity of On-Demand units for the instance fleet. For more information see InstanceFleetConfig$TargetOnDemandCapacity .
    TargetSpotCapacity (integer) --The target capacity of Spot units for the instance fleet. For more information, see InstanceFleetConfig$TargetSpotCapacity .
    """
    # Documentation-only stub (pyboto3 generates these for IDE help); the
    # real request is dispatched through the live boto3 client.
    pass
|
def phase_folding(t, y, yerr, period):
    """Phase-fold a time series at a given period.

    Parameters
    ----------
    t: array
        Time array
    y: array
        Measurements array
    yerr: array or None
        Measurement errors; when None, zero errors are substituted.
    period: float
        Period to fold the data at

    Returns
    -------
    phase: tuple
        Phases in [0, 1), sorted ascending
    folded_y: tuple
        Measurements sorted by phase
    folded_yerr: tuple
        Errors sorted by phase
    """
    phase_values = (t / period) % 1
    if yerr is None:
        yerr = 0 * y
    triples = sorted(zip(phase_values, y, yerr))
    phase, folded_y, folded_yerr = zip(*triples)
    return phase, folded_y, folded_yerr
|
def collection_link():
    """Link selected collections to active scene.

    Signature-only stub generated for the fake bpy module
    (``bpy.ops.outliner.collection_link``); it performs no action.
    """
    pass
|
def build_option(value, label=None):
    """
    Returns value/label inputs in a dictionary for use in
    :dash:`dash_core_components.Dropdown <dash-core-components/Dropdown>`.
    When *label* is falsy the *value* doubles as the display label.
    """
    display = label or value
    return {'value': value, 'label': display}
|
@classmethod
def setup_file_filter(cls):
    """
    Creates a file filter based on a list of extensions set in the
    'extensions' attribute of the class using the 'description' attribute
    as the name for the filter. If additional properties are needed, this
    function should be overridden by subclasses.
    """
    # `is None` instead of the original `== None` (identity, not equality).
    if cls.file_filter is None and cls.description != '' and cls.extensions:
        try:
            import gi
            gi.require_version('Gtk', '3.0')
            from gi.repository import Gtk
        except ImportError:
            # Gtk is optional; without it the filter simply stays unset.
            pass
        else:
            cls.file_filter = Gtk.FileFilter()
            cls.file_filter.set_name(cls.description)
            # NOTE(review): the original iterated cls.mimetypes doing
            # nothing (a dead `for ... : pass` loop) -- mime types are
            # intentionally NOT registered on the filter; confirm before
            # adding add_mime_type() calls.
            for expr in cls.extensions:
                cls.file_filter.add_pattern(expr)
            setattr(cls.file_filter, 'parser', cls)
|
def splitting(df, target):
    """Split *df* into stratified train and test sets (80% / 20%).

    Stratification uses the *target* column; the random seed is fixed so
    splits are reproducible.
    """
    from sklearn.model_selection import train_test_split
    split_options = dict(test_size=0.2, stratify=df[target], random_state=2019)
    return train_test_split(df, **split_options)
|
def cmp(a, b):
    """Three-way comparison of A and B: return 1 when A > B, -1 when
    A < B, and 0 otherwise (Python 2 ``cmp`` replacement)."""
    return (a > b) - (a < b)
|
def ask_if_route_display_wanted():
    """Ask the user whether each calculated route should be displayed.

    Returns True for a 'y'/'yes' answer (case/whitespace-insensitive).
    """
    answer = input('Would you like to see each route that is calculated?\n')
    normalized = answer.lower().strip()
    return normalized in ('y', 'yes')
|
def remove_crud(string):
    """Return *string* without useless information.

    Strips trailing zeros after a decimal point, trailing decimal points,
    leading zeros/spaces, and trailing spaces.
    """
    has_decimal = '.' in string
    if has_decimal:
        string = string.rstrip('0')
    string = string.lstrip('0 ')
    string = string.rstrip(' .')
    return string
|
def get_a_node(base, node_id, show_details=False):
    """Utility function that fetches a Senlin node via the test client.

    When *show_details* is truthy the detail flag is forwarded as a query
    parameter; returns the response body.
    """
    query = {'show_details': True} if show_details else None
    response = base.client.get_obj('nodes', node_id, query)
    return response['body']
|
def convert_softmax(builder, layer, input_names, output_names, keras_layer):
    """Convert a softmax layer from Keras to Core ML.

    Parameters
    ----------
    builder: NeuralNetworkBuilder
        A neural network builder object.
    keras_layer: layer
        A keras layer object (unused; the softmax has no parameters).
    """
    first_input = input_names[0]
    first_output = output_names[0]
    builder.add_softmax(name=layer, input_name=first_input,
        output_name=first_output)
|
@classmethod
def from_image(klass, img):
    """Class method: build a new instance of our own class from `img`.

    Parameters
    ----------
    img : ``spatialimage`` instance
        Any object exposing ``dataobj``, ``affine``, ``header`` and
        ``extra`` (the ``spatialimage`` API).

    Returns
    -------
    cimg : ``spatialimage`` instance
        Image of our own class, with the header converted via
        ``klass.header_class.from_header`` and ``extra`` shallow-copied.
    """
    converted_header = klass.header_class.from_header(img.header)
    return klass(img.dataobj, img.affine, converted_header,
        extra=img.extra.copy())
|
@classmethod
def getbroker(cls, *args, **kwargs):
    """Instantiate and return the registered `BrokerCls` broker,
    forwarding *args and **kwargs to its constructor."""
    broker_factory = cls.BrokerCls
    return broker_factory(*args, **kwargs)
|
def render_plugins(plugins, context, placeholder, processors=None):
    """
    Render a collection of plugins with the given context, using the
    appropriate processors for a given placeholder name, and return a list
    with one "rendered content" string per plugin.

    This is the main plugin rendering utility function; use it rather than
    Plugin.render_plugin().  Each plugin's ``_render_meta`` is stamped with
    its position and the total count before rendering, and the context is
    pushed/popped around each render so plugins cannot leak variables.
    """
    rendered = []
    count = len(plugins)
    for position, plugin in enumerate(plugins):
        plugin._render_meta.total = count
        plugin._render_meta.index = position
        context.push()
        rendered.append(plugin.render_plugin(context, placeholder,
            processors=processors))
        context.pop()
    return rendered
|
def PolsAnnuity(t):
    """Number of policies at time ``t``: Annuity.

    Annuity benefits are not modeled in this projection, so the count is
    always zero regardless of ``t``.
    """
    return 0
|
def redis_stream_id_add_one(message_id):
    """Return the Redis stream ID one sequence number after *message_id*.

    Useful when xrange() must be exclusive of the given ID rather than
    inclusive (the sensible default).  No events can be missed this way.
    The millisecond part is zero-padded to 13 digits.
    """
    ms_text, seq_text = message_id.split('-')
    next_seq = int(seq_text) + 1
    return '{:013d}-{}'.format(int(ms_text), next_seq)
|
def __remove(node, com, weight, status):
    """Remove ``node`` from community ``com`` and update ``status`` totals.

    :param node: node being detached from its community
    :param com: community identifier the node currently belongs to
    :param weight: total weight of the node's links into ``com``
    :param status: mutable bookkeeping object holding the per-community
        degree/internal-weight sums used by the Louvain modularity updates
    """
    # Subtract the node's weighted degree from the community's degree sum.
    status.degrees[com] = status.degrees.get(com, 0.0) - status.gdegrees.get(
        node, 0.0)
    # Internal weight loses the node's links into the community plus its
    # self-loop weight.
    status.internals[com] = float(status.internals.get(com, 0.0) - weight -
        status.loops.get(node, 0.0))
    # -1 marks the node as temporarily homeless (no community).
    status.node2com[node] = -1
    if status.directed:
        # Directed variant also tracks the in-degree*out-degree product term
        # used by the directed modularity formula.
        status.inoutdegrees[com] = status.inoutdegrees.get(com, 0.0
            ) - status.gindegrees.get(node, 0.0) * status.goutdegrees.get(node,
            0.0)
|
def seconds_to_text(seconds):
    """
    Convert `seconds` to a text representation like ``'1h 2m 3s'``.

    Zero-valued units are omitted entirely (so 0 yields ``''``, and e.g.
    3600 yields ``'1h '`` including the trailing space).

    :param seconds: seconds to convert
    :type seconds: int
    :return: `seconds` as text representation
    :rtype: str
    """
    hours, remainder = divmod(seconds // 60, 60)
    secs = seconds % 60
    pieces = []
    if hours > 0:
        pieces.append(str(hours) + 'h ')
    if remainder > 0:
        pieces.append(str(remainder) + 'm ')
    if secs > 0:
        pieces.append(str(secs) + 's')
    return ''.join(pieces)
|
def readIDENT(stream):
    """
    Read a Variable-Length Identifier from the given stream.

    Some DLIS vendors do not follow the standard (whitespace and non-ASCII
    bytes appear), so when ASCII decoding fails we retry with CP1252.

    :type stream: FileIO or BytesIO
    :param stream: Where to read from
    :return: the decoded identifier
    :rtype: str
    """
    length = stream.read(1)[0]
    raw = stream.read(length)
    try:
        return raw.decode('ascii')
    except UnicodeDecodeError:
        return raw.decode('cp1252')
|
cassandra | cassandra//datastax/insights/util.pyfile:/datastax/insights/util.py:function:_module_internal_namespace_or_emtpy_string/_module_internal_namespace_or_emtpy_string | def _module_internal_namespace_or_emtpy_string(cls):
"""
Best-effort method for getting the module-internal namespace in which a
class is defined -- i.e. the namespace _inside_ the module.
"""
try:
qualname = cls.__qualname__
except AttributeError:
return ''
return '.'.join(qualname.split('.')[:-1])
|
def superclass_name(instance):
    """
    Given an instance, return the lowercased name of the penultimate class
    in its MRO (the last entry is always ``object``).  Used for dynamic
    method lookups in adaptor classes via serializer.py and store.py while
    still allowing model entities to be subclassed; subclasses must ensure
    their ``__mro__`` puts Bag, User, Recipe or Tiddler in that slot.
    """
    hierarchy = instance.__class__.mro()
    penultimate = hierarchy[-2]
    return penultimate.__name__.lower()
|
@staticmethod
def squash_results(responses):
    """Combine the 'results' lists from multiple responses into one list.

    :param responses: iterable of HTTP response objects exposing ``json()``
    :return: concatenation of every response's ``results`` list; responses
        without a ``results`` key are skipped.
    """
    results = []
    for response in responses:
        # Parse the body once and reuse it -- the original called
        # response.json() twice per response (double parse).
        payload = response.json()
        if 'results' in payload:
            results += payload['results']
    return results
|
stagpy-0.14.1 | stagpy-0.14.1//stagpy/stagyydata.pyfile:/stagpy/stagyydata.py:function:_as_view_item/_as_view_item | def _as_view_item(obj):
"""Return None or a suitable iterable to build a _StepsView."""
try:
iter(obj)
return obj
except TypeError:
pass
if isinstance(obj, slice):
return obj,
|
def validate_inner_strings(line_part):
    """
    Check that ``line_part`` has the structure ``"a param", 'another param', ...``

    Returns the list of found params, or None if ``line_part`` is malformed.
    An empty (after stripping) ``line_part`` yields an empty list.
    Implemented as a small character-by-character state machine; backslash
    escapes inside a string consume the following character verbatim.
    """
    # State constants for the scanner.
    PARAMS_START = 1
    STRING_INNER = 2
    STRING_AFTER = 4
    STRING_BETWEEN = 8
    quots = '\'"'
    params = []
    # Sentinel returned for any malformed input.
    wrong = None
    line_part = line_part.strip()
    if not line_part:
        return params
    check_position = 0
    state = PARAMS_START
    taken_string = ''
    string_opened = ''
    line_part_len = len(line_part)
    while True:
        if check_position == line_part_len:
            # End of input is only valid before a string or right after one.
            if state not in [PARAMS_START, STRING_AFTER]:
                return wrong
            return params
        check_char = usage_char = line_part[check_position]
        check_position += 1
        # Backslash escape inside a string: keep both characters in the
        # collected text and neutralize check_char so it cannot close the
        # string or be misread as a delimiter.
        if (STRING_INNER == state and check_char == '\\' and check_position <
            line_part_len):
            usage_char += line_part[check_position]
            check_char = '_'
            check_position += 1
        if PARAMS_START == state:
            # The very first character must open a quoted string.
            if check_char not in quots:
                return wrong
            string_opened = check_char
            state = STRING_INNER
            continue
        if STRING_INNER == state:
            # A matching quote closes the current parameter.
            if check_char == string_opened:
                state = STRING_AFTER
                params.append(taken_string)
                taken_string = ''
                continue
            taken_string += usage_char
            continue
        if STRING_AFTER == state:
            # After a closed string: allow whitespace, then require a comma.
            if check_char in ' \t':
                continue
            if check_char == ',':
                state = STRING_BETWEEN
                continue
            return wrong
        if STRING_BETWEEN == state:
            # After a comma: allow whitespace, then require a new string.
            if check_char in ' \t':
                continue
            if check_char in quots:
                string_opened = check_char
                state = STRING_INNER
                continue
            return wrong
|
def join_title(namespace, title):
    """
    Prefix a namespace to a page title (``namespace:title``); a falsy
    namespace leaves the title unchanged.
    """
    if not namespace:
        return title
    return '%s:%s' % (namespace, title)
|
@staticmethod
def write(fileobject, content, mode='a'):
    """Write *content* (str or bytes, matching *mode*) to the file backing
    the given file object (appends by default)."""
    with open(fileobject._path, mode) as handle:
        handle.write(content)
|
def register_interface(interface, omit=(), prefix=None):
    """Create a set of records based on the given interface.

    For each schema field in the interface, a record will be inserted with
    a name like ``${interface.__identifier__}.${field.__name__}``, and a
    value equal to the default value of that field. Any field with a name
    listed in ``omit``, or with the ``readonly`` property set to True, will
    be ignored. Supply an alternative identifier with ``prefix``.

    Interface declaration only (zope.interface style, hence no ``self``);
    implementations provide the body.
    """
|
def ordinal(n):
    """
    Turn an integer into its ordinal representation (1 -> '1st').

    :param n: number to be converted
    :return: string with ordinal number
    """
    # 11/12/13 always take 'th'; otherwise the suffix follows the last digit.
    if n % 100 in (11, 12, 13):
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')
    return '%d%s' % (n, suffix)
|
def getnamedargs(*args, **kwargs):
    """Merge positional dicts and named args into one dict.

    Lets you pass ``({'a':5, 'b':3}, c=8)`` and get ``dict(a=5, b=3, c=8)``;
    keyword args win on key collisions, non-dict positionals are ignored.
    """
    merged = {}
    for positional in args:
        if isinstance(positional, dict):
            merged.update(positional)
    merged.update(kwargs)
    return merged
|
def gelqf(A=None, name=None, attr=None, out=None, **kwargs):
    """LQ factorization for general matrix.
    Input is a tensor *A* of dimension *n >= 2*.
    If *n=2*, we compute the LQ factorization (LAPACK *gelqf*, followed by *orglq*). *A*
    must have shape *(x, y)* with *x <= y*, and must have full rank *=x*. The LQ
    factorization consists of *L* with shape *(x, x)* and *Q* with shape *(x, y)*, so
    that:
    *A* = *L* \\* *Q*
    Here, *L* is lower triangular (upper triangle equal to zero) with nonzero diagonal,
    and *Q* is row-orthonormal, meaning that
    *Q* \\* *Q*\\ :sup:`T`
    is equal to the identity matrix of shape *(x, x)*.
    If *n>2*, *gelqf* is performed separately on the trailing two dimensions for all
    inputs (batch mode).
    .. note:: The operator supports float32 and float64 data types only.
    Examples::
    Single LQ factorization
    A = [[1., 2., 3.], [4., 5., 6.]]
    Q, L = gelqf(A)
    Q = [[-0.26726124, -0.53452248, -0.80178373],
    [0.87287156, 0.21821789, -0.43643578]]
    L = [[-3.74165739, 0.],
    [-8.55235974, 1.96396101]]
    Batch LQ factorization
    A = [[[1., 2., 3.], [4., 5., 6.]],
    [[7., 8., 9.], [10., 11., 12.]]]
    Q, L = gelqf(A)
    Q = [[[-0.26726124, -0.53452248, -0.80178373],
    [0.87287156, 0.21821789, -0.43643578]],
    [[-0.50257071, -0.57436653, -0.64616234],
    [0.7620735, 0.05862104, -0.64483142]]]
    L = [[[-3.74165739, 0.],
    [-8.55235974, 1.96396101]],
    [[-13.92838828, 0.],
    [-19.09768702, 0.52758934]]]
    Defined in src/operator/tensor/la_op.cc:L798
    Parameters
    ----------
    A : Symbol
    Tensor of input matrices to be factorized
    name : string, optional.
    Name of the resulting symbol.
    Returns
    -------
    Symbol
    The result symbol.
    """
    # Auto-generated documentation stub: the real operator is provided by
    # MXNet's compiled backend; this fallback returns a placeholder tuple.
    return 0,
|
@classmethod
def sso_change_login_url_details(cls, val):
    """
    Create an instance of this class set to the
    ``sso_change_login_url_details`` tag with value ``val``.

    :param SsoChangeLoginUrlDetails val:
    :rtype: EventDetails
    """
    tag = 'sso_change_login_url_details'
    return cls(tag, val)
|
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" suffix means the working tree is dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = tag
        if pieces['distance'] or pieces['dirty']:
            rendered += '.post%d' % pieces['distance']
            if pieces['dirty']:
                rendered += '.dev0'
    else:
        rendered = '0.post%d' % pieces['distance']
        if pieces['dirty']:
            rendered += '.dev0'
    return rendered
|
@classmethod
def _check_compatible(cls, metadata):
    """
    Check if the version recorded in ``metadata`` is compatible.

    If the check passes nothing happens, otherwise an exception is raised.
    Currently a no-op placeholder: every metadata is accepted.
    """
    pass
|
def restore_attrs(wrapper):
    """
    Restore all attributes of a wrapped module to their original values,
    removing any that weren't originally set.

    :param wrapper: object exposing ``_wrapped_module`` (the module),
        ``_attrs_to_restore`` (dict of original values to reinstate) and
        ``_delete_settings`` (names to remove; missing ones are ignored).
    """
    # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; use items() instead.
    for key, value in wrapper._attrs_to_restore.items():
        setattr(wrapper._wrapped_module, key, value)
    for setting in wrapper._delete_settings:
        try:
            delattr(wrapper._wrapped_module, setting)
        except AttributeError:
            # Attribute never existed (or already removed) -- nothing to do.
            pass
|
def get_file_or_default(metric_file):
    """Return the module name from which to extract metrics.

    :param str metric_file: the module name to extract metrics from, or
        None to fall back to the default ``'cohorts.metrics'``
    :return: the name of the module where metric functions reside
    """
    if metric_file is None:
        return 'cohorts.metrics'
    return metric_file
|
interkamen_career-1.18.9 | interkamen_career-1.18.9//interkamen_career/modules/support_modules/dump_to_exl.pyclass:DumpToExl/_create_pass_name | @classmethod
def _create_pass_name(cls, passport) ->str:
"""Create passport name."""
pass_name = '{}-{}-{} {}'.format(passport.params.year, passport.params.
month, passport.params.day, int(passport.params.number))
return pass_name
|
def PolsLiving(t):
    """Number of policies at time ``t``: Living benefits.

    Living benefits are not modeled in this projection, so the count is
    always zero regardless of ``t``.
    """
    return 0
|
def check_bar_match(old_bar, new_bar):
    """Check if two bars belong in the same collection (bar chart).

    Bars match when they share direction and face color, have (nearly)
    equal width/height, and share a baseline coordinate.

    Positional arguments:
    old_bar -- a previously sorted bar dictionary.
    new_bar -- a new bar dictionary that needs to be sorted.
    """
    tests = []
    tests.append(new_bar['bardir'] == old_bar['bardir'])
    tests.append(new_bar['facecolor'] == old_bar['facecolor'])
    if new_bar['bardir'] == 'v':
        new_width = new_bar['x1'] - new_bar['x0']
        old_width = old_bar['x1'] - old_bar['x0']
        # BUG FIX: compare the absolute difference -- without abs(), a much
        # narrower new bar (negative difference) wrongly passed the test.
        tests.append(abs(new_width - old_width) < 1e-06)
        tests.append(new_bar['y0'] == old_bar['y0'])
    elif new_bar['bardir'] == 'h':
        new_height = new_bar['y1'] - new_bar['y0']
        old_height = old_bar['y1'] - old_bar['y0']
        tests.append(abs(new_height - old_height) < 1e-06)
        tests.append(new_bar['x0'] == old_bar['x0'])
    return all(tests)
|
@classmethod
def all_files(cls):
    """Return every loaded resource, in no particular order."""
    assets = cls._assets
    return assets.all_files()
|
def count_to_half(array):
    """Determine the half-initial-value index.

    Counts how many elements after ``array[0]`` stay at or above half of
    the initial value ``array[0]`` before the first drop below it.
    """
    half = array[0] / 2
    count = 0
    for value in array[1:]:
        if value < half:
            break
        count += 1
    return count
|
scipy | scipy//signal/filter_design.pyfile:/signal/filter_design.py:function:_falling_factorial/_falling_factorial | def _falling_factorial(x, n):
"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\\underline n = (x)_n = x (x-1) \\cdots (x-n+1)
This can more efficiently calculate ratios of factorials, since:
n!/m! == falling_factorial(n, n-m)
where n >= m
skipping the factors that cancel out
the usual factorial n! == ff(n, n)
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
|
def description(filename):
    """Provide a short description.

    Skips the first three lines of *filename* and returns the first
    non-blank line after that (stripped); returns None when none exists.
    """
    with open(filename) as fp:
        for lineno, line in enumerate(fp):
            if lineno < 3:
                continue
            stripped = line.strip()
            if stripped:
                return stripped
|
def __call__(name=None, data=None, content_type=None):
    """Look up a charset.

    Interface declaration (zope.interface style, hence no ``self``).
    Implementations determine the charset from ``name``, ``data`` and/or
    ``content_type``; if a charset cannot be determined based on the
    input, this returns `None`.
    """
|
@staticmethod
def get_opt(opt_name, args):
    """Return the option name together with its parsed value.

    :param opt_name: name of the option, e.g., "username"; looked up on
        *args* as the ``os_``-prefixed attribute (None when absent)
    :param args: parsed arguments
    """
    value = getattr(args, 'os_%s' % opt_name, None)
    return opt_name, value
|
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = tag
        if pieces['distance']:
            rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
    else:
        rendered = pieces['short']
    if pieces['dirty']:
        rendered += '-dirty'
    return rendered
|
def __repr__(A):
    """Return the string representation used by `print` and `pprint`.

    Delegates to ``to_str()``.  The minified parameter name ``A`` is this
    instance (i.e. ``self``).
    """
    return A.to_str()
|
def describe_elasticsearch_instance_type_limits(DomainName=None,
    InstanceType=None, ElasticsearchVersion=None):
    """
    Describe Elasticsearch Limits for a given InstanceType and ElasticsearchVersion. When modifying existing Domain, specify the `` DomainName `` to know what Limits are supported for modifying.
    See also: AWS API Documentation
    :example: response = client.describe_elasticsearch_instance_type_limits(
    DomainName='string',
    InstanceType='m3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
    ElasticsearchVersion='string'
    )
    :type DomainName: string
    :param DomainName: DomainName represents the name of the Domain that we are trying to modify. This should be present only if we are querying for Elasticsearch `` Limits `` for existing domain.
    :type InstanceType: string
    :param InstanceType: [REQUIRED]
    The instance type for an Elasticsearch cluster for which Elasticsearch `` Limits `` are needed.
    :type ElasticsearchVersion: string
    :param ElasticsearchVersion: [REQUIRED]
    Version of Elasticsearch for which `` Limits `` are needed.
    :rtype: dict
    :return: {
    'LimitsByRole': {
    'string': {
    'StorageTypes': [
    {
    'StorageTypeName': 'string',
    'StorageSubTypeName': 'string',
    'StorageTypeLimits': [
    {
    'LimitName': 'string',
    'LimitValues': [
    'string',
    ]
    },
    ]
    },
    ],
    'InstanceLimits': {
    'InstanceCountLimits': {
    'MinimumInstanceCount': 123,
    'MaximumInstanceCount': 123
    }
    },
    'AdditionalLimits': [
    {
    'LimitName': 'string',
    'LimitValues': [
    'string',
    ]
    },
    ]
    }
    }
    }
    :returns:
    Data: If the given InstanceType is used as Data node
    Master: If the given InstanceType is used as Master node
    """
    # Documentation-only stub (pyboto3 generates these for IDE help); the
    # real request is dispatched through the live boto3 client.
    pass
|
zun-4.0.0 | zun-4.0.0//zun/objects/volume.pyclass:Volume/_from_db_object | @staticmethod
def _from_db_object(volume, db_volume):
"""Converts a database entity to a formal object."""
for field in volume.fields:
setattr(volume, field, db_volume[field])
volume.obj_reset_changes()
return volume
|
pyepw-0.1 | pyepw-0.1//pyepw/epw.pyclass:GroundTemperatures/_to_str | @classmethod
def _to_str(cls, value):
"""Represents values either as string or None values as empty string.
Args:
value: a value
"""
if value is None:
return ''
else:
return str(value)
|
python-rhnapi-5.4.1.post4 | python-rhnapi-5.4.1.post4//rhnapi/utils.pyfile:/rhnapi/utils.py:function:get_errid/get_errid | def get_errid(errobj):
    """
    fetch the YYYY:NNNN part from an errata dict object
    basically strips off the CLA/RHSA etc prefix
    parameters:
    errobj(dict): dict representing an erratum in RHN
    returns:
    string: YYYY:NNNN from an erratum
    """
    # Advisory IDs look like 'RHSA-YYYY:NNNN'; keep the part after the first
    # hyphen. NOTE(review): raises IndexError if the advisory contains no
    # hyphen, and AttributeError if the 'advisory' key is absent (get()
    # returns None) -- confirm callers always supply a well-formed erratum.
    return errobj.get('advisory').split('-')[1]
|
dipy | dipy//utils/_importlib.pyfile:/utils/_importlib.py:function:_resolve_name/_resolve_name | def _resolve_name(name, package, level):
    """Return the absolute name of the module to be imported.

    Mirrors the stdlib importlib helper: walks ``level`` dots up from
    ``package`` and prefixes the remaining package path onto ``name``.
    """
    # Duck-typed string check: any object without 'rindex' is rejected.
    if not hasattr(package, 'rindex'):
        raise ValueError("'package' not set to a string")
    dot = len(package)
    # Drop one trailing package component per relative-import level beyond 1.
    for x in range(level, 1, -1):
        try:
            dot = package.rindex('.', 0, dot)
        except ValueError:
            raise ValueError('attempted relative import beyond top-level ' +
                'package')
    return '%s.%s' % (package[:dot], name)
|
Django_504 | Django_504//contrib/gis/gdal/prototypes/errcheck.pyfile:/contrib/gis/gdal/prototypes/errcheck.py:function:check_str_arg/check_str_arg | def check_str_arg(result, func, cargs):
    """
    This is for the OSRGet[Angular|Linear]Units functions, which
    require that the returned string pointer not be freed. This
    returns both the double and string values.
    """
    # 'result' is the double returned by the C call.
    dbl = result
    # The last ctypes argument is an out-parameter holding the string
    # pointer; '_obj' unwraps the byref() wrapper.
    ptr = cargs[-1]._obj
    return dbl, ptr.value.decode()
|
panda-pilot-2.5.4.13 | panda-pilot-2.5.4.13//pilot/user/generic/common.pyfile:/pilot/user/generic/common.py:function:get_payload_command/get_payload_command | def get_payload_command(job):
    """
    Return the full command for executing the payload, including the sourcing of all setup files and setting of
    environment variables.
    :param job: job object
    :return: command (string)
    """
    # Generic (stub) user implementation: no setup is needed, so the
    # command is empty. Experiment-specific plugins override this.
    return ''
|
wpedit-0.4.5 | wpedit-0.4.5//wpedit/html2text.pyfile:/wpedit/html2text.py:function:onlywhite/onlywhite | def onlywhite(line):
    """Return true if the line does only consist of whitespace characters."""
    for c in line:
        # NOTE(review): 'is'/'is not' compare object identity, not equality;
        # relying on them for one-character strings depends on CPython string
        # interning. Both literals here also render as a plain space in this
        # copy -- upstream html2text compares against tab and space. Verify
        # against the original before touching this logic.
        if c is not ' ' and c is not ' ':
            return c is ' '
    # An all-whitespace (or empty) line is returned as-is, i.e. truthiness
    # of the return value is what callers test.
    return line
|
rinoh | rinoh//backend/pdf/xobject/purepng.pyfile:/backend/pdf/xobject/purepng.py:function:isinteger/isinteger | def isinteger(x):
    """Check if `x` is platform native integer"""
    try:
        # Integral floats such as 3.0 also pass this test; a TypeError or
        # ValueError means x is not coercible to int at all.
        return int(x) == x
    except (TypeError, ValueError):
        return False
|
refnx | refnx//util/general.pyfile:/util/general.py:function:height_of_beam_after_dx/height_of_beam_after_dx | def height_of_beam_after_dx(d1, d2, L12, distance):
    """
    Calculate the total widths of beam a given distance away from a collimation
    slit.
    if distance >= 0, then it's taken to be the distance after d2.
    if distance < 0, then it's taken to be the distance before d1.
    Parameters
    ----------
    d1: float
        opening of first collimation slit
    d2: float
        opening of second collimation slit
    L12: float
        distance between first and second collimation slits
    distance: float
        distance from first or last slit to a given position
    Notes
    -----
    Units - equivalent distances (inches, mm, light years)
    Returns
    -------
    (umbra, penumbra): float, float
        full width of umbra and penumbra
    """
    # Half-angle of the penumbra (alpha) and umbra (beta) cones defined by
    # the two slit openings.
    alpha = (d1 + d2) / 2.0 / L12
    beta = abs(d1 - d2) / 2.0 / L12
    if distance >= 0:
        # Downstream of the second slit: widths grow from d2.
        return beta * distance * 2 + d2, alpha * distance * 2 + d2
    else:
        # Upstream of the first slit: widths grow from d1.
        return beta * abs(distance) * 2 + d1, alpha * abs(distance) * 2 + d1
|
mercurial-5.4 | mercurial-5.4//hgext/histedit.pyfile:/hgext/histedit.py:function:movetopmostbookmarks/movetopmostbookmarks | def movetopmostbookmarks(repo, oldtopmost, newtopmost):
    """Move bookmark from oldtopmost to newly created topmost
    This is arguably a feature and we may only want that for the active
    bookmark. But the behavior is kept compatible with the old version for now.
    """
    # Nothing to do when either endpoint is missing.
    if not oldtopmost or not newtopmost:
        return
    oldbmarks = repo.nodebookmarks(oldtopmost)
    if oldbmarks:
        # Move every bookmark pointing at the old topmost node in a single
        # locked transaction so the update is atomic.
        with repo.lock(), repo.transaction(b'histedit') as tr:
            marks = repo._bookmarks
            changes = []
            for name in oldbmarks:
                changes.append((name, newtopmost))
            marks.applychanges(repo, tr, changes)
|
Pytzer-0.4.3 | Pytzer-0.4.3//pytzer/parameters.pyfile:/pytzer/parameters.py:function:psi_K_Na_SO4_HMW84/psi_K_Na_SO4_HMW84 | def psi_K_Na_SO4_HMW84(T, P):
    """c-c'-a: potassium sodium sulfate [HMW84]."""
    # Constant ternary interaction parameter from the HMW84 dataset;
    # temperature T and pressure P do not affect the value.
    psi = -0.01
    # Parameter is tabulated only at 298.15 K; 'valid' flags an exact match.
    valid = T == 298.15
    return psi, valid
|
integrations | integrations//hg/zulip_changegroup.pyfile:/hg/zulip_changegroup.py:function:format_commit_lines/format_commit_lines | def format_commit_lines(web_url: str, repo: repo, base: int, tip: int) ->str:
    """
    Format the per-commit information for the message, including the one-line
    commit summary and a link to the diff if a web URL has been configured:
    """
    if web_url:
        rev_base_url = web_url.rstrip('/') + '/rev/'
    commit_summaries = []
    # NOTE(review): the indentation of this dump is ambiguous; as
    # reconstructed here (matching upstream), rev_base_url is only bound
    # when web_url is truthy, which the inner 'if web_url' guard matches.
    for rev in range(base, tip):
        rev_node = repo.changelog.node(rev)
        rev_ctx = repo[rev_node]
        # First line of the commit message serves as the summary.
        one_liner = rev_ctx.description().split('\n')[0]
        if web_url:
            summary_url = rev_base_url + str(rev_ctx)
            summary = '* [{summary}]({url})'.format(summary=one_liner, url=
                summary_url)
        else:
            summary = '* {summary}'.format(summary=one_liner)
        commit_summaries.append(summary)
    return '\n'.join(summary for summary in commit_summaries)
|
nitime-0.8.1 | nitime-0.8.1//nitime/lazy.pyfile:/nitime/lazy.py:function:enabled/enabled | def enabled():
    """Returns ``True`` if LazyImports are globally enabled"""
    # Import inside the function so the flag is read at call time, not at
    # module import time.
    import nitime.lazyimports as l
    return not l.disable_lazy_imports
|
pymongo_mate | pymongo_mate//query_builder.pyclass:Array/item_not_in | @staticmethod
def item_not_in(items):
    """Build a MongoDB ``$nin`` filter: the field value must not appear in
    *items*.

    Example::

        {"item_type": "Seafood"}, "Fruit" not in ["Fruit", "Meat"]
    """
    query = {'$nin': items}
    return query
|
mujpy-1.0.1 | mujpy-1.0.1//mujpy/aux/aux.pyfile:/mujpy/aux/aux.py:function:ps/ps | def ps(data, p0=0.0, p1=0.0, inv=False):
    """
    Linear phase correction
    Parameters
    ----------
    data : ndarray
        Array of NMR data.
    p0 : float
        Zero order phase in degrees.
    p1 : float
        First order phase in degrees.
    inv : bool, optional
        True for inverse phase correction
    Returns
    -------
    ndata : ndarray
        Phased NMR data.
    """
    import numpy as np
    # Convert both phase terms from degrees to radians.
    p0 = p0 * np.pi / 180.0
    p1 = p1 * np.pi / 180.0
    size = data.shape[-1]
    # Linear phase ramp along the last axis: exp(i*(p0 + p1*k/size)).
    # Cast to data's dtype so complex input keeps its precision.
    apod = np.exp(1.0j * (p0 + p1 * np.arange(size) / size)).astype(data.dtype)
    if inv:
        # Inverse correction is the reciprocal of the phase factor.
        apod = 1 / apod
    return apod * data
|
machinable | machinable//utils/dicts.pyfile:/utils/dicts.py:function:read_path_dict/read_path_dict | def read_path_dict(dict_like, path):
    """
    Resolves an non-recursive jsonpath like string over a given dictionary
    # Arguments
    dict_like: Mapping, Lookup dictionary
    path: String, lookup path, e.g. path.to[key].foo
    # Examples
    ```python
    read_path_dict({'foo': {'bar': 42}}, 'foo.bar')
    >>> 42
    ```
    # Raises
    KeyError
    """
    # Insert a dot between adjacent brackets so 'a[x][y]' splits into
    # separate '[x]' and '[y]' segments.
    path = path.replace('][', '].[')
    segments = path.split('.')
    current = dict_like
    for segment in segments:
        if segment.find('[') >= 0:
            # Extract the text between the brackets; all-digit keys are
            # treated as integer (list) indices.
            key = segment[segment.find('[') + 1:-1]
            if key.isdigit():
                key = int(key)
            if not segment.startswith('['):
                # 'name[key]' form: descend into 'name' first.
                current = current[segment[:segment.find('[')]]
            current = current[key]
        else:
            current = current[segment]
    return current
|
warcio | warcio//statusandheaders.pyclass:StatusAndHeadersParser/split_prefix | @staticmethod
def split_prefix(key, prefixs):
    """Split *key* into ``(prefix, remainder)`` for the first matching prefix.

    Matching is done against the upper-cased key, so *prefixs* are expected
    upper-case; the returned prefix is upper-cased while the remainder keeps
    the key's original casing. Returns ``None`` when nothing matches.
    """
    upper_key = key.upper()
    for candidate in prefixs:
        if not upper_key.startswith(candidate):
            continue
        cut = len(candidate)
        return upper_key[:cut], key[cut:]
|
NZMATH-1.1.0 | NZMATH-1.1.0//nzmath/equation.pyfile:/nzmath/equation.py:function:e1/e1 | def e1(x):
    """
    Solve the linear equation 0 = x[0] + x[1]*t for t.

    x is a coefficient sequence (constant term first). Raises
    ZeroDivisionError when x[1] == 0, i.e. there is no unique solution.
    """
    if x[1] == 0:
        raise ZeroDivisionError('No Solution')
    else:
        return -x[0] / x[1]
|
coreapi-cli-1.0.9 | coreapi-cli-1.0.9//coreapi_cli/codec_plugins.pyfile:/coreapi_cli/codec_plugins.py:function:supports/supports | def supports(codec):
    """
    Return a list of strings indicating supported operations.
    """
    # Prefer duck-typing on encode/decode methods.
    if hasattr(codec, 'encode') and hasattr(codec, 'decode'):
        return ['encoding', 'decoding']
    elif hasattr(codec, 'encode'):
        return ['encoding']
    elif hasattr(codec, 'decode'):
        return ['decoding']
    # Legacy codecs with neither method declare a 'supports' attribute.
    return codec.supports
|
icalendar | icalendar//cli.pyfile:/cli.py:function:_format_name/_format_name | def _format_name(address):
    """Retrieve the e-mail and optionally the name from an address.
    :arg vCalAddress address: An address object.
    :returns str: The name and optionally the e-mail address.
    """
    if not address:
        return ''
    # Addresses are 'MAILTO:user@host'; take the part after the colon.
    # NOTE(review): .title() also title-cases the address text before the
    # split -- presumably intentional for display; confirm against callers.
    email = address.title().split(':')[1]
    if 'cn' in address.params:
        # CN parameter carries the display name: 'Name <email>'.
        return '{} <{}>'.format(address.params['cn'], email)
    return email
|
senlin-8.0.0 | senlin-8.0.0//senlin/events/base.pyclass:EventBackend/dump | @classmethod
def dump(cls, level, action, **kwargs):
    """Record an event -- abstract hook for subclasses to override.

    :param level: An integer as defined by python logging module.
    :param action: The action that triggered this dump.
    :param dict kwargs: Additional parameters such as ``phase``,
        ``timestamp`` or ``extra``.
    :returns: None
    """
    # The base backend provides no storage; concrete backends must override.
    raise NotImplementedError
|
org_todo_metrics | org_todo_metrics//org_todo_metrics.pyfile:/org_todo_metrics.py:function:string_between/string_between | def string_between(first: str, second: str, string: str):
    """
    Return the substring of *string* after the first occurrence of *first*
    and before the first occurrence of *second*.

    https://stackoverflow.com/a/16835195
    """
    # NOTE(review): find() returns -1 when a marker is absent, which silently
    # produces a slice from/to the end rather than raising.
    return string[string.find(first) + 1:string.find(second)]
|
expyriment | expyriment//_api_reference_tool.pyfile:/_api_reference_tool.py:function:_search_doc/_search_doc | def _search_doc(search_str, doc_dict):
    """Search the documentation.
    Parameters
    ----------
    search_str : string
        string to search for (str)
    doc_dict : dict
        documentation dict to search in (dict)

    Returns
    -------
    list of keys whose name or documentation text contains search_str
    (case-insensitive).
    """
    rtn = []
    for k in list(doc_dict.keys()):
        # Match against both the key and its documentation body.
        if k.lower().find(search_str.lower()) > -1 or doc_dict[k].lower().find(
                search_str.lower()) > -1:
            rtn.append(k)
    return rtn
|
wdom-0.3.1 | wdom-0.3.1//wdom/util.pyfile:/wdom/util.py:function:reset/reset | def reset() ->None:
    """Reset all wdom objects.
    This function clear all connections, elements, and registered custom
    elements. This function also makes new document/application and set them.
    """
    # Imports are deferred to avoid circular imports at module load time.
    from wdom.document import get_new_document, set_document
    from wdom.element import Element
    from wdom.server import _tornado
    from wdom.window import customElements
    set_document(get_new_document())
    # Drop all live websocket connections and replace the server app.
    _tornado.connections.clear()
    _tornado.set_application(_tornado.Application())
    # Clear element registries and custom element definitions.
    Element._elements_with_id.clear()
    Element._element_buffer.clear()
    customElements.reset()
|
zadarapy | zadarapy//vpsa/settings.pyfile:/vpsa/settings.py:function:get_nfs_domain/get_nfs_domain | def get_nfs_domain(session, return_type=None, **kwargs):
    """
    Retrieves the NFS domain for the VPSA.
    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.
    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).
    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    # Thin wrapper: the session object performs the HTTP GET and decoding.
    path = '/api/settings/nfs_domain.json'
    return session.get_api(path=path, return_type=return_type, **kwargs)
|
catsoop-2019.9.5 | catsoop-2019.9.5//catsoop/mail.pyfile:/catsoop/mail.py:function:setup_smtp_object/setup_smtp_object | def setup_smtp_object(smtp, user, passwd):
    """
    Helper function. Set up an `smtplib.SMTP` object for use with CAT-SOOP,
    enabling TLS if possible and logging in a user if information is specified.
    **Parameters**:
    * `smtp`: the `smtplib.SMTP` object to configure
    * `user`: the username to use when logging in
    * `passwd`: the password to use when logging in
    **Returns:** the same `smtplib.SMTP` object that was passed in, after
    configuring it.
    """
    smtp.set_debuglevel(False)
    smtp.ehlo()
    # Only authenticate when full credentials are supplied.
    if user is not None and passwd is not None:
        # Upgrade to TLS first when the server advertises STARTTLS; a second
        # EHLO is required after the TLS handshake.
        if smtp.has_extn('STARTTLS'):
            smtp.starttls()
            smtp.ehlo()
        smtp.login(user, passwd)
    return smtp
|
breezy-3.0.2 | breezy-3.0.2//breezy/plugins/weave_fmt/repository.pyclass:RepositoryFormat7/get_format_string | @classmethod
def get_format_string(cls):
    """See RepositoryFormat.get_format_string().

    Returns the on-disk byte marker that identifies this repository format.
    """
    return b'Bazaar-NG Repository format 7'
|
meta-ml-0.0.15 | meta-ml-0.0.15//metalearn/rnn_code_generator.pyfile:/metalearn/rnn_code_generator.py:function:create_training_data/create_training_data | def create_training_data(algorithm_env, n=5):
    """Samples from algorithm env and creates 4 variants per sample.
    - executable code sample (Estimator class)
    - executable code sample (Estimator instance)
    - non-executable partially randomized code
    - non-executable fully randomized code

    NOTE(review): the docstring promises 4 variants but the code below only
    appends 2 (the class sample and its instance form) -- verify whether the
    randomized variants were removed intentionally.
    """
    training_data = []
    for i in range(n):
        sample_data = algorithm_env.sample_algorithm_code()
        # Pair each raw sample with its instantiated form.
        training_data.extend([sample_data, algorithm_env.
            algorithm_obj_to_instance(sample_data)])
    return training_data
|
audiotools | audiotools//toc/yaccrules.pyfile:/toc/yaccrules.py:function:p_track_index/p_track_index | def p_track_index(t):
    """track_flag : INDEX TIMESTAMP"""
    # The docstring above is the PLY grammar production for this rule --
    # it is consumed by yacc at runtime and must not be reworded.
    from audiotools.toc import TOCFlag_INDEX
    from fractions import Fraction
    # TIMESTAMP is in CD frames; 75 frames == 1 second.
    t[0] = TOCFlag_INDEX(Fraction(t[2], 75))
|
praatio | praatio//tgio.pyfile:/tgio.py:function:_removeUltrashortIntervals/_removeUltrashortIntervals | def _removeUltrashortIntervals(tier, minLength, minTimestamp):
    """
    Remove intervals that are very tiny
    Doing many small manipulations on intervals can lead to the creation
    of ultrashort intervals (e.g. 1*10^-15 seconds long). This function
    removes such intervals.
    """
    newEntryList = []
    j = 0
    for start, stop, label in tier.entryList:
        if stop - start < minLength:
            # Ultrashort interval: absorb it into the previous kept entry
            # by extending that entry's stop time (dropped if it is first).
            if len(newEntryList) > 0:
                lastStart, _, lastLabel = newEntryList[j - 1]
                newEntryList[j - 1] = lastStart, stop, lastLabel
        else:
            # Anchor the very first entry at minTimestamp if needed.
            if len(newEntryList) == 0 and start != minTimestamp:
                newEntryList.append((minTimestamp, stop, label))
            else:
                newEntryList.append((start, stop, label))
            j += 1
    # Second pass: close sub-minLength gaps between adjacent entries by
    # stretching the earlier entry up to its successor's start.
    j = 0
    while j < len(newEntryList) - 1:
        diff = abs(newEntryList[j][1] - newEntryList[j + 1][0])
        if diff > 0 and diff < minLength:
            newEntryList[j] = newEntryList[j][0], newEntryList[j + 1][0
                ], newEntryList[j][2]
        j += 1
    return tier.new(entryList=newEntryList)
|
nti | nti//i18n/locales/interfaces.pyclass:ICountryAvailability/getCountryListing | def getCountryListing():
    """
    Return a sequence of unicode country code and country name tuples.
    """
    # Interface declaration only: the body is intentionally empty.
|
wittgenstein | wittgenstein//base.pyfile:/base.py:function:rnd/rnd | def rnd(float, places=None):
    """Round a float to decimal places.
    float : float
        Value to round.
    places : int, default=None
        Number of decimal places to round to. None picks 2 places if the
        value is < 1, 1 place if < 100, otherwise 0 places.

    NOTE(review): the parameter name shadows the builtin ``float`` inside
    this function; renaming would change the public keyword interface, so
    it is only flagged here.
    """
    if places is None:
        # Granularity scales with magnitude: small values keep more digits.
        if float < 1:
            places = 2
        elif float < 100:
            places = 1
        else:
            places = 0
    rounded = round(float, places)
    # Collapse whole-number results to int so e.g. 3.0 renders as 3.
    if rounded != int(rounded):
        return rounded
    else:
        return int(rounded)
|
mozlog | mozlog//reader.pyfile:/reader.py:function:imap_log/imap_log | def imap_log(log_iter, action_map):
    """Create an iterator that will invoke a callback per action for each item in a
    iterable containing structured log entries
    :param log_iter: Iterator returning structured log entries
    :param action_map: Dictionary mapping action name to callback function. Log items
        with actions not in this dictionary will be skipped.
    """
    # Lazy generator: entries whose action has no callback are dropped.
    for item in log_iter:
        if item['action'] in action_map:
            yield action_map[item['action']](item)
|
yggdrasil | yggdrasil//command_line.pyfile:/command_line.py:function:yggtime_lang/yggtime_lang | def yggtime_lang():
    """Plot timing statistics comparing the different languages."""
    # Deferred import keeps the CLI entry point cheap to load.
    from yggdrasil import timing
    timing.plot_scalings(compare='language')
|
patoolib | patoolib//programs/ar.pyfile:/programs/ar.py:function:list_ar/list_ar | def list_ar(archive, compression, cmd, verbosity, interactive):
    """List a AR archive.

    Returns the argv list for invoking the external 'ar' tool; the
    'compression' and 'interactive' parameters are part of the common
    patool program interface and are unused here.
    """
    opts = 't'
    # 'v' adds verbose listing output when requested.
    if verbosity > 1:
        opts += 'v'
    return [cmd, opts, archive]
|
odoo | odoo//migration/migration.pyfile:/migration/migration.py:function:table_of_model/table_of_model | def table_of_model(cr, model):
    """Return the table for the provided model name.

    Known ir.actions models map to their legacy table names; every other
    model follows the default dots-to-underscores convention. The cursor
    'cr' is unused here but kept for interface parity with sibling helpers.
    """
    return {'ir.actions.actions': 'ir_actions', 'ir.actions.act_url':
        'ir_act_url', 'ir.actions.act_window': 'ir_act_window',
        'ir.actions.act_window_close': 'ir_actions',
        'ir.actions.act_window.view': 'ir_act_window_view',
        'ir.actions.client': 'ir_act_client', 'ir.actions.report.xml':
        'ir_act_report_xml', 'ir.actions.report': 'ir_act_report_xml',
        'ir.actions.server': 'ir_act_server', 'ir.actions.wizard':
        'ir_act_wizard'}.get(model, model.replace('.', '_'))
|
scrapydart | scrapydart//interfaces.pyclass:IEggStorage/delete | def delete(project, version=None):
    """Delete the egg stored for the given project and version. It should
    also delete the project if no versions are left"""
    # Interface declaration only: implementations provide the behavior.
|
oemof | oemof//db/tools.pyfile:/db/tools.py:function:grant_db_access/grant_db_access | def grant_db_access(conn, schema, table, role):
    """Gives access to database users/ groups
    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    role : str
        database role that access is granted to
    """
    # SECURITY(review): identifiers are interpolated straight into the SQL
    # string -- safe only for trusted, internally generated schema/table/role
    # names. GRANT targets cannot be bound parameters, but identifiers should
    # be validated or quoted if they can ever come from user input.
    grant_str = (
        """GRANT ALL ON TABLE {schema}.{table}
    TO {role} WITH GRANT OPTION;"""
        .format(schema=schema, table=table, role=role))
    conn.execute(grant_str)
|
checkbox-ng-1.4.0 | checkbox-ng-1.4.0//plainbox/vendor/rpyc/core/netref.pyfile:/plainbox/vendor/rpyc/core/netref.py:function:asyncreq/asyncreq | def asyncreq(proxy, handler, *args):
    """Performs an asynchronous request on the given proxy object.
    Not intended to be invoked directly.
    :param proxy: the proxy on which to issue the request
    :param handler: the request handler (one of the ``HANDLE_XXX`` members of
        ``rpyc.protocol.consts``)
    :param args: arguments to the handler
    :returns: an :class:`~rpyc.core.async_.AsyncResult` representing
        the operation
    """
    # object.__getattribute__ bypasses the netref's own attribute
    # forwarding, which would otherwise proxy '____conn__' to the remote.
    conn = object.__getattribute__(proxy, '____conn__')
    return conn.async_request(handler, proxy, *args)
|
mmtfPyspark-0.3.6 | mmtfPyspark-0.3.6//mmtfPyspark/datasets/drugBankDataset.pyfile:/mmtfPyspark/datasets/drugBankDataset.py:function:_remove_spaces_from_column_names/_remove_spaces_from_column_names | def _remove_spaces_from_column_names(original):
    """Remove spaces from column names to ensure compatibility with parquet
    Parameters
    ----------
    original : dataset
        the original dataset
    Returns
    -------
    dataset
        dataset with columns renamed
    """
    # Spark DataFrames are immutable; each rename returns a new frame,
    # so rebind 'original' on every iteration.
    for existingName in original.columns:
        newName = existingName.replace(' ', '')
        original = original.withColumnRenamed(existingName, newName)
    return original
|
gevent | gevent//builtins.pyfile:/builtins.py:function:_unlock_imports/_unlock_imports | def _unlock_imports():
    """
    Internal function, called when gevent needs to perform imports
    lazily, but does not know the state of the system. It may be impossible
    to take the import lock because there are no other running greenlets, for
    example. This causes a monkey-patched __import__ to avoid taking any locks
    until the corresponding call to lock_imports. This should only be done for limited
    amounts of time and when the set of imports is statically known to be "safe".
    """
    # Module-level flag consulted by the patched __import__.
    global __lock_imports
    __lock_imports = False
|
plone.app.layout-3.4.2 | plone.app.layout-3.4.2//plone/app/layout/globals/interfaces.pyclass:IContextState/workflow_state | def workflow_state():
    """The workflow state of the current object
    """
    # Interface declaration only: implementations provide the behavior.
|
co2sim-3.0.0 | co2sim-3.0.0//src/co2mpas/model/physical/cycle/WLTP.pyfile:/src/co2mpas/model/physical/cycle/WLTP.py:function:get_downscale_phases/get_downscale_phases | def get_downscale_phases(class_data):
    """
    Returns downscale phases [s].
    :param class_data:
        WLTP class data.
    :type class_data: dict
    :return:
        Downscale phases [s].
    :rtype: list
    """
    # Plain nested lookup; raises KeyError if the class data is malformed.
    return class_data['downscale']['phases']
|
escher | escher//version.pyfile:/version.py:function:get_full_version/get_full_version | def get_full_version(main_version, post_version=None):
    """Generate a PEP440 compliant version with an optional post-release.

    :param main_version: base version string, e.g. '1.2.0'
    :param post_version: optional post-release number appended as '.postN'
    """
    if post_version is None:
        return main_version
    else:
        return '%s.post%s' % (main_version, post_version)
|
src | src//drel/drel_ast_yacc.pyfile:/drel/drel_ast_yacc.py:function:p_small_stmt/p_small_stmt | def p_small_stmt(p):
    """small_stmt : expr_stmt
    | print_stmt
    | break_stmt
    | next_stmt"""
    # The docstring above is the PLY grammar production and must not be
    # reworded. The rule simply passes the child node through.
    p[0] = p[1]
|
openreview | openreview//openreview.pyclass:Edge/from_json | @classmethod
def from_json(Edge, e):
    """Build an :class:`Edge` from a deserialized JSON dictionary.

    :arg e: dict of Edge fields as produced by the OpenReview API; missing
        keys become ``None``.
    """
    # Every constructor argument is looked up with .get() so absent keys
    # default to None, exactly as the API contract expects.
    field_names = ('id', 'cdate', 'tcdate', 'tmdate', 'ddate', 'tddate',
        'invitation', 'readers', 'nonreaders', 'writers', 'signatures',
        'head', 'tail', 'weight', 'label')
    kwargs = {name: e.get(name) for name in field_names}
    return Edge(**kwargs)
|
pyboto3-1.4.4 | pyboto3-1.4.4//pyboto3/codecommit.pyfile:/pyboto3/codecommit.py:function:update_repository_description/update_repository_description | def update_repository_description(repositoryName=None,
    repositoryDescription=None):
    """
    Sets or changes the comment or description for a repository.
    See also: AWS API Documentation
    :example: response = client.update_repository_description(
        repositoryName='string',
        repositoryDescription='string'
    )
    :type repositoryName: string
    :param repositoryName: [REQUIRED]
        The name of the repository to set or change the comment or description for.
    :type repositoryDescription: string
    :param repositoryDescription: The new comment or description for the specified repository. Repository descriptions are limited to 1,000 characters.
    """
    # pyboto3 stub: documents the boto3 client API for IDEs; intentionally
    # has no implementation.
    pass
|
pmagpy | pmagpy//pmag.pyfile:/pmag.py:function:chart_maker/chart_maker | def chart_maker(Int, Top, start=100, outfile='chart.txt'):
    """
    Makes a chart for performing IZZI experiments. Print out the file and
    tape it to the oven. This chart will help keep track of the different
    steps.
    Z : performed in zero field - enter the temperature XXX.0 in the sio
        formatted measurement file created by the LabView program
    I : performed in the lab field written at the top of the form
    P : a pTRM step - performed at the temperature and in the lab field.
    Parameters
    __________
    Int : list of intervals [e.g., 50,10,5]
    Top : list of upper bounds for each interval [e.g., 500, 550, 600]
    start : first temperature step, default is 100
    outfile : name of output file, default is 'chart.txt'
    Output
    _________
    creates a file with:
        file: write down the name of the measurement file
        field: write down the lab field for the infield steps (in uT)
        the type of step (Z: zerofield, I: infield, P: pTRM step
        temperature of the step and code for SIO-like treatment steps
        XXX.0 [zero field]
        XXX.1 [in field]
        XXX.2 [pTRM check] - done in a lab field
    date : date the step was performed
    run # : an optional run number
    zones I-III : field in the zones in the oven
    start : time the run was started
    sp : time the setpoint was reached
    cool : time cooling started
    """
    # NOTE(review): the 'outfile' parameter is never used -- the file name
    # is hard-coded to 'chart.txt' both here and in the final message.
    # low: next step temperature; iz: toggles Z (0) vs I (1) ordering.
    low, k, iz = start, 0, 0
    Tzero = []
    f = open('chart.txt', 'w')
    # Column-ruler rows for the printed form. NOTE(review): the exact
    # column widths of these literals may have been mangled by whitespace
    # stripping in this copy -- check against upstream pmagpy before reuse.
    vline = ('\t%s\n' %
        ' | | | | | | | |')
    hline = (
        '______________________________________________________________________________\n'
        )
    f.write('file:_________________ field:___________uT\n\n\n')
    f.write('%s\n' %
        ' date | run# | zone I | zone II | zone III | start | sp | cool|'
        )
    f.write(hline)
    f.write('\t%s' % ' 0.0')
    f.write(vline)
    f.write(hline)
    for k in range(len(Top)):
        for t in range(low, Top[k] + Int[k], Int[k]):
            if iz == 0:
                # Zero-field step (XXX.0), remembered for later pTRM checks.
                Tzero.append(t)
                f.write('%s \t %s' % ('Z', str(t) + '.' + str(iz)))
                f.write(vline)
                f.write(hline)
                if len(Tzero) > 1:
                    # pTRM check (XXX.2) at the previous zero-field step.
                    f.write('%s \t %s' % ('P', str(Tzero[-2]) + '.' + str(2)))
                    f.write(vline)
                    f.write(hline)
                iz = 1
                # Matching in-field step (XXX.1).
                f.write('%s \t %s' % ('I', str(t) + '.' + str(iz)))
                f.write(vline)
                f.write(hline)
            elif iz == 1:
                # IZ ordering: in-field first, then zero-field.
                f.write('%s \t %s' % ('I', str(t) + '.' + str(iz)))
                f.write(vline)
                f.write(hline)
                iz = 0
                f.write('%s \t %s' % ('Z', str(t) + '.' + str(iz)))
                f.write(vline)
                f.write(hline)
        try:
            # First step of the next interval; IndexError on the last one.
            low = Top[k] + Int[k + 1]
        except:
            # NOTE(review): bare except also swallows unrelated errors;
            # 'except IndexError:' is what is actually intended here.
            f.close()
            print('output stored in: chart.txt')
|
pyboto3-1.4.4 | pyboto3-1.4.4//pyboto3/lightsail.pyfile:/pyboto3/lightsail.py:function:generate_presigned_url/generate_presigned_url | def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None,
    HttpMethod=None):
    """
    Generate a presigned url given a client, its method, and arguments
    :type ClientMethod: string
    :param ClientMethod: The client method to presign for
    :type Params: dict
    :param Params: The parameters normally passed to
        ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned url is valid
        for. By default it expires in an hour (3600 seconds)
    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url. By
        default, the http method is whatever is used in the method's model.
    """
    # pyboto3 stub: documents the boto3 client API for IDEs; intentionally
    # has no implementation.
    pass
|