repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
def absolute_url(relative=0):
    """Return the absolute URL of the object.

    This is a canonical URL based on the object's physical
    containment path. It is affected by the virtual host
    configuration, if any, and can be used by external
    agents, such as a browser, to address the object.

    If the relative argument is provided, with a true value, then
    the value of virtual_url_path() is returned.

    Some Products incorrectly use '/'+absolute_url(1) as an
    absolute-path reference. This breaks in certain virtual
    hosting situations, and should be changed to use
    absolute_url_path() instead.

    Interface method: implementations provide the actual URL logic.
    """
|
def say_hello():
    """Print the classic greeting to standard output.

    Takes no parameters and returns nothing.
    """
    greeting = 'Hello, world!'
    print(greeting)
|
def read_blast_results(filepath):
    """
    Parse a comma-separated BLAST result file and count the search hits.

    The hit identifier is taken from the second column of each line.

    :param filepath: path to the BLAST output file
    :return: dict mapping hit -> count, ordered by descending count
    """
    counts = {}
    with open(filepath) as handle:
        for line in handle:
            subject = line.strip().split(',')[1]
            counts[subject] = counts.get(subject, 0) + 1
    # Sort ascending by count then reverse, reproducing the original
    # tie-ordering exactly (ties come out in reversed insertion order).
    return dict(sorted(counts.items(), key=lambda kv: kv[1])[::-1])
|
def get_all_users():
    """Return an iterable of all users, sorted by email.

    Interface method: implementations provide the actual query.
    """
|
def abipy_logo2():
    """Return the AbiPy ASCII-art logo as a multi-line string.

    Art generated with http://www.text-image.com/convert/pic2ascii.cgi
    """
    # NOTE: the returned string is program output; do not re-wrap it.
    return """
MMMMMMMMMMMMMMMMNhdMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMdhmMMMMMMMMMMMMMMM
MMMMMMMMMddNMMmoyNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNyomMMmhmMMMMMMMM
MMMmmMMhomMMMy/hMMMMMMMMMMMMMMMMMMMN::MMMMMMMMMm:oMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMd+yMMMhomMmmMMM
MmsmMMs+NMMMy+yMMMMMMMMMMMNhyyhmMMMN::mhyyhmMMMNhdMMMMmhyydNMMMdyNMMMMMmyNMMMMMMMMMMMMy+yMMMh+dMmsmM
m+mMMy+hMMMd++mMMMMMMMMMm+:+ss+:+mMN:::/ss+:+mMd:/MMd/:+so/:oNM/:dMMMMMo:yMMMMMMMMMMMMm++dMMMo+NMN+m
osMMMo+mMMMy+oMMMMMMMMMM::dMMMMd:/MN::hMMMMd::Nd:/Mm:/NMMMMy:oM/:dMMMMMo:yMMMMMMMMMMMMMo+yMMMy+hMMso
oyMMMooNMMMyooMMMMMMMMMN::mMMMMm::NM::mMMMMN::Nd:/Mh:/MMMMMh:+Mo:yMMMMM+:yMMMMMMMMMMMMMooyMMMyohMMyo
dyMMMysmMMMdooNMMMMMMMMMd/:oyys:::NMd/:oyys::dMd:/Mh::/shyo:/mMN+:+yhs/::yMMMMMMMMMMMMNoodMMMysmMMyh
MddMMNyhMMMMysdMMMMMMMMMMMdyooydssMMMMdysoydMMMNsyMh:+hsosydMMMMMmysosho:yMMMMMMMMMMMMdsyMMMNydMMddM
MMNmNMMddMMMMhyNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMh:oMMMMMMMMMMMMMMMMMo:yMMMMMMMMMMMNyhMMMNhmMNmNMM
MMMMMMMMNmmMMMmhmMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMmNMMMMMMMMMMMMMMMMMNdMMMMMMMMMMMmhmMMNmmMMMMMMMM
MMMMMMMMMMMMMMMMmmNMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNmmMMMMMMMMMMMMMMM
"""
|
def cHypot():
    """
    Return the kernel source code for cHypot, which computes
    sqrt(x*x + y*y) without intermediate overflow.

    The returned string defines a GPU kernel that, per element gid,
    first collapses each complex input (x[gid], y[gid]) to its
    magnitude via hypot, then combines the two magnitudes with hypot
    again, storing the result in x[gid].x and zeroing x[gid].y.
    """
    # The string below is kernel source passed to the GPU toolchain;
    # it must be returned verbatim.
    R = """
KERNEL void cHypot(GLOBAL_MEM float2 *x,
GLOBAL_MEM const float2 *y)
{
const unsigned int gid = get_global_id(0);
float2 tmp_x;
float2 tmp_y;
tmp_x = x[gid];
tmp_y = y[gid];
tmp_x.x = hypot( tmp_x.x, tmp_x.y); // sqrt( tmp_x.x*tmp_x.x + tmp_x.y*tmp_x.y);
tmp_y.x = hypot( tmp_y.x, tmp_y.y); // sqrt( tmp_y.x*tmp_y.x + tmp_y.y*tmp_y.y);
x[gid].x = hypot(tmp_x.x, tmp_y.x);
x[gid].y = 0.0;
};
"""
    return R
|
def game_type():
    """User can choose playing against computer or another user.

    Placeholder: the selection logic is not implemented yet.
    """
    pass
|
@staticmethod
def p_a2ml_keyword(p):
    # PLY/yacc parser rule: the docstring below IS the grammar
    # production and is read by yacc -- do not edit it.
    # No semantic action is required for this keyword.
    """a2ml_keyword : STRING"""
|
def ensure_session_key(request):
    """Return the session key to use for *request*.

    If the session has no key yet, it is saved (forcing the backend to
    allocate one) and flagged as modified so it persists between
    requests for this client.
    """
    session = request.session
    if session.session_key is None:
        session.save()
        session.modified = True
    return session.session_key
|
@classmethod
def camera_uploads_policy_changed(cls, val):
    """
    Create an instance of this class set to the
    ``camera_uploads_policy_changed`` tag with value ``val``.

    :param CameraUploadsPolicyChangedType val: payload describing the
        policy-change event.
    :rtype: EventType
    """
    # Delegate to the tagged-union constructor with this variant's tag.
    return cls('camera_uploads_policy_changed', val)
|
@staticmethod
def get_subtype(object_dictionary):
    """
    Given the hash representation of a subtype of this class,
    use the info in the hash to return the class name of the subtype.

    :param object_dictionary: dict holding an 'actionType' key that
        selects the concrete subtype.
    :return: the subtype class name; 'Action' for unknown action types.
    """
    # Dict dispatch instead of an if/elif chain; also avoids shadowing
    # the builtin `type` as the original did.
    subtype_by_action = {
        'OSS': 'StreamingServiceAction',
        'ONS': 'NotificationServiceAction',
        'FAAS': 'FaaSAction',
    }
    action_type = object_dictionary['actionType']
    return subtype_by_action.get(action_type, 'Action')
|
def check_length(min_length: int, max_length: int, mode: str = 'and', *args
    ) -> bool:
    """
    Check whether item lengths fall between min_length and max_length.

    :param min_length: minimum allowed length (inclusive)
    :param max_length: maximum allowed length (inclusive)
    :param mode: 'and' requires every item to pass the length check;
        any other value ('or') requires at least one item to pass
    :param args: the items to check
    :return: True when the mode's condition holds, False otherwise
    """
    def in_range(item):
        return min_length <= len(item) <= max_length

    if mode == 'and':
        return all(in_range(item) for item in args)
    return any(in_range(item) for item in args)
|
def accuracy(output, target, topk=(1,)):
    """Computes precision@k for the specified values of k.

    :param output: (batch, num_classes) tensor of class scores/logits.
    :param target: (batch,) tensor of ground-truth class indices.
    :param topk: iterable of k values to evaluate.
    :return: list of 0-dim tensors, one per k, each holding the
        percentage of samples whose true class is in the top-k.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    # Indices of the top-maxk classes per sample, transposed to
    # shape (maxk, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): slicing the transposed
        # tensor yields a non-contiguous view, on which view() raises
        # a RuntimeError in modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
|
def get_backend(mutex_id):
    """Return the backend used to store semaphore (mutex) state.

    :param mutex_id: identifier of the semaphore whose backend is
        being resolved
    :return: always ``None`` in this default implementation
    """
    return None
|
def toPyModel(model_ptr):
    """
    toPyModel(model_ptr) -> svm_model
    Convert a ctypes POINTER(svm_model) to a Python svm_model.

    :raises ValueError: when *model_ptr* is a null pointer.
    """
    if not model_ptr:
        raise ValueError('Null pointer')
    model = model_ptr.contents
    # Mark the object so its deallocation is handled on the C side.
    model.__createfrom__ = 'C'
    return model
|
def load_Numeric(finder, module):
    """cx_Freeze hook: the Numeric module optionally loads the dotblas
    module; ignore the error if this module does not exist."""
    # Tell the finder a missing 'dotblas' import is not an error.
    module.IgnoreName('dotblas')
|
def setId(id):
    """Set the workflow version id for this object.

    This is used to mark all versions of an object with the same id.
    Interface method: implementations provide the storage.
    """
|
def get_function_handle(method, var):
    """
    Return a function handle to a given calculation method.

    Parameters
    ----------
    method : str
        Identifier of the calculation method to return a handle to.
    var : dict
        Local variables needed in the calculation method.

    Returns
    -------
    f_handle : function
        Handle to calculation `method` defined in this globals scope.
    """
    # The wrapper factories are defined at module scope under the
    # 'wrap_calculate_using_<method>' naming convention.
    factory = globals()['wrap_calculate_using_' + method]
    return factory(var)
|
def find_meta_property(obj, name):
    """
    Return a named (`name`) `QMetaProperty` of a `QObject` instance `obj`.

    :raises AttributeError: if no property with that name exists.
    """
    meta = obj.metaObject()
    index = meta.indexOfProperty(name)
    if index == -1:
        # Message fixed: original read "does no have".
        raise AttributeError('%s does not have a property named %r.' % (
            meta.className(), name))
    return meta.property(index)
|
def convert_three_to_python(three, python):
    """Converts a Three flag into a Python flag, and raises customer warnings
    in the process, if needed.

    A truthy *python* wins and is returned unchanged. Otherwise
    ``three`` selects the version string: True -> '3', False -> '2',
    anything else -> None.
    """
    if python:
        return python
    if three is False:
        return '2'
    if three is True:
        return '3'
    return None
|
def delete_faces(CollectionId=None, FaceIds=None):
    """
    Deletes faces from a collection. You specify a collection ID and an array of face IDs to remove from the collection.
    This operation requires permissions to perform the rekognition:DeleteFaces action.
    See also: AWS API Documentation
    Examples
    This operation deletes one or more faces from a Rekognition collection.
    Expected Output:
    :example: response = client.delete_faces(
    CollectionId='string',
    FaceIds=[
    'string',
    ]
    )
    :type CollectionId: string
    :param CollectionId: [REQUIRED]
    Collection from which to remove the specific faces.
    :type FaceIds: list
    :param FaceIds: [REQUIRED]
    An array of face IDs to delete.
    (string) --
    :rtype: dict
    :return: {
    'DeletedFaces': [
    'string',
    ]
    }
    :returns:
    (string) --
    """
    # Documentation stub: pyboto3 ships docstrings for IDE support only;
    # the actual call is performed by boto3 at runtime.
    pass
|
def specmix(x, wv, aspl, bspl, **kwargs):
    """
    Return the mixture of aspl and bspl specified by the parameters in x.

    Parameters
    ==========
    x : tuple
        Model parameters (a, b, B0, c, m): the two spline weights, a
        baseline offset, a wavelength shift and a wavelength scale.
    wv : array-like
        The wavelengths of the data.
    aspl, bspl : UnivariateSpline
        Splines giving the acid (aspl) or base (bspl) molal absorption
        at a given wavelength.
    """
    a, b, B0, c, m = x
    # Evaluate both splines at the shifted/scaled wavelengths.
    shifted = m * wv + c
    return a * aspl(shifted) + b * bspl(shifted) + B0
|
def p_track_mode(t):
    # PLY/yacc parser rule: the docstring below IS the grammar
    # production for a TOC track mode and is read by yacc -- do not
    # edit its content.
    """track_mode : AUDIO
    | MODE1
    | MODE1_RAW
    | MODE2
    | MODE2_FORM1
    | MODE2_FORM2
    | MODE2_FORM_MIX
    | MODE2_RAW"""
    # Propagate the matched mode token's value as this rule's value.
    t[0] = t[1]
|
def convert_waypoint_float(waypoint):
    """
    Cast the 'x', 'y', 'z' and 'yaw' entries of *waypoint* to float,
    mutating the mapping in place.
    """
    for axis in ('x', 'y', 'z', 'yaw'):
        waypoint[axis] = float(waypoint[axis])
|
def __execute(model, iteration_number):
    """
    Execute a simulation model and return its computed trends.

    :param model: a configured diffusion model
    :param iteration_number: number of iterations to run
    :return: the first element of the model's trend summary
    """
    bunch = model.iteration_bunch(iteration_number, False)
    trends = model.build_trends(bunch)[0]
    # Drop the potentially large intermediates eagerly.
    del bunch
    del model
    return trends
|
def extract_prefixed_keys(dictionary, prefix):
    """Returns a dictionary with all entries of :obj:`dictionary` whose
    keys start with :obj:`prefix`, with the prefix stripped off.
    """
    offset = len(prefix)
    return {key[offset:]: value for key, value in dictionary.items()
            if key.startswith(prefix)}
|
@staticmethod
def retrieve_members(contfile, files):
    """Extract the given members from a container file.

    To be overridden by concrete Uncontainer subclasses; this base
    implementation does nothing.

    :param contfile: the container (archive) object -- presumably an
        open archive handle; confirm against concrete subclasses
    :param files: the member files to retrieve
    """
|
@staticmethod
def _call_with_frames_removed(f, *args, **kwds):
    """Call *f* with the given arguments, acting as a traceback marker.

    remove_importlib_frames in import.c will always remove sequences
    of importlib frames that end with a call to this function.
    Use it instead of a normal call in places where including the
    importlib frames introduces unwanted noise into the traceback
    (e.g. when executing module code).
    """
    return f(*args, **kwds)
|
def nvlist_to_dict(nvlist):
    """Convert a CORBA namevalue list into a dictionary.

    Each item contributes ``item.name -> item.value.value()``; later
    duplicates of a name overwrite earlier ones.
    """
    return {entry.name: entry.value.value() for entry in nvlist}
|
def duplicate_line():
    """Duplicate the current line.

    Stub in the fake Blender API: the real behavior is provided by
    Blender's operator at runtime.
    """
    pass
|
def update_counts(s, counts):
    """Add one to counts[c] for every character c of *s* that is
    already a key of *counts*; other characters are ignored.
    """
    for ch in s:
        if ch in counts:
            counts[ch] = counts[ch] + 1
|
def _check_exon_order(data_frame, row_number, row_index, prev_row_index,
    exon_pos):
    """Check that exons are ordered by rank in the transcript.

    Raises AssertionError (with a message locating the offending row)
    when the 'ExonRank' at *row_index* is not strictly greater than
    the one at *prev_row_index*. The first exon of a transcript
    (exon_pos == 0) has no predecessor and is skipped.

    NOTE(review): relies on ``assert``, which is stripped under
    ``python -O`` -- confirm the check isn't needed in optimized runs.
    """
    if exon_pos != 0:
        assert data_frame.loc[row_index, 'ExonRank'] > data_frame.loc[
            prev_row_index, 'ExonRank'
            ], "Exons aren't sorted by rank, error with row number " + str(
            row_number) + ', index ' + str(row_index) + '.'
|
def stroke_curvature(it):
    """
    Compute the 2D curvature at the stroke vertex pointed by the iterator 'it'.
    K = 1 / R
    where R is the radius of the circle going through the current vertex and its neighbors.

    Yields one curvature value per vertex; endpoints yield 0.0.
    """
    for _ in it:
        if it.is_begin or it.is_end:
            # Endpoints have no two neighbors: curvature is undefined.
            yield 0.0
            continue
        else:
            # Step back to the previous vertex, then the two next(it)
            # calls read 'current' and 'succ' while advancing.
            it.decrement()
            prev, current, succ = it.object.point.copy(), next(it).point.copy(
                ), next(it).point.copy()
            # Restore the iterator to 'current' for the outer loop.
            it.decrement()
        ab = current - prev
        bc = succ - current
        ac = prev - succ
        a, b, c = ab.length, bc.length, ac.length
        try:
            # Circumradius relation: K = 4 * triangle_area / (a*b*c).
            area = 0.5 * ab.cross(ac)
            K = 4 * area / (a * b * c)
        except ZeroDivisionError:
            # Degenerate (coincident) points: treat the stroke as flat.
            K = 0.0
        yield abs(K)
|
def iter_sequence_infinite(seq):
    """Iterate indefinitely over a Sequence.

    # Arguments
        seq: Sequence object

    # Returns
        Generator yielding batches, restarting from the beginning of
        `seq` every time it is exhausted.
    """
    while True:
        yield from seq
|
def _load_insert_element_if_necessary(load_df, at):
    """
    Insert an event at time *at* into the load dataframe, if none
    exists there yet, keeping the "area" and "load" columns consistent.

    The synthesized row inherits the load of the preceding event; the
    preceding event's area is truncated at *at*, and the new row's
    area covers the span from *at* to the following event. Returns the
    (re-sorted) dataframe.
    """
    if len(load_df[load_df.time == at]) == 0:
        # No event exactly at 'at': synthesize one from its neighbors.
        prev_el = load_df[load_df.time <= at].tail(1)
        new_el = prev_el.copy()
        next_el = load_df[load_df.time >= at].head(1)
        new_el.time = at
        # Area held by the new segment: load * duration until next event.
        new_el.area = float(new_el.load) * float(next_el.time - at)
        # Truncate the previous segment's area at the insertion point.
        load_df.loc[prev_el.index, 'area'] = float(prev_el.load) * float(at -
            prev_el.time)
        load_df.loc[len(load_df)] = [float(new_el.time), float(new_el.load),
            float(new_el.area)]
        load_df = load_df.sort_values(by=['time'])
    return load_df
|
def indent_block(text, indention='    '):
    """Indent every line of *text* with *indention* (default four
    spaces), preserving any trailing newlines unindented.
    """
    body = text.rstrip('\n')
    trailing = '\n' * (len(text) - len(body))
    indented = '\n'.join(indention + line for line in body.split('\n'))
    return indented + trailing
|
def stream():
    """
    Returns an input stream for reading asset contents. Raises an
    exception if the asset is a directory or does not exist.

    Interface method: implementations provide the actual stream.
    """
|
@classmethod
def convert_name_to_id(cls, name: str) -> str:
    """
    Convert result name to configuration name.

    The name given in CCCC results is different than the name given in
    the CCCC configuration. This maps the name in the configuration
    file to the name given in the results.

    :param name: metric name as it appears in CCCC results.
    :return: the short configuration id, or '' for unknown names.
    """
    # Table lookup replaces the original 20-branch if/elif chain.
    name_to_id = {
        'IF4': 'IF4',
        'fan_out_concrete': 'FOc',
        'IF4_visible': 'IF4v',
        'coupling_between_objects': 'CBO',
        'fan_in_visible': 'FIv',
        'weighted_methods_per_class_unity': 'WMC1',
        'fan_out': 'FO',
        'weighted_methods_per_class_visibility': 'WMCv',
        'fan_out_visible': 'FOv',
        'IF4_concrete': 'IF4c',
        'depth_of_inheritance_tree': 'DIT',
        'number_of_children': 'NOC',
        'fan_in_concrete': 'FIc',
        'fan_in': 'FI',
        'lines_of_comment': 'COM',
        'lines_of_code_per_line_of_comment': 'L_C',
        'McCabes_cyclomatic_complexity': 'MVGper',
        'lines_of_code': 'LOCp',
        'McCabes_cyclomatic_complexity_per_line_of_comment': 'M_C',
    }
    return name_to_id.get(name, '')
|
def df_column_dtypes(df, dtype: str = None, target: str = None,
    max_cardinality: int = 10):
    """Group dataframe columns into numeric vs. categorical.

    A column counts as numeric when its dtype is int/float AND its
    cardinality exceeds *max_cardinality*; everything else (including
    low-cardinality numeric columns) is categorical. The *target*
    column, if given, is excluded entirely.

    :return: the list for *dtype* when given, else the whole mapping
        with keys 'numeric' and 'categorical'.
    """
    groups = {'numeric': [], 'categorical': []}
    for column in df.columns:
        if column == target:
            continue
        series = df[column]
        is_numeric = series.dtype in (int, float
            ) and series.nunique() > max_cardinality
        groups['numeric' if is_numeric else 'categorical'].append(column)
    return groups[dtype] if dtype else groups
|
def add_keys(apps, schema_editor):
    """
    Ensure the fixed set of default Page records exists.

    We can't import the Page model directly as it may be a newer
    version than this migration expects, so we use the historical
    version via ``apps.get_model``.
    """
    Page = apps.get_model('pages', 'Page')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    # (english_title, title, is_show_in_home) for every default page;
    # the data-driven loop replaces nine copy-pasted try/except blocks.
    defaults = [
        ('home', 'صفحه اصلی', True),
        ('library', 'کتابخانه', False),
        ('category', 'دسته بندی', False),
        ('contact_us', 'تماس با ما', False),
        ('about_us', 'درباره ما', False),
        ('my_products', 'محصولات من', False),
        ('download_manager', 'دانلود منیجر', False),
        ('file_manager', 'مدیریت فایل', False),
        ('user_invitation', 'دعوت از دوستان', False),
    ]
    for key, title, is_show_in_home in defaults:
        try:
            Page.objects.get(english_title=key)
        except Exception:
            # Page is missing (or lookup failed): create the default.
            Page.objects.create(title=title, english_title=key,
                is_show_in_home=is_show_in_home)
    # Backfill the polymorphic content type for pre-existing rows.
    new_ct = ContentType.objects.get_for_model(Page)
    Page.objects.filter(polymorphic_ctype__isnull=True).update(
        polymorphic_ctype=new_ct)
|
def find_function_info(func, spec, args):
    """Return function meta-data as a ``(name, type)`` tuple.

    The name is qualified with the module (unless it is ``__main__``)
    and, for methods, with the owning class name (derived from
    ``__self__``, or from a leading 'self'/'cls' argument). The second
    tuple element is always ``None``.
    """
    module = getattr(func, '__module__', None)
    name = getattr(func, '__name__', None)
    bound_to = getattr(func, '__self__', None)
    cls_name = None
    if bound_to:
        cls_name = bound_to.__name__
    elif spec.args and spec.args[0] == 'self':
        cls_name = args[0].__class__.__name__
    elif spec.args and spec.args[0] == 'cls':
        cls_name = args[0].__name__
    if name:
        parts = []
        if module and module != '__main__':
            parts.extend([module, '.'])
        if cls_name:
            parts.extend([cls_name, '.'])
        parts.append(name)
        name = ''.join(parts)
    return name, None
|
def _past_month(m):
    """Return the month preceding *m* (1-12), wrapping January to
    December."""
    return 12 if m == 1 else m - 1
|
def word_wrap(string, width=80, ind1=0, ind2=0, prefix=''):
    """ word wrapping function.
    string: the string to wrap
    width: the column number to wrap at
    prefix: prefix each line with this string (goes before any indentation)
    ind1: number of characters to indent the first line
    ind2: number of characters to indent the rest of the lines

    Words longer than *width* are hard-broken at the width boundary;
    the original implementation scanned past the start of the string
    (negative indices) and crashed on them.
    """
    string = prefix + ind1 * ' ' + string
    newstring = ''
    while len(string) > width:
        marker = width - 1
        # Scan left for a break point, but never past the start.
        while marker >= 0 and not string[marker].isspace():
            marker = marker - 1
        if marker < 0:
            # No whitespace in the first `width` chars: hard-break the
            # word at the width boundary instead of crashing.
            newstring = newstring + string[:width] + '\n'
            string = prefix + ind2 * ' ' + string[width:]
        else:
            newstring = newstring + string[0:marker] + '\n'
            # Drop the whitespace character at the break point.
            string = prefix + ind2 * ' ' + string[marker + 1:]
    return newstring + string
|
def decorator_autogen_code(original_func, decorator, new_func=None):
    """
    Print auto-generated source code that defines a decorated wrapper
    for *original_func* under a NEW NAME.

    :param original_func: name of the function to wrap (string)
    :param decorator: name of the decorator to apply (string)
    :param new_func: name for the generated wrapper; defaults to
        '<original_func>_verbose'
    """
    if new_func is None:
        new_func = '%s_verbose' % original_func
    # The template below is emitted verbatim (names substituted); it is
    # program output, so its formatting must stay exactly as-is.
    print(
        """
#
# decorator_autogen_code("{original_func}", "{decorator}", '{new_func}' )
#
@{decorator}
def {new_func}(*args, **kwargs):
    return {func}(*args, **kwargs)
"""
        .format(decorator=decorator, func=original_func, new_func=new_func,
        original_func=original_func))
|
def isValidAssocExtn(extname):
    """
    Determine if the extension name given as input could
    represent a valid association file.

    Parameters
    ----------
    extname : string

    Returns
    -------
    isValid : boolean value
    """
    # Valid association extensions are exactly 'asn' and 'asc'.
    return extname in ('asn', 'asc')
|
def is_tool(name):
    """Check whether `name` is on PATH and marked as executable."""
    import shutil
    return shutil.which(name) is not None
|
@classmethod
def _cli_desc(cls):
    """
    Get the layer description for the CLI.

    Returns
    -------
    'str'
        `cls.desc` when set; otherwise a generic description built
        from `cls.name`.
    """
    if cls.desc is not None:
        return cls.desc
    return "Apply Layer '{}' to model".format(cls.name)
|
def fix_image_ch(img):
    """Move a leading channel axis to the last position.

    Channel-first images, i.e. those whose first axis has size <= 3,
    are transposed so the channel axis comes last; anything else is
    returned unchanged.
    """
    if img.shape[0] > 3:
        return img
    return img.transpose(1, 2, 0)
|
def get_table_name_from_table_id(table_id):
    """Extract the dataset name (everything after the last '__'
    separator, scanning left-to-right) from *table_id*.

    :param table_id: str
    :return: str
    """
    segments = table_id.split('__')
    return segments[-1]
|
def _default_key_normalizer(key_class, request_context):
    """
    Create a pool key out of a request context dictionary.

    According to RFC 3986, both the scheme and host are
    case-insensitive, so both are lower-cased before the pool key is
    built. Provide alternate callables to ``key_fn_by_scheme`` to
    change this behaviour.

    :param key_class: namedtuple class (must have at least the
        ``scheme`` and ``host`` fields, each prefixed with ``key_``).
    :param request_context: dict-like context for a request.
    :return: an instance of *key_class* usable as a pool key.
    """
    context = request_context.copy()
    context['scheme'] = context['scheme'].lower()
    context['host'] = context['host'].lower()
    # Dict-valued entries must become hashable to live in a namedtuple.
    for mapping_key in ('headers', '_proxy_headers', '_socks_options'):
        if context.get(mapping_key) is not None:
            context[mapping_key] = frozenset(context[mapping_key].items())
    socket_opts = context.get('socket_options')
    if socket_opts is not None:
        context['socket_options'] = tuple(socket_opts)
    # Namespace every key with the 'key_' prefix expected by key_class.
    context = {'key_' + name: value for name, value in context.items()}
    # Default any field the context didn't supply to None.
    for field in key_class._fields:
        context.setdefault(field, None)
    return key_class(**context)
|
def sortable(obj):
    """Returns True if *obj* is sortable else returns False."""
    try:
        # Sorting a two-element list forces one comparison of obj
        # with itself; unorderable types raise TypeError.
        sorted([obj, obj])
    except TypeError:
        return False
    return True
|
def active(datetime):
    """Check whether the cron job is active at the given datetime.

    Interface method: implementations provide the schedule logic.
    """
|
def successor_of(element, iterable, pairwise=pairwise):
    """Return the successor of `element` in `iterable`.

    Scans consecutive pairs (produced by `pairwise`, bound from this
    module at definition time) and returns the right-hand neighbor of
    the first left element equal to `element`.

    :raises IndexError: when `element` has no successor (not found, or
        it is the last element).
    """
    for l, r in pairwise(iterable):
        if l == element:
            return r
    raise IndexError
|
@classmethod
def fromkeys(cls, iterable, value=None):
    """OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
    and values equal to v (which defaults to None).
    """
    result = cls()
    for key in iterable:
        result[key] = value
    return result
|
def get_single_text(field, text, **kwargs):
    """Returns the first token from an analyzer's output, or ``None``
    when analysis produces no tokens.
    """
    tokens = iter(field.process_text(text, mode='query', **kwargs))
    return next(tokens, None)
|
def get_root(region_parent_zip, region_id):
    """
    Return the penultimate `parent_id` for any `region_id`.

    The penultimate parent is one layer below the root (0). The set of
    penultimate parents are the distinct regions contained in the
    segmentation; they correspond to putative functional regions.

    :param tuple region_parent_zip: a list of 2-tuples of `region_ids` and `parent_ids`
    :param int region_id: the `region_id` whose root parent_id is sought
    :return int parent_id: the corresponding penultimate `parent_id` (one step below the root - value of `0`)
    """
    if region_id == 0:
        return 0
    region_parent_dict = dict(region_parent_zip)
    # Walk up the chain while the next parent is not the root. The
    # original assigned parent_id only inside the loop and raised
    # UnboundLocalError when region_id's parent was already 0.
    while region_parent_dict[region_id] != 0:
        region_id = region_parent_dict[region_id]
    return region_id
|
def get_tag_marker(identifier, markername, markervalue):
    """
    Return the formatted tag for a marker node.

    :param identifier: (int) the node's identifier
    :param markername: (str) the node's substring, reduced to its marker name
    :param markervalue: (str) the node's substring, reduced to its marker value
    :return: (str) the formatted tag
    """
    template = '(#{0}) marker="{1}" (marker value={2})'
    return template.format(identifier, markername, markervalue)
|
def _total_secs(td):
    """
    Python 2.6 compatible timedelta total-seconds calculation.

    Backport of
    https://docs.python.org/2.7/library/datetime.html#datetime.timedelta.total_seconds
    """
    native = getattr(td, 'total_seconds', None)
    if native is not None:
        return native()
    # Manual computation for timedeltas lacking total_seconds().
    micros = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6
    return micros / 10.0 ** 6
|
def local_rfcformat(dt):
    """Return the RFC822-formatted representation of a timezone-aware
    datetime with the UTC offset.
    """
    weekdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
        'Sep', 'Oct', 'Nov', 'Dec')
    return '%s, %02d %s %04d %02d:%02d:%02d %s' % (weekdays[dt.weekday()],
        dt.day, months[dt.month - 1], dt.year, dt.hour, dt.minute,
        dt.second, dt.strftime('%z'))
|
def populate_context(context, exc, status=None):
    """
    Fill *context* with djexcept's handy default values, in place,
    without overwriting keys that are already present.

    Keys added:
    * exc: the exception object
    * exc_name: the name of the exception type
      (e.g. PermissionDenied or ValueError)
    * exc_module: the module name of the exception's type
      (e.g. django.core.exceptions or builtins)
    * exc_modname: both concatenated, separated by a period
    * status: the HTTP status code used (only added if not None)
    """
    exc_type = exc.__class__
    defaults = {
        'exc': exc,
        'exc_name': exc_type.__name__,
        'exc_module': exc_type.__module__,
        'exc_modname': '{}.{}'.format(exc_type.__module__,
            exc_type.__name__),
    }
    if status is not None:
        defaults['status'] = status
    for key, value in defaults.items():
        context.setdefault(key, value)
|
def get_natoms_element(formula):
    """
    Parse a chemical formula like 'Be24W2' into an element->count dict.

    'Be24W2' -> {'Be': 24.0, 'W': 2.0}; elements without an explicit
    count get 1 (as int), e.g. 'BeW' -> {'Be': 1, 'W': 1}.
    """
    import re
    counts = {}
    # Each element token starts with an uppercase letter.
    for token in re.findall('[A-Z][^A-Z]*', formula):
        pieces = re.findall('\\d+|\\D+', token)
        # A bare symbol means one atom; otherwise the numeric tail is
        # the count (kept as float, matching the original behavior).
        counts[pieces[0]] = 1 if len(pieces) == 1 else float(pieces[1])
    return counts
|
@staticmethod
def getJavaPackage():
    """Return the fully-qualified name of the backing JVM class as a
    string."""
    return 'com.microsoft.ml.spark.featurize.ValueIndexerModel'
|
def upgrade():
    """Apply this Alembic revision's (empty) database upgrade step."""
|
def client_color_to_rgb(value: int):
    """Convert a Tibia client color index to a packed RGB integer.

    The index is interpreted as three base-6 digits, each selecting a
    channel level in steps of 51 (0, 51, ..., 255).

    Parameters
    ----------
    value: :class:`int`
        A numeric value representing a color (valid range 0-215).

    Returns
    -------
    int:
        The color as 0xRRGGBB; 0 for out-of-range input."""
    if value < 0 or value > 215:
        return 0
    red = value // 36 * 51
    green = value // 6 % 6 * 51
    blue = value % 6 * 51
    return (red << 16) + (green << 8) + (blue & 255)
|
def page_url(user_id, photo_id):
    """Return the flickr picture-page URL for *photo_id* owned by *user_id*."""
    return f'https://www.flickr.com/photos/{user_id}/{photo_id}/'
|
def exists(xs, f):
    """
    Build the existential quantification of *f* over the variables *xs*.

    Semantically: "there exists a variable in *xs* such that *f* is true",
    which is by definition ``f.smoothing(xs)``.
    """
    return f.smoothing(xs)
|
def set_nested_attr(obj, key, value):
    """Set a nested attribute in a dictionary, creating sub-dicts as needed.

    Arguments:
        obj (dict): The top-level dictionary
        key (str): The dot-separated key
        value: The value to set
    """
    *ancestors, leaf = key.split('.')
    node = obj
    for name in ancestors:
        # Descend, creating an empty dict for any missing intermediate level.
        node = node.setdefault(name, {})
    node[leaf] = value
|
def kolmogorov_smirnov_2(frame):
    """Compute the Kolmogorov-Smirnov statistic of the found distribution.

    Args:
        frame: DataFrame with 'Found' and 'Expected' proportion columns.

    Returns:
        The supremum: the greatest absolute difference between the
        cumulative Found and Expected proportions.
    """
    cumulative = frame.sort_index()[['Found', 'Expected']].cumsum()
    gaps = (cumulative.Found - cumulative.Expected).abs()
    return gaps.max()
|
def pack_libraries():
    """Pack all used Blender library files into the current .blend file.

    Stub for the ``bpy.ops.file.pack_libraries`` operator; it performs no
    work outside of Blender.
    """
|
deserialize-1.5.1 | deserialize-1.5.1//deserialize/decorators/unhandled.pyfile:/deserialize/decorators/unhandled.py:function:_should_allow_unhandled/_should_allow_unhandled | def _should_allow_unhandled(class_reference, key_name):
"""Check if a property is allowed to be unhandled."""
if not hasattr(class_reference, '__deserialize_allow_unhandled_map__'):
return False
return class_reference.__deserialize_allow_unhandled_map__.get(key_name,
False)
|
def gethomedir(username):
    """Return the home directory path of *username* (from the passwd database)."""
    import pwd
    return pwd.getpwnam(username).pw_dir
|
def reset_tar_mtime(tarinfo):
    """Zero out the mtime on a tar entry so archives build reproducibly.

    :param tarinfo: the tarfile.TarInfo entry to normalize
    :returns: tarinfo: the same entry with its mtime cleared
    """
    tarinfo.mtime = 0
    return tarinfo
|
CRIkit2-0.2.5 | CRIkit2-0.2.5//crikit/ui/classes_ui.pyclass:SingleColor/_imgnormcompress | @staticmethod
def _imgnormcompress(img):
"""
Compress normalized image. Values:
> 1 -> 1
< 0 -> 0
"""
mask_pass = (img <= 1) * (img >= 0)
mask_high = img > 1
return mask_pass * img + mask_high
|
ripozo-sqlalchemy-1.0.2 | ripozo-sqlalchemy-1.0.2//ripozo_sqlalchemy/session_handlers.pyclass:SessionHandler/handle_session | @staticmethod
def handle_session(session, exc=None):
"""
rolls back the session if appropriate.
:param Session session: The session in use.
:param Exception exc: The exception raised,
If an exception was raised, else None
"""
if exc:
session.rollback()
|
@classmethod
def from_coo(cls, x):
    """
    Build a :obj:`DOK` array from a :obj:`COO` array.

    Parameters
    ----------
    x : COO
        The array to convert.

    Returns
    -------
    DOK
        The equivalent :obj:`DOK` array.

    Examples
    --------
    >>> from sparse import COO
    >>> s = COO.from_numpy(np.eye(4))
    >>> s2 = DOK.from_coo(s)
    >>> s2
    <DOK: shape=(4, 4), dtype=float64, nnz=4, fill_value=0.0>
    """
    result = cls(x.shape, dtype=x.dtype, fill_value=x.fill_value)
    # Transfer every stored entry: coords is (ndim, nnz), so iterating
    # its transpose pairs each coordinate column with its value.
    for coord, value in zip(x.coords.T, x.data):
        result.data[tuple(coord)] = value
    return result
|
PyCBC-1.16.1 | PyCBC-1.16.1//pycbc/events/coinc.pyclass:LiveCoincTimeslideBackgroundEstimator/restore_state | @staticmethod
def restore_state(filename):
"""Restore state of the background buffers from a file"""
from six.moves import cPickle
return cPickle.load(filename)
|
fresco-1.0.1 | fresco-1.0.1//fresco/util/common.pyfile:/fresco/util/common.py:function:fq_path/fq_path | def fq_path(ob):
"""
Return the fully qualified path of ``ob``, expected to be a function or
method
"""
name = getattr(ob, '__qualname__', None) or getattr(ob, '__name__', None
) or repr(ob)
module = getattr(ob, '__module__', None) or ''
return '{}.{}'.format(module, name)
|
def display_init_scene_folder(root):
    """Print a confirmation that the scenes directory was created at *root*."""
    print('Created scenes directory at {}'.format(root))
|
def optimize_db(cursor):
    """Tune sqlite pragmas for efficiency on a high-memory machine.

    Enlarges the page cache (negative cache_size is interpreted by sqlite
    as a size in KiB) and keeps temporary tables in memory.
    """
    cache_kib = int(0.1 * 10 ** 7)
    cursor.execute('PRAGMA cache_size = -%i' % cache_kib)
    cursor.execute('PRAGMA temp_store = 2')
|
def new_model():
    """
    Return a new empty model, or None if this plugin provides none.

    Called in response to File > New from the GUI, and also when the GUI
    is started without a model.
    """
    return None
|
@staticmethod
def update_author_min_max_date(min_date, max_date, target_author,
    author_field='author_uuid'):
    """
    Get the query to update demography_min_date and demography_max_date of a given author

    :param min_date: new demography_min_date
    :param max_date: new demography_max_date
    :param target_author: target author to be updated
    :param author_field: author field
    :return: the query to be executed to update demography data of an author
    """
    # Elasticsearch update-by-query body: a painless script rewrites both
    # demography dates on every document whose `author_field` term matches
    # `target_author`.  Values are interpolated as plain strings via %s, so
    # the caller is responsible for supplying trusted/escaped values.
    es_query = (
        """
        {
          "script": {
            "source":
                "ctx._source.demography_min_date = params.min_date;ctx._source.demography_max_date = params.max_date;",
            "lang": "painless",
            "params": {
                "min_date": "%s",
                "max_date": "%s"
            }
        },
        "query": {
            "term": {
                "%s": "%s"
            }
        }
        }
        """
        % (min_date, max_date, author_field, target_author))
    return es_query
|
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    suffix = '.post%d' % pieces['distance']
    if pieces['dirty']:
        suffix += '.dev0'
    tag = pieces['closest-tag']
    if tag:
        # A clean build exactly at the tag renders as the bare tag.
        return tag + (suffix if pieces['distance'] or pieces['dirty'] else '')
    return '0' + suffix
|
def plus_or_dot(pieces):
    """Return '.' when the closest tag already contains a '+', else '+'."""
    return '.' if '+' in pieces.get('closest-tag', '') else '+'
|
notedown | notedown//notedown.pyclass:MarkdownReader/pre_process_text_block | @staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
|
asyncssh-2.2.1 | asyncssh-2.2.1//asyncssh/connection.pyfile:/asyncssh/connection.py:function:_select_algs/_select_algs | def _select_algs(alg_type, algs, possible_algs, none_value=None):
"""Select a set of allowed algorithms"""
if algs == ():
return possible_algs
elif algs:
result = []
for alg_str in algs:
alg = alg_str.encode('ascii')
if alg not in possible_algs:
raise ValueError('%s is not a valid %s algorithm' % (
alg_str, alg_type))
result.append(alg)
return result
elif none_value:
return [none_value]
else:
raise ValueError('No %s algorithms selected' % alg_type)
|
def set_debug_print_state(debug_enable):
    """Enable or disable debug prints.

    Stores *debug_enable* in the module-level ``_debug`` flag that the
    debug printing helpers consult.
    """
    global _debug
    _debug = debug_enable
|
impacket | impacket//dcerpc/v5/enum.pyfile:/dcerpc/v5/enum.py:function:_is_descriptor/_is_descriptor | def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return hasattr(obj, '__get__') or hasattr(obj, '__set__') or hasattr(obj,
'__delete__')
|
def python_to_euc(uni_char, as_bytes=False):
    """
    Return the EUC (GB2312) hex representation of a Python unicode character.

    Converts a one-character Python unicode string (e.g. u'\\u4e00') to the
    corresponding EUC hex string ('d2bb').  With ``as_bytes=True`` the hex
    string is returned UTF-8 encoded as :class:`bytes`.
    """
    # repr(b'\xd2\xbb') -> "b'\\xd2\\xbb'"; strip the b-prefix/quotes and
    # the \x escapes to leave the bare hex digits.
    euc = repr(uni_char.encode('gb2312'))[1:-1].replace('\\x', '').strip("'")
    if as_bytes:
        euc = euc.encode('utf-8')
        # Bug fix: this assert previously ran unconditionally, so the default
        # str-returning path always raised AssertionError on Python 3.  Only
        # the as_bytes branch can (and should) guarantee a bytes result.
        assert isinstance(euc, bytes)
    return euc
|
def input_code():
    """
    Ask the wormhole to perform interactive entry of the code, with
    completion on the nameplate and/or words.

    No user IO is performed here; instead a 'code-entry helper' object is
    returned and the application drives the IO with it: fetching
    completion lists and submitting the finished code.  See
    ``input_with_completion`` for a ``readline``-based CLI wrapper.

    Deferreds returned by ``get_code()`` fire once the final code is
    submitted through the helper.  Only one of
    generate_code/set_code/input_code may be used.

    :return: a code-entry helper instance
    :rtype: IHelper
    """
|
def count_by(iterable, iteratee):
    """
    Count the elements of *iterable*, grouped by the key *iteratee* returns.

    Like group_by, but instead of returning the grouped elements it returns
    only the size of each group — think of a sorting hat reporting the
    final number of students in every house.

    params: array, iteratee
        iterable -> list, set, generator
        iteratee -> a function or a lambda used to derive the group key

    Examples
    >>> _.count_by([1, 2, 3, 4, 5], lambda x: 'even' if x % 2 == 0 else 'odd')
    >>> {"odd": 3, "even": 2}
    """
    from collections import defaultdict
    counts = defaultdict(int)
    for item in iterable:
        # Bug fix: the original computed the key, discarded it, and called
        # iteratee(item) a second time — doubling the work (and any side
        # effects) of the key function.  Call it exactly once per element.
        counts[iteratee(item)] += 1
    return dict(counts)
|
def normalize_directory(directory):
    """Normalize a directory path.

    Turns the path into a relative directory path (a no-op when it is
    already relative):

        /foo/bar/qaz -> foo/bar/qaz
        foo/bar/qaz  -> foo/bar/qaz

    A normalized path can then be combined with the root directory's path.
    """
    anchor = directory.root
    return directory.relative_to(anchor)
|
@classmethod
def from_queryset(cls, other, **kwargs):
    """Build an instance of this queryset class from another QuerySet's data.

    Clones *other*'s query and internal bookkeeping onto a new ``cls``
    instance, then overrides attributes with any extra keyword arguments.
    NOTE(review): relies on Django QuerySet private attributes
    (_sticky_filter, _db, _hints, ...) — verify against the installed
    Django version.
    """
    query = other.query.clone()
    if other._sticky_filter:
        # Preserve Django's sticky-filter flag so chained filters keep
        # combining the same way they would on the original queryset.
        query.filter_is_sticky = True
    clone = cls(model=other.model, query=query, using=other._db, hints=
        other._hints)
    clone._for_write = other._for_write
    # Shallow-copy the prefetch lookups so mutating the clone's list
    # cannot affect the original queryset.
    clone._prefetch_related_lookups = other._prefetch_related_lookups[:]
    clone._known_related_objects = other._known_related_objects
    clone._iterable_class = other._iterable_class
    clone._fields = other._fields
    # Extra keyword arguments win over the copied state.
    clone.__dict__.update(kwargs)
    return clone
|
regression | regression//processing.pyfile:/processing.py:function:_is_float/_is_float | def _is_float(value):
"""Return True if value is a floating point number, False otherwise."""
try:
float(value)
return True
except ValueError:
return False
|
threeML-1.1.0 | threeML-1.1.0//threeML/minimizer/minuit_minimizer.pyclass:MinuitMinimizer/_parameter_name_to_minuit_name | @staticmethod
def _parameter_name_to_minuit_name(parameter):
"""
Translate the name of the parameter to the format accepted by Minuit
:param parameter: the parameter name, of the form source.component.shape.parname
:return: a minuit-friendly name for the parameter, such as source_component_shape_parname
"""
return parameter.replace('.', '_')
|
def parse_ffl_number(ffl_number, parse_values=True):
    """
    Split an FFL number into a dictionary of its component fields.

    :param ffl_number: the FFL number, either in X-XX-XXX-XX-XX-XXXXX form
        or as the bare 15-character string
    :param parse_values: when True the numeric fields are returned as ints
        (useful for databases); otherwise they stay strings
    :return: dict of parsed FFL fields
    """
    digits = ffl_number.replace('-', '')
    if len(digits) != 15:
        raise ValueError('Invalid ffl Length: %s for %s' % (len(digits),
            digits))
    render = int if parse_values else str
    return {
        'FFLRegion': render(digits[0]),
        'FFLDistrict': render(digits[1:3]),
        'FFLCounty': render(digits[3:6]),
        'FFLType': render(digits[6:8]),
        # The expiration code is alphanumeric, so it is never int-parsed.
        'FFLExpiration': digits[8:10],
        'FFLSequence': render(digits[10:]),
        'FFLNumber': digits,
    }
|
def check_gindexer_compatibility(gindexer, resolution, store_whole_genome):
    """Sanity check for gindexer.

    This function tests if the gindexer is compatible with
    other properties of the dataset, including the resolution and
    the store_whole_genome argument

    A ValueError is thrown if the gindexer is not valid.
    """
    # With whole-genome storage at resolution > 1, intervals must align
    # with resolution-sized bins, otherwise coverage would be rounded
    # inconsistently across intervals.
    if resolution is not None and resolution > 1 and store_whole_genome:
        if gindexer is not None and gindexer.binsize % resolution > 0:
            raise ValueError(
                'binsize must be an integer-multipe of resolution. Got binsize={} and resolution={}'
                .format(gindexer.binsize, resolution))
        # gindexer may be None here; fall back to iterating nothing.
        for iv_ in (gindexer or []):
            # Every interval start must land on a resolution boundary.
            if iv_.start % resolution > 0:
                raise ValueError(
                    'Please ensure that all interval starts line up with the resolution-sized bins. This is necessary to prevent rounding issues. Interval ({}:{}-{}) not compatible with resolution {}. '
                    .format(iv_.chrom, iv_.start, iv_.end, resolution) +
                    'Consider using "janggu-trim <input_roi> <trun_output> -divisible_by {resolution}"'
                    .format(resolution=resolution))
    # Without whole-genome storage a region of interest is mandatory.
    if not store_whole_genome and gindexer is None:
        raise ValueError('Either specify roi or store_whole_genome=True')
|
def year():
    """Return the calendar year of this date/time object."""
|
gaitutils | gaitutils//viz/plot_plotly.pyfile:/viz/plot_plotly.py:function:_plot_timedep_vels/_plot_timedep_vels | def _plot_timedep_vels(vels, labels, title=None):
"""Plot trial time-dependent velocities"""
traces = list()
for vel, label in zip(vels, labels):
trace = dict(y=vel, text=label, name=label, hoverinfo='x+y+text')
traces.append(trace)
layout = dict(title=title, xaxis=dict(title='% of trial', automargin=
True), yaxis=dict(title='Velocity (m/s)'))
return dict(data=traces, layout=layout)
|
@classmethod
def pending_secondary_email_added(cls, val):
    """
    Create an EventType instance set to the
    ``pending_secondary_email_added`` tag, carrying ``val``.

    :param PendingSecondaryEmailAddedType val:
    :rtype: EventType
    """
    tag = 'pending_secondary_email_added'
    return cls(tag, val)
|
def snap(frame: int=0):
    """Snap selected strips to *frame*.

    Stub for the ``bpy.ops.sequencer.snap`` operator; does nothing outside
    Blender.

    :param frame: Frame, Frame where selected strips will be snapped
    :type frame: int
    """
    pass
|