repo
stringlengths
1
29
path
stringlengths
24
332
code
stringlengths
39
579k
shuttle
shuttle//providers/bytom/utils.pyfile:/providers/bytom/utils.py:function:contract_arguments/contract_arguments
def contract_arguments(amount, address):
    """Build the argument list for a Bytom contract call.

    :param amount: bytom amount.
    :type amount: int
    :param address: bytom address.
    :type address: str
    :returns: list -- bytom contract arguments.
    """
    return [
        {'type': 'integer', 'value': amount},
        {'type': 'address', 'value': address},
        {'type': 'data', 'value': ''},
    ]
jcs-dss-sdk-1.0.4
jcs-dss-sdk-1.0.4//src/jcsclient/utils.pyfile:/src/jcsclient/utils.py:function:push_indexed_params/push_indexed_params
def push_indexed_params(params, key, vals):
    """Populate *params* with indexed keys for a list of values.

    The key is changed from plural to singular by dropping a trailing 's',
    so ``--image-ids jmi-xxx jmi-yyy`` becomes
    ``ImageId.1=jmi-xxx&ImageId.2=jmi-yyy``.  Comma-separated
    ``name=value`` elements (filter syntax) are expanded into
    ``<key>.<idx>.<name>`` entries; a ``Values`` name additionally gets
    its own sub-index.

    :param params: dictionary to populate (mutated in place)
    :param key: key to be used in the dictionary
    :param vals: list of values to be saved in the dict
    :raises ValueError: on malformed ``name=value`` elements
    :return: Nothing
    """
    # endswith() is safe for an empty key, unlike key[-1].
    if key.endswith('s'):
        key = key[:-1]
    for idx, val in enumerate(vals, start=1):
        key_index = key + '.' + str(idx)
        elements = val.split(',')
        # Simple scalar value: store it directly under the indexed key.
        if len(elements) == 1 and val.find('=') == -1:
            params[key_index] = val
            continue
        for element in elements:
            if element.find('=') == -1:
                raise ValueError(
                    'Bad request syntax. Please see help for valid request.')
            parts = element.split('=')
            if len(parts) != 2:
                # Fixed: the message previously lacked the space before 'given'.
                raise ValueError(
                    'Unsupported value ' + element + ' given in request.')
            element_key, element_val = parts
            if element_key == 'Values':
                # 'Values' becomes 'Value.<idx>' to index repeated values.
                element_key = element_key[:-1] + '.' + str(idx)
            params[key_index + '.' + element_key] = element_val
pagebot
pagebot//toolbox/transformer.pyfile:/toolbox/transformer.py:function:pyAttrName2XmlAttrName/pyAttrName2XmlAttrName
def pyAttrName2XmlAttrName(key):
    """Translate a Python attribute name into its XML/HTML counterpart.

    'cssClass' maps to 'class' and 'cssId' maps to 'id'.  HTML5 data
    attributes written as *data_xxxx* become *data-xxxx*.
    """
    special = {'cssClass': 'class', 'cssId': 'id'}
    if key in special:
        return special[key]
    if key.startswith('data'):
        return key.replace('_', '-')
    return key
metano-1.3.0
metano-1.3.0//src/to_check_constraints.pyfile:/src/to_check_constraints.py:function:checkBoundsByModel/checkBoundsByModel
def checkBoundsByModel(solution, model):
    """Check whether the flux distribution *solution* violates any flux
    bounds defined for the reactions of *model*.

    Returns:
        (lbViolations, ubViolations) -- {reaction: (flux, bound)} dicts
    """
    lbViolations = {}
    ubViolations = {}
    for rea in solution:
        if rea in model.reactionDict:
            reaction = model.reactions[model.reactionDict[rea]]
            flux = solution[rea]
            if flux < reaction.lb:
                lbViolations[rea] = flux, reaction.lb
            elif flux > reaction.ub:
                ubViolations[rea] = flux, reaction.ub
    return lbViolations, ubViolations
mymcplus-3.0.2
mymcplus-3.0.2//mymcplus/gui/utils.pyfile:/mymcplus/gui/utils.py:function:single_title/single_title
def single_title(title):
    """Convert the two parts of an icon.sys title into one string."""
    joined = '%s %s' % (title[0], title[1])
    # Collapse any runs of whitespace into single spaces.
    return ' '.join(joined.split())
dropbox
dropbox//team_log.pyclass:EventType/shared_link_download
@classmethod
def shared_link_download(cls, val):
    """Create an instance of this class set to the
    ``shared_link_download`` tag with value ``val``.

    :param SharedLinkDownloadType val:
    :rtype: EventType
    """
    tag = 'shared_link_download'
    return cls(tag, val)
simplegmail-2.0.0
simplegmail-2.0.0//simplegmail/query.pyfile:/simplegmail/query.py:function:_google_sheets/_google_sheets
def _google_sheets(): """ Returns a query item matching messages that have Google Sheets attachments. """ return f'has:spreadsheet'
colander-1.7.0
colander-1.7.0//colander/compat.pyfile:/colander/compat.py:function:text_/text_
def text_(s, encoding='latin-1', errors='strict'):
    """Decode *s* with *encoding* when it is ``bytes``; otherwise return
    it unchanged."""
    return s.decode(encoding, errors) if isinstance(s, bytes) else s
OpenTEA-3.2.0
OpenTEA-3.2.0//src/opentea/noob/asciigraph.pyfile:/src/opentea/noob/asciigraph.py:function:_nob_print_params/_nob_print_params
def _nob_print_params(tab_length=2, line_length=40): """Defining parameters and special characters for nested object printing Inputs: ---------- tab_length : Number of spaces describing a tab line_length : the maximum allowed number of characters in a line Returns : --------- chars : a dictionnary gathering special characters and max line length """ chars = dict() chars['vertical_bar'] = u'┃' chars['horizontal_bar'] = u'━' chars['child_bar'] = u'┓' chars['standard_child'] = u'┣' chars['last_child'] = u'┗' chars['tab'] = ' ' * tab_length chars['max_length'] = line_length return chars
hdx
hdx//utilities/dictandlist.pyfile:/utilities/dictandlist.py:function:args_to_dict/args_to_dict
def args_to_dict(args):
    """Convert comma separated ``key=value`` command line arguments to a
    dictionary.

    Args:
        args (str): Command line arguments, e.g. ``'a=1,b=2'``

    Returns:
        Dict[str, str]: Dictionary of arguments

    Raises:
        ValueError: if an argument contains no ``=``.
    """
    arguments = dict()
    for arg in args.split(','):
        # Split only on the first '=' so values may themselves contain '='.
        key, value = arg.split('=', 1)
        arguments[key] = value
    return arguments
m01.i18n-3.1.0
m01.i18n-3.1.0//src/m01/i18n/interfaces.pyclass:II18nRead/queryAttribute
def queryAttribute(name, lang=None, default=None):
    """Get the *name* attribute of the language specific translation,
    or *default* when no translation/attribute is found.

    Parameter:

    name -- Attribute name.

    lang -- Language code, for example 'de'.  If None the default
    language's value is used.

    default -- Any object, returned when the lookup fails.

    Return Value:

    object -- Value of the language specific data object's attribute, or
    *default* if not found.
    """
DateTime
DateTime//interfaces.pyclass:IDateTime/strftime
def strftime(format):
    """Format the date/time using the *current timezone representation*.

    format -- a format string; presumably standard ``time.strftime``
    directives -- confirm against the implementation.
    """
ftw.tabbedview-4.2.0
ftw.tabbedview-4.2.0//ftw/tabbedview/interfaces.pyclass:ITabbedViewEndpoints/reorder
def reorder():
    """Endpoint called when the items in the grid are reordered."""
enablebanking
enablebanking//models/account_links.pyclass:AccountLinks/__ne__
def __ne__(A, other): """Returns true if both objects are not equal""" return not A == other
printdevDAG-0.1
printdevDAG-0.1//src/printdevDAG/_depth.pyclass:GraphXformLines/xform
@classmethod
def xform(cls, column_headers, lines):
    """Prefix the first column of every line and yield just the line info.

    :param column_headers: the column headers
    :type column_headers: list of str
    :param lines: information about each line
    :type lines: dict of str * str
    """
    first_col = column_headers[0]
    for entry in lines:
        info = entry['node']
        info[first_col] = cls.calculate_prefix(entry) + info[first_col]
        yield info
reveal-user-annotation-0.2.2
reveal-user-annotation-0.2.2//reveal_user_annotation/text/text_util.pyfile:/reveal_user_annotation/text/text_util.py:function:reduce_list_of_bags_of_words/reduce_list_of_bags_of_words
def reduce_list_of_bags_of_words(list_of_keyword_sets):
    """Reduce a number of keyword sets to a single bag-of-words.

    Input:  - list_of_keyword_sets: a python list of sets of strings.

    Output: - bag_of_words: the corresponding multi-set (bag-of-words) as
              a python dictionary mapping keyword -> occurrence count.
    """
    # Counter replaces the hand-rolled membership test / increment loop.
    from collections import Counter
    bag_of_words = Counter()
    for keyword_set in list_of_keyword_sets:
        bag_of_words.update(keyword_set)
    # Return a plain dict, matching the original return type.
    return dict(bag_of_words)
OttoDiff-2.0.7
OttoDiff-2.0.7//OttoDiff/reverse.pyfile:/OttoDiff/reverse.py:function:find_df_dx/find_df_dx
def find_df_dx(f, x):
    """Find the partial derivative of f with respect to given x

    EXAMPLES
    =========
    >>> x = VariableNode(3)
    >>> f = 2 * x
    >>> print(find_df_dx(f=f, x=x))
    2
    """
    # Seed the output node's gradient, then read the accumulated gradient
    # at x.  NOTE(review): reading x.grad presumably triggers/collects the
    # reverse-mode sweep -- confirm against VariableNode.grad.
    f._grad = 1
    return x.grad
hickle
hickle//loaders/load_astropy.pyfile:/loaders/load_astropy.py:function:create_astropy_constant/create_astropy_constant
def create_astropy_constant(py_obj, h_group, call_id=0, **kwargs):
    """ dumps an astropy constant

    Args:
        py_obj: python object to dump; an astropy constant (has value,
            unit, abbrev, name, reference, uncertainty, system attributes)
        h_group (h5.File.group): group to dump data into.
        call_id (int): index to identify object's relative location in the
            iterable.
    """
    # Store the numeric value as a float64 dataset named by call position.
    d = h_group.create_dataset('data_%i' % call_id, data=py_obj.value,
        dtype='float64')
    # Tag the dataset so the matching loader can recognise it.
    d.attrs['type'] = [b'astropy_constant']
    d.attrs['unit'] = [str(py_obj.unit)]
    d.attrs['abbrev'] = [str(py_obj.abbrev)]
    d.attrs['name'] = [str(py_obj.name)]
    d.attrs['reference'] = [str(py_obj.reference)]
    d.attrs['uncertainty'] = [py_obj.uncertainty]
    # 'system' is optional; only stored when truthy.
    if py_obj.system:
        d.attrs['system'] = [py_obj.system]
pynusmv-tools-1.0rc10
pynusmv-tools-1.0rc10//pynusmv_tools/arctl/cmd/trace.pyfile:/pynusmv_tools/arctl/cmd/trace.py:function:print_bdd/print_bdd
def print_bdd(bdd):
    """Print bdd at stdout."""
    assignments = bdd.get_str_values()
    for var, val in assignments.items():
        print(var, '=', val)
hera-py-1.0.2
hera-py-1.0.2//hera/data.pyclass:Token/Sym
@classmethod
def Sym(cls, s, location=None):
    """Construct a symbol token."""
    token_type = cls.SYMBOL
    return cls(token_type, s, location)
openmdao-3.1.0
openmdao-3.1.0//openmdao/solvers/linear/petsc_ksp.pyfile:/openmdao/solvers/linear/petsc_ksp.py:function:_get_petsc_vec_array_new/_get_petsc_vec_array_new
def _get_petsc_vec_array_new(vec): """ Get the array of values for the given PETSc vector. Helper function to handle a petsc backwards incompatibility. Parameters ---------- vec : petsc vector Vector whose data is being requested. Returns ------- ndarray A readonly copy of the array of values from vec. """ return vec.getArray(readonly=True)
TelethonGoblenusTest-0.15.5
TelethonGoblenusTest-0.15.5//telethon/crypto/factorization.pyclass:Factorization/gcd
@staticmethod def gcd(a, b): """ Calculates the Greatest Common Divisor. :param a: the first number. :param b: the second number. :return: GCD(a, b) """ while b: a, b = b, a % b return a
wflow
wflow//wflow_fit.pyfile:/wflow_fit.py:function:configget/configget
def configget(config, section, var, default):
    """Get a parameter from the config file, returning *default* when the
    lookup fails.

    Parameters:
    config -- ConfigParser-like object supporting ``get(section, var)``
    section -- section name
    var -- option name
    default -- value returned (and echoed to stdout) on a failed lookup

    Returns the configured value, or *default* if the option is missing.
    """
    try:
        return config.get(section, var)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.  %-formatting also keeps the message
        # from raising TypeError when default is not a string.
        print('returning default (%s) for %s:%s' % (default, section, var))
        return default
khnum
khnum//khnum.pyfile:/khnum.py:function:hnum/hnum
def hnum(num, units='decimal'):
    """Return *num* as a rounded human readable string with a unit suffix.

    *units* selects the scale by its first letter (case-insensitive):
    'd...' decimal (K/M/B/T/Q/Qn/S/Sp, steps of 1000),
    'b...' bytes   (KB/MB/GB/.../YB, steps of 1000),
    's...' SI binary (KiB/MiB/.../YiB, steps of 1024).

    >>> hnum(123456789)
    '123.5M'
    >>> hnum(123456789, 'b')
    '123.5MB'
    >>> hnum(123456789, 's')
    '117.7MiB'

    raises ValueError for un-supported units
    """
    lowered = units.lower()
    if lowered.startswith('d'):
        prefixes = ['', 'K', 'M', 'B', 'T', 'Q', 'Qn', 'S']
        boundary, last_unit, suffix = 1000.0, 'Sp', ''
    elif lowered.startswith('b'):
        prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
        boundary, last_unit, suffix = 1000.0, 'Y', 'B'
    elif lowered.startswith('s'):
        prefixes = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']
        boundary, last_unit, suffix = 1024.0, 'Yi', 'B'
    else:
        raise ValueError('unsupported units: %s' % units)
    for prefix in prefixes:
        if abs(num) < boundary:
            return '%3.1f%s%s' % (num, prefix, suffix)
        num /= boundary
    # Value exceeded every listed prefix: use the final unit.
    return '%.1f%s%s' % (num, last_unit, suffix)
pyretis
pyretis//visualization/common.pyfile:/visualization/common.py:function:get_min_max/get_min_max
def get_min_max(mydata, min_max, mini, maxi, acc):
    """Find minimum and maximum indeces.

    Parameters
    ----------
    mydata : list
        List of cycle numbers to be checked for index of given min/max
    min_max : list
        List of [min, max] cycle values to search for
    mini : dict
        Dictionary of found min cycle number of lists acc/rej
    maxi : dict
        Dictionary of found max cycle number of lists acc/rej
    acc : string
        'a' or 'r' for acc/rej lists, respectively

    Returns/updates
    ---------------
    mini, maxi : dict
        Stores values for min/max in acc and rej lists to the correct
        dicts (mutated in place; nothing is returned).
    """
    # First pass: find the index of the first value reaching min_max[0].
    for i, item in enumerate(mydata):
        if item == min_max[0]:
            # Exact match: this index is the minimum.
            mini[acc] = i
            break
        elif item > min_max[0]:
            # Overshot without an exact match: step back one index.
            mini[acc] = i - 1 if i > 0 else 0
            break
    # NOTE(review): if every value is below min_max[0], the loop above never
    # breaks and mini[acc] stays unset, so the slice below raises KeyError
    # -- confirm callers guarantee a hit.
    # Second pass: scan from the min index for the first value above
    # min_max[1].
    for i, item in enumerate(mydata[mini[acc]:]):
        if item > min_max[1]:
            if i == 0:
                maxi[acc] = 0
                break
            else:
                maxi[acc] = mini[acc] + i - 1
                break
        elif i == len(mydata) - 1:
            # NOTE(review): i indexes the slice but is compared against the
            # full list length -- only reachable when mini[acc] == 0.
            maxi[acc] = len(mydata) - 1
zc
zc//intid/interfaces.pyclass:IIntIdsManage/__len__
def __len__():
    """Return the number of objects indexed."""
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/cognitoidentityprovider.pyfile:/pyboto3/cognitoidentityprovider.py:function:can_paginate/can_paginate
def can_paginate(operation_name=None):
    """
    Check if an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name as
        the method name on the client. For example, if the method name is
        create_foo, and you'd normally invoke the operation as
        client.create_foo(**kwargs), if the create_foo operation can be
        paginated, you can use the call client.get_paginator('create_foo').
    """
    # Documentation stub generated from botocore; no implementation.
    pass
arat-0.0
arat-0.0//server/annotator.pyfile:/server/annotator.py:function:_offsets_equal/_offsets_equal
def _offsets_equal(o1, o2): """ Given two lists of (start, end) integer offset sets, returns whether they identify the same sets of characters. """ if o1 == o2: return True return sorted(o1) == sorted(o2)
django-viewpack-0.1.4
django-viewpack-0.1.4//src/viewpack/utils.pyfile:/src/viewpack/utils.py:function:get_app_name/get_app_name
def get_app_name(cls):
    """Return the app_name associated with the given ViewPack class.

    Falls back to the first component of the class's module path when the
    class declares no explicit ``app_name``.
    """
    explicit = getattr(cls, 'app_name', None)
    if explicit is None:
        return cls.__module__.partition('.')[0]
    return explicit
numsys
numsys//nonstandard.pyfile:/nonstandard.py:function:n1_to/n1_to
def n1_to(n, sgn='-', sep='.'):
    """converts a base negative one value to base ten"""
    length = len(n)
    if length % 2 == 0:
        return -(length // 2)
    return length // 2 + 1
fake-bpy-module-2.78-20200428
fake-bpy-module-2.78-20200428//bpy/ops/ptcache.pyfile:/bpy/ops/ptcache.py:function:free_bake_all/free_bake_all
def free_bake_all():
    """Free all baked caches of all objects in the current scene"""
    # Stub from a fake bpy module for IDE support; intentionally empty.
    pass
thapbi_pict-0.7.0
thapbi_pict-0.7.0//thapbi_pict/utils.pyfile:/thapbi_pict/utils.py:function:sample_sort/sample_sort
def sample_sort(sample_names):
    """Sort sample names like a human.

    Our samples have occasionally used underscores and minus signs
    inconsistently in sample names as field separators or space
    substitutions. Therefore simple ASCII sorting can give surprises
    such as not grouping by prefix (since the underscore is sorted
    after the digits and letters):

    >>> sorted(["N01-a", "N01_b", "N01 c", "N011-a"])
    ['N01 c', 'N01-a', 'N011-a', 'N01_b']

    We specifically want "_" (ASCII 95, after the letters) to sort like
    " " or "-" (ASCII 32 or 45, both before the digits and letters). In
    case any samples are using plus/minus, will map underscore and space
    to the minus sign for sorting.

    >>> sample_sort(["N01-a", "N01_b", "N01 c", "N011-d"])
    ['N01-a', 'N01_b', 'N01 c', 'N011-d']
    """
    return sorted(sample_names, key=lambda _: _.replace('_', '-').replace(
        ' ', '-'))
gitdb-4.0.5
gitdb-4.0.5//gitdb/fun.pyfile:/gitdb/fun.py:function:delta_chunk_apply/delta_chunk_apply
def delta_chunk_apply(dc, bbuf, write):
    """Apply own data to the target buffer

    :param bbuf: buffer providing source bytes for copy operations
    :param write: write method to call with data to write"""
    if dc.data is None:
        # Copy operation: take ts bytes from the source buffer at offset so.
        write(bbuf[dc.so:dc.so + dc.ts])
    else:
        # Add operation: emit own data, truncated to ts bytes if longer.
        payload = dc.data if dc.ts >= len(dc.data) else dc.data[:dc.ts]
        write(payload)
pytzer
pytzer//parameters.pyfile:/parameters.py:function:psi_Mg_CO3_SO4_HMW84/psi_Mg_CO3_SO4_HMW84
def psi_Mg_CO3_SO4_HMW84(T, P):
    """c-a-a': magnesium carbonate sulfate [HMW84]."""
    # Zero interaction coefficient; only valid at exactly 298.15 K.
    return 0.0, T == 298.15
set_algebra
set_algebra//set_.pyclass:Set/__and
@staticmethod
def __and(A, B):
    """Return a new Set that is an intersection of A and B."""
    # A ∩ B computed as the difference of A and the complement of B.
    complement_of_b = ~B
    return A - complement_of_b
wiki-0.5
wiki-0.5//src/wiki/core/version.pyfile:/src/wiki/core/version.py:function:get_complete_version/get_complete_version
def get_complete_version(version=None):
    """Return the 5-tuple version.

    If *version* is supplied, validate its shape; otherwise fall back to
    ``wiki.VERSION``.
    """
    if version is not None:
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')
        return version
    from wiki import VERSION as version
    return version
latex2edx
latex2edx//main.pyclass:latex2edx/fix_table
@staticmethod
def fix_table(tree):
    """
    Force tables to have table-layout: auto, no borders on table data
    """
    for table in tree.findall('.//table'):
        table.set('style', 'table-layout:auto')
        for td in table.findall('.//td'):
            style = td.get('style', '')
            separator = '; ' if style else ''
            td.set('style', style + separator + 'border:none')
nuxeo
nuxeo//compat.pyfile:/compat.py:function:get_bytes/get_bytes
def get_bytes(data):
    """
    Return *data* as bytes, UTF-8 encoding it when it is not already.

    :param data: the input data
    :return: the bytes of data
    """
    if isinstance(data, bytes):
        return data
    return data.encode('utf-8')
dragonlib-0.1.7
dragonlib-0.1.7//dragonlib/utils/byte_word_values.pyfile:/dragonlib/utils/byte_word_values.py:function:word2bytes/word2bytes
def word2bytes(value):
    """Split a 16-bit word into its (high, low) byte pair.

    >>> word2bytes(0xff09)
    (255, 9)
    >>> word2bytes(0xffff + 1)
    Traceback (most recent call last):
        ...
    AssertionError
    """
    assert 0 <= value <= 65535
    high, low = divmod(value, 256)
    return high, low
autonomie_oidc_provider-4.2.1
autonomie_oidc_provider-4.2.1//autonomie_oidc_provider/models.pyfile:/autonomie_oidc_provider/models.py:function:includeme/includeme
def includeme(config):
    """Pyramid include hook; void function used to ensure the models are
    added to the metadatas (importing this module is the side effect)."""
    pass
funhandler
funhandler//base.pyfile:/base.py:function:set_file_storage/set_file_storage
def set_file_storage(source='local', **kwargs):
    """ Creates file storage depending on *source*.

    Parameters
    ----------
    source: str
        Default is 'local' which will create file storage in the
        local environment
    kwargs: dict
        Contains data to setup S3 storage
    """
    # Not yet implemented; placeholder for the storage backend setup.
    pass
RutishauserLabtoNWB-1.0.5
RutishauserLabtoNWB-1.0.5//RutishauserLabtoNWB/events/newolddelay/python/analysis/.ipynb_checkpoints/helper-checkpoint.pyfile:/RutishauserLabtoNWB/events/newolddelay/python/analysis/.ipynb_checkpoints/helper-checkpoint.py:function:adjustHF/adjustHF
def adjustHF(H, F, n_new, n_old):
    """Adjust hit/false alarm rate for ceiling effects, according to
    Macmillan & Creelman, pp 8.  urut/nov06

    Rates of exactly 0 or 1 are pulled half a count inside the range.
    """
    half_old = 1 / (2 * n_old)
    half_new = 1 / (2 * n_new)
    if H == 1:
        H = 1 - half_old
    if H == 0:
        H = half_old
    if F == 1:
        F = 1 - half_new
    if F == 0:
        F = half_new
    return H, F
chicksexer
chicksexer//util.pyfile:/util.py:function:set_log_level/set_log_level
def set_log_level(logger, log_level: int):
    """Set *log_level* on the given logger and on each of its handlers."""
    for target in [logger] + list(logger.handlers):
        target.setLevel(log_level)
taskflow
taskflow//listeners/base.pyfile:/listeners/base.py:function:_bulk_deregister/_bulk_deregister
def _bulk_deregister(notifier, registered, details_filter=None): """Bulk deregisters callbacks associated with many states.""" while registered: state, cb = registered.pop() notifier.deregister(state, cb, details_filter=details_filter)
wright
wright//util.pyfile:/util.py:function:camel_case/camel_case
def camel_case(name):
    """Convert whitespace-separated words into CamelCase.

    Every word is capitalized individually, e.g. 'foo bar' -> 'FooBar'.
    (Previously only the first word was capitalized -- 'foo bar' gave
    'Foobar' -- which is not CamelCase as documented.)
    """
    return ''.join(word.capitalize() for word in name.split())
planet
planet//api/filters.pyfile:/api/filters.py:function:is_filter_like/is_filter_like
def is_filter_like(filter_like):
    """Check if the provided dict looks like a search request or filter."""
    candidate = filter_like
    # A search request wraps the filter under its 'filter' key.
    if 'item_types' in candidate or 'filter' in candidate:
        candidate = candidate.get('filter', {})
    return 'type' in candidate and 'config' in candidate
xthematic
xthematic//themes.pyfile:/themes.py:function:_read_text/_read_text
def _read_text(file): """ Read a file in utf-8 encoding.""" with open(file, mode='r', encoding='utf-8') as f: return f.read()
django-comments-dab-1.4.0
django-comments-dab-1.4.0//comment/templatetags/comment_tags.pyfile:/comment/templatetags/comment_tags.py:function:include_static_jquery/include_static_jquery
def include_static_jquery():
    """Include static files (no-op placeholder: renders nothing)."""
    return None
snipskit-0.6.0
snipskit-0.6.0//src/snipskit/hermes/decorators.pyfile:/src/snipskit/hermes/decorators.py:function:session_queued/session_queued
def session_queued(method):
    """Decorator for a :class:`.HermesSnipsComponent` method registering it
    as a callback triggered when the dialogue manager queues the current
    session.
    """
    setattr(method, 'subscribe_method', 'subscribe_session_queued')
    return method
graphql-example-0.4.4
graphql-example-0.4.4//vendor/pip/_vendor/requests/packages/urllib3/packages/six.pyfile:/vendor/pip/_vendor/requests/packages/urllib3/packages/six.py:function:_add_doc/_add_doc
def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc
Beamr-0.3.5
Beamr-0.3.5//beamr/parsers/image.pyfile:/beamr/parsers/image.py:function:p_main/p_main
def p_main(t):
    """main : files shape dims"""
    # NOTE: the docstring above is a yacc (PLY) grammar rule -- do not edit.
    # Collect the three production values into a single result tuple.
    t[0] = t[1], t[2], t[3]
samsum
samsum//alignment_utils.pyfile:/alignment_utils.py:function:calculate_normalization_metrics/calculate_normalization_metrics
def calculate_normalization_metrics(genome_dict: dict) -> None:
    """
    Calculates the normalized abundance values for each header's RefSeq
    instance in genome_dict

    1. Reads per kilobase (RPK) is calculated using the reference sequence's
    length and number of reads (provided by the user via CLI)
    2. Fragments per kilobase per million mappable reads (FPKM) is calculated
    from the number of fragments (this is different from reads by, in a
    paired-end library, forward and reverse pair makes up one fragment)
    normalized by the reference sequence length and the number of reads
    mapped.
    3. Transcripts per million (TPM) is calculated similarly to FPKM but the
    order of operations is different.

    :param genome_dict: A dictionary of RefSeq instances indexed by headers
        (sequence names)
    :return: None
    """
    # Pass 1: RPK per sequence, accumulating the total for TPM scaling.
    rpk_sum = 0
    for header in sorted(genome_dict.keys()):
        ref_seq = genome_dict[header]
        ref_seq.calc_rpk(ref_seq.weight_total)
        rpk_sum += ref_seq.rpk
    # Pass 2: FPKM, skipping sequences with no mapped weight.
    for header in sorted(genome_dict.keys()):
        ref_seq = genome_dict[header]
        if ref_seq.weight_total == 0:
            continue
        ref_seq.calc_fpkm(ref_seq.weight_total)
    # Pass 3: TPM, scaled by the per-million RPK denominator.
    denominator = rpk_sum / 1000000.0
    for header in genome_dict.keys():
        ref_seq = genome_dict[header]
        ref_seq.calc_tpm(denominator)
    return
Cohen3-0.9.3
Cohen3-0.9.3//coherence/base.pyclass:Coherence/check_louie
@staticmethod def check_louie(receiver, signal, method='connect'): """ Check if the connect or disconnect method's arguments are valid in order to automatically convert to EventDispatcher's bind The old valid signals are: - Coherence.UPnP.Device.detection_completed - Coherence.UPnP.RootDevice.detection_completed - Coherence.UPnP.Device.removed - Coherence.UPnP.RootDevice.removed .. versionadded:: 0.9.0 """ if not callable(receiver): raise Exception( 'The receiver should be callable in order to use the method {method}' ) if not signal: raise Exception(f'We need a signal in order to use method {method}') if not (signal.startswith('Coherence.UPnP.Device.') or signal. startswith('Coherence.UPnP.RootDevice.')): raise Exception( 'We need a signal an old signal starting with: "Coherence.UPnP.Device." or "Coherence.UPnP.RootDevice."' )
nmrglue
nmrglue//fileio/pipe.pyfile:/fileio/pipe.py:function:unshape_data/unshape_data
def unshape_data(data):
    """
    Return 1D version of data.
    """
    # flatten() always returns a copy, never a view.
    flat = data.flatten()
    return flat
intergalactic
intergalactic//functions.pyfile:/functions.py:function:total_energy_ejected/total_energy_ejected
def total_energy_ejected(t):
    """
    Thermal and kinetic energy released by each type of SN up to the time t
    after the explosion, where tc is the characteristic cooling time of the
    shell sorrounding the remnant (53000 yrs)
    from Ferrini & Poggiantti, 1993, ApJ, 410, 44F
    """
    if t <= 0:
        return 0.0
    tc = 5.3e-05
    if t <= tc:
        # Early phase: energy grows linearly with time.
        return 9811.32 * t
    rt = (tc / t) ** 0.4
    return 1 - 0.44 * rt ** 2 * (1 - 0.41 * rt) - 0.22 * rt ** 2
tooz
tooz//utils.pyfile:/utils.py:function:convert_blocking/convert_blocking
def convert_blocking(blocking):
    """Converts a multi-type blocking variable into its derivatives.

    A bool passes through with no timeout; any other value becomes a
    float timeout with blocking forced to True.
    """
    if isinstance(blocking, bool):
        return blocking, None
    return True, float(blocking)
mercurial-5.4
mercurial-5.4//mercurial/interfaces/repository.pyclass:ilocalrepositorymain/tagtype
def tagtype(tagname):
    """Return the type of a given tag.

    tagname -- name of the tag to look up.
    """
python-powerdns-0.2.1
python-powerdns-0.2.1//powerdns/client.pyclass:PDNSApiClient/_get_error
@staticmethod def _get_error(response): """Get error message from API response :param dict response: API response :return: Error message as :func:`str` """ if 'error' in response: err = response.get('error') elif 'errors' in response: err = response.get('errors') else: err = 'No error message found' return err
iapws-1.4.1
iapws-1.4.1//iapws/_iapws.pyfile:/iapws/_iapws.py:function:_Refractive/_Refractive
def _Refractive(rho, T, l=0.5893): """Equation for the refractive index Parameters ---------- rho : float Density, [kg/m³] T : float Temperature, [K] l : float, optional Light Wavelength, [μm] Returns ------- n : float Refractive index, [-] Notes ------ Raise :class:`NotImplementedError` if input isn't in limit: * 0 ≤ ρ ≤ 1060 * 261.15 ≤ T ≤ 773.15 * 0.2 ≤ λ ≤ 1.1 Examples -------- >>> _Refractive(997.047435, 298.15, 0.2265) 1.39277824 >>> _Refractive(30.4758534, 773.15, 0.5893) 1.00949307 References ---------- IAPWS, Release on the Refractive Index of Ordinary Water Substance as a Function of Wavelength, Temperature and Pressure, http://www.iapws.org/relguide/rindex.pdf """ if rho < 0 or rho > 1060 or T < 261.15 or T > 773.15 or l < 0.2 or l > 1.1: raise NotImplementedError('Incoming out of bound') Lir = 5.432937 Luv = 0.229202 d = rho / 1000.0 Tr = T / 273.15 L = l / 0.589 a = [0.244257733, 0.00974634476, -0.00373234996, 0.000268678472, 0.0015892057, 0.00245934259, 0.90070492, -0.0166626219] A = d * (a[0] + a[1] * d + a[2] * Tr + a[3] * L ** 2 * Tr + a[4] / L ** 2 + a[5] / (L ** 2 - Luv ** 2) + a[6] / (L ** 2 - Lir ** 2) + a[7] * d ** 2) return ((2 * A + 1) / (1 - A)) ** 0.5
bambu-blog-3.3.1
bambu-blog-3.3.1//bambu_blog/helpers.pyfile:/bambu_blog/helpers.py:function:view_filter/view_filter
def view_filter(**kwargs):
    """
    Filters blog posts by year/month/day, category, tag or author.

    Date components combine cumulatively; category, tag and username are
    mutually exclusive (first match wins, in that order).
    """
    # NOTE(review): Category is imported but unused here -- confirm it is
    # not needed for side effects before removing.
    from bambu_blog.models import Post, Category
    posts = Post.objects.select_related().live()
    # Date components narrow the queryset cumulatively.
    if 'year' in kwargs:
        posts = posts.filter(date__year=int(kwargs['year']))
    if 'month' in kwargs:
        posts = posts.filter(date__month=int(kwargs['month']))
    if 'day' in kwargs:
        posts = posts.filter(date__day=int(kwargs['day']))
    # Only one of category/tag/username is applied.
    if 'category' in kwargs:
        posts = posts.filter(categories__slug=kwargs['category'])
    elif 'tag' in kwargs:
        posts = posts.filter(tags__slug=kwargs['tag'])
    elif 'username' in kwargs:
        posts = posts.filter(author__username=kwargs['username'])
    return posts
node-0.9.25
node-0.9.25//src/node/interfaces.pyclass:IAsAttrAccess/as_attribute_access
def as_attribute_access():
    """Return this node as IAttributeAccess implementing object."""
dvha-0.7.9
dvha-0.7.9//dvha/models/import_dicom.pyfile:/dvha/models/import_dicom.py:function:get_study_uid_dict/get_study_uid_dict
def get_study_uid_dict(checked_uids, parsed_dicom_data, multi_plan_only=False):
    """
    Group the checked plan uids by study instance uid so that plans on the
    same study are imported adjacently.

    :return: a dictionary with study uids for the keys and a list of
        associated plan uids for values
    :rtype: dict
    """
    study_uids = {}
    for plan_uid in checked_uids:
        study_uid = parsed_dicom_data[plan_uid].study_instance_uid_to_be_imported
        study_uids.setdefault(study_uid, []).append(plan_uid)
    if multi_plan_only:
        # Keep only studies carrying at least two plans.
        study_uids = {uid: plans for uid, plans in study_uids.items()
                      if len(plans) > 1}
    return study_uids
audiotools
audiotools//flac.pyfile:/flac.py:function:sizes_to_offsets/sizes_to_offsets
def sizes_to_offsets(sizes):
    """takes list of (frame_size, frame_frames) tuples and converts it
    to a list of (cumulative_size, frame_frames) tuples"""
    offsets = []
    position = 0
    for frame_size, frame_frames in sizes:
        offsets.append((position, frame_frames))
        position += frame_size
    return offsets
dynetx
dynetx//classes/function.pyfile:/classes/function.py:function:add_path/add_path
def add_path(G, nodes, t, **attr):
    """Add a path at time t.

    Parameters
    ----------
    G : graph
        A DyNetx graph
    nodes : iterable container
        A container of nodes.
    t : snapshot id (default=None)
        snapshot id

    Examples
    --------
    >>> G = dn.DynGraph()
    >>> dn.add_path(G, [0,1,2,3], t=0)
    """
    nlist = list(nodes)
    # Consecutive node pairs form the path's edges.
    path_edges = list(zip(nlist, nlist[1:]))
    G.add_interactions_from(path_edges, t, **attr)
git_pep8_commit_hook
git_pep8_commit_hook//commit_hook.pyfile:/commit_hook.py:function:_is_python_file/_is_python_file
def _is_python_file(filename): """Check if the input file looks like a Python script Returns True if the filename ends in ".py" or if the first line contains "python" and "#!", returns False otherwise. """ if filename.endswith('.py'): return True else: with open(filename, 'r') as file_handle: first_line = file_handle.readline() return 'python' in first_line and '#!' in first_line
awscfncli-0.5.2
awscfncli-0.5.2//awscfncli/commands/utils.pyfile:/awscfncli/commands/utils.py:function:custom_paginator/custom_paginator
def custom_paginator(f, l, **kwargs):
    """Simple custom paginator for those can_pageniate() returns false

    :param f: API function
    :param l: name of the list object to paginate
    :param kwargs: Args passes to the API function
    :return: iterator of result object
    """
    next_token = None
    while True:
        if next_token is None:
            result = f(**kwargs)
        else:
            result = f(NextToken=next_token, **kwargs)
        yield from result[l]
        # Stop once the response carries no continuation token.
        if 'NextToken' not in result:
            break
        next_token = result['NextToken']
abdbeam
abdbeam//core.pyfile:/core.py:function:_no_overlap/_no_overlap
def _no_overlap(y, z, y1, z1, y2, z2, pt_id, pt_1_id, pt_2_id): """ Checks if a point (y,z) is inside a line given by (y1,z1) and (y2,z2). Also check if the point id matches with the ids from the line. Returns ------- Boolean """ if y < min(y1, y2) or y > max(y1, y2) or z < min(z1, z2) or z > max(z1, z2 ): return True elif pt_id == pt_1_id or pt_id == pt_2_id: return True else: return False
lifelib-0.0.14
lifelib-0.0.14//lifelib/projects/ifrs17sim/projection.pyfile:/lifelib/projects/ifrs17sim/projection.py:function:PolsAccDeath/PolsAccDeath
def PolsAccDeath(t):
    """Number of policies: Accidental death (always zero in this model)."""
    return 0
mbdb-0.6
mbdb-0.6//mbdb/sqlparser.pyfile:/mbdb/sqlparser.py:function:p_statement_create_db/p_statement_create_db
def p_statement_create_db(p):
    """statement : CREATE DATABASE identifier"""
    # NOTE: the docstring above is a yacc (PLY) grammar rule -- do not edit.
    # Store the matched symbols (CREATE, DATABASE, identifier) as the result.
    p[0] = p[1:]
geosoup-0.1.21
geosoup-0.1.21//geosoup/common.pyclass:Sublist/calc_parabola_param
@staticmethod def calc_parabola_param(pt1, pt2, pt3): """ define a parabola using three points :param pt1: First point (x,y) :param pt2: Second point (x,y) :param pt3: Third point (x,y) :return tuple of a, b, and c for parabola a(x^2) + b*x + c = 0 """ x1, y1 = pt1 x2, y2 = pt2 x3, y3 = pt3 _m_ = (x1 - x2) * (x1 - x3) * (x2 - x3) a_param = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / _m_ b_param = (x3 * x3 * (y1 - y2) + x2 * x2 * (y3 - y1) + x1 * x1 * (y2 - y3) ) / _m_ c_param = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / _m_ return a_param, b_param, c_param
AnyBlok-0.22.5
AnyBlok-0.22.5//anyblok/bloks/anyblok_core/system/relationship.pyclass:RelationShip/add_field
@classmethod
def add_field(cls, rname, relation, model, table, ftype):
    """ Insert a relationship definition

    Persists one row describing this end of the relationship and, when the
    relation declares a remote side (``remote_name``), a second mirror row
    for the remote model with the inverse relationship type.

    :param rname: name of the relationship
    :param relation: instance of the relationship
    :param model: namespace of the model
    :param table: name of the table of the model
    :param ftype: type of the AnyBlok Field
    """
    # Metadata for both ends of the relation as recorded by the field
    # declaration (relation.info).
    local_column = relation.info.get('local_column')
    remote_column = relation.info.get('remote_column')
    remote_model = relation.info.get('remote_model')
    remote_name = relation.info.get('remote_name')
    label = relation.info.get('label')
    nullable = relation.info.get('nullable', True)
    # Row for the local side; 'code' is the unique "table.field" identifier.
    vals = dict(code=table + '.' + rname, model=model, name=rname,
        local_column=local_column, remote_model=remote_model, remote_name=
        remote_name, remote_column=remote_column, label=label, nullable=
        nullable, ftype=ftype)
    cls.insert(**vals)
    if remote_name:
        # Inverse relationship type for the mirror row.  The 'Many2One'
        # default covers the remaining case (presumably ftype ==
        # 'One2Many' — TODO confirm against the field declarations).
        remote_type = 'Many2One'
        if ftype == 'Many2One':
            remote_type = 'One2Many'
        elif ftype == 'Many2Many':
            remote_type = 'Many2Many'
        elif ftype == 'One2One':
            remote_type = 'One2One'
        # Resolve the remote model to obtain its table name for 'code'.
        m = cls.registry.get(remote_model)
        # Mirror row: local/remote columns and model/name pairs swapped,
        # label derived from the remote attribute name, flagged remote=True.
        vals = dict(code=m.__tablename__ + '.' + remote_name, model=
            remote_model, name=remote_name, local_column=remote_column,
            remote_model=model, remote_name=rname, remote_column=
            local_column, label=remote_name.capitalize().replace('_', ' '),
            nullable=True, ftype=remote_type, remote=True)
        cls.insert(**vals)
pvfactors
pvfactors//geometry/pvarray.pyclass:OrderedPVArray/init_from_dict
@classmethod
def init_from_dict(cls, pvarray_params, param_names=None):
    """Instantiate an ordered PV array from a dictionary of parameters.

    Parameters
    ----------
    pvarray_params : dict
        The parameters defining the PV array
    param_names : list of str, optional
        List of parameter names to pass to surfaces (Default = None)

    Returns
    -------
    OrderedPVArray
        Initialized Ordered PV Array
    """
    kwargs = dict(
        axis_azimuth=pvarray_params['axis_azimuth'],
        gcr=pvarray_params['gcr'],
        pvrow_height=pvarray_params['pvrow_height'],
        n_pvrows=pvarray_params['n_pvrows'],
        pvrow_width=pvarray_params['pvrow_width'],
        cut=pvarray_params.get('cut', {}),
        param_names=param_names)
    return cls(**kwargs)
pywinauto-0.6.8
pywinauto-0.6.8//pywinauto/findbestmatch.pyfile:/pywinauto/findbestmatch.py:function:is_above_or_to_left/is_above_or_to_left
def is_above_or_to_left(ref_control, other_ctrl):
    """Return True if other_ctrl is above or to the left of ref_control."""
    other_rect = other_ctrl.rectangle()
    ref_rect = ref_control.rectangle()
    # Entirely to the right of, or entirely below, the reference control:
    # it cannot be "above or to the left".
    if other_rect.left >= ref_rect.right or other_rect.top >= ref_rect.bottom:
        return False
    # Otherwise it qualifies only if it starts above or left of the
    # reference origin in at least one axis.
    return other_rect.top < ref_rect.top or other_rect.left < ref_rect.left
agilent-lightwave-0.2
agilent-lightwave-0.2//agilent-lightwave/laser.pyclass:Laser/dbm_to_watts
@staticmethod def dbm_to_watts(power_dbm): """Converts [dBm] to [W]. Args: power_dbm (int, float): Power in [dBm]. Returns: float: Power in [W]. """ power_W = 10.0 ** (power_dbm / 10.0) / 1000.0 return power_W
control-0.8.3
control-0.8.3//control/lti.pyfile:/control/lti.py:function:dcgain/dcgain
def dcgain(sys):
    """Return the zero-frequency (or DC) gain of the given system.

    Returns
    -------
    gain : ndarray
        The zero-frequency gain, or np.nan if the system has a pole at
        the origin
    """
    # Thin functional wrapper that delegates to the system's own method.
    gain = sys.dcgain()
    return gain
spike_py-0.99.17
spike_py-0.99.17//spike/File/HDF5File.pyfile:/spike/File/HDF5File.py:function:determine_chunkshape/determine_chunkshape
def determine_chunkshape(size1, size2):
    """Return an optimum (c1, c2) chunk shape for a size1 x size2 dataset.

    Each chunk dimension is the corresponding dataset dimension divided
    by 64 (float division), truncated, plus one.
    """
    def _chunk_dim(size):
        return int(size / 64.0 + 1)
    return _chunk_dim(size1), _chunk_dim(size2)
ukbb_parser-1.0.0
ukbb_parser-1.0.0//ukbb_parser/shared_utils/util.pyfile:/ukbb_parser/shared_utils/util.py:function:get_job_and_subjob_indices/get_job_and_subjob_indices
def get_job_and_subjob_indices(n_jobs, n_tasks, task_index):
    """
    Map a flat task index to (job_index, index_within_job, n_tasks_in_job).

    Tasks are spread as evenly as possible across the jobs: the first
    ``n_tasks % n_jobs`` jobs ("privileged" jobs) receive one extra task
    each.  For example, with 170 tasks over 50 jobs, 170 % 50 = 20, so the
    first 20 jobs get 4 tasks and the remaining 30 jobs get 3; tasks 0-79
    map onto the 4-task jobs and tasks 80-169 onto the 3-task jobs.
    """
    assert n_tasks >= n_jobs
    base = n_tasks // n_jobs           # tasks per unprivileged job
    n_privileged = n_tasks % n_jobs    # jobs receiving base + 1 tasks
    privileged_span = (base + 1) * n_privileged
    if task_index < privileged_span:
        # Inside the privileged region: jobs of size base + 1.
        return (task_index // (base + 1),
                task_index % (base + 1),
                base + 1)
    # Past the privileged region: offset into jobs of size base.
    offset = task_index - privileged_span
    return (n_privileged + offset // base,
            offset % base,
            base)
BTrees-4.7.2
BTrees-4.7.2//BTrees/Interfaces.pyclass:IMerge/union
def union(c1, c2):
    """Compute the Union of c1 and c2.

    If c1 is None, then c2 is returned, otherwise, if c2 is None,
    then c1 is returned.

    The output is a Set containing keys from the input collections.
    """
    # Interface declaration only: no body here.  Concrete implementations
    # are provided elsewhere by the classes that declare this interface.
txkube
txkube//_model.pyfile:/_model.py:function:required_unique/required_unique
def required_unique(objects, key):
    """
    A pyrsistent invariant which requires all objects in the given iterable
    to have a unique key.

    :param objects: The objects to check.
    :param key: A one-argument callable to compute the key of an object.

    :return: An invariant failure if any two or more objects have the same
        key computed.  An invariant success otherwise.
    """
    # Count each computed key once; collections.Counter replaces the
    # hand-rolled dict/set bookkeeping of the original implementation.
    from collections import Counter
    counts = Counter(map(key, objects))
    duplicate = {k for (k, count) in counts.items() if count > 1}
    if duplicate:
        return False, u'Duplicate object keys: {}'.format(duplicate)
    return True, u''
campaign-planning-tool-0.1.3
campaign-planning-tool-0.1.3//campaign_planning_tool/_export.pyclass:Export/__generate_range_gate_file
@staticmethod def __generate_range_gate_file(template_str, no_los, range_gates, lidar_mode, fft_size, accumulation_time): """ Range gate file generator. Attributes --------- template_str : str A string containing the base range gate file template. no_los : int Number of line of sight. range_gates : ndarray nD array of type int containing range gate center positions. lidar_mode : str A string indicating the operational mode a scanning lidar. fft_size : int An integer indicating number of fft points used during the spectral analysis of the backscattered signal. accumulation_time : int An integer indicating the accumulation time of the Doppler spectra. Notes ----- This method is only applicabe for long-range WindScanners! """ range_gate_file = template_str range_gate_file = range_gate_file.replace('insertMODE', str(lidar_mode)) range_gate_file = range_gate_file.replace('insertMaxRange', str(max( range_gates))) range_gate_file = range_gate_file.replace('insertFFTSize', str(fft_size)) rows = '' range_gate_row = '\t'.join(list(map(str, range_gates))) for i in range(0, no_los): row_temp = str(i + 1) + '\t' + str(accumulation_time) + '\t' row_temp = row_temp + range_gate_row if i < no_los - 1: row_temp = row_temp + '\n' rows = rows + row_temp range_gate_file = range_gate_file.replace('insertRangeGates', rows) return range_gate_file
nti
nti//i18n/locales/interfaces.pyclass:ICcTLDInformation/getLanguagesForTLD
def getLanguagesForTLD(tld):
    """
    Return the relevant languages for a top level domain as a sequence.

    :param tld: the top level domain to look up.
    """
    # Interface declaration only: no body here; implementations are
    # provided by the objects that claim to provide this interface.
timo-zappa-0.48.2.2
timo-zappa-0.48.2.2//timo_zappa/utilities.pyfile:/timo_zappa/utilities.py:function:merge_headers/merge_headers
def merge_headers(event):
    """
    Merge the values of headers and multiValueHeaders into a single dict.
    Opens up support for multivalue headers via API Gateway and ALB.
    See: https://github.com/Miserlou/Zappa/pull/1756
    """
    single = event.get('headers') or {}
    merged = dict(event.get('multiValueHeaders') or {})
    # Promote single-value headers that have no multi-value counterpart.
    for name, value in single.items():
        merged.setdefault(name, [value])
    # Collapse every value list into one comma-separated string.
    return {name: ', '.join(values) for name, values in merged.items()}
aiida-core-1.2.1
aiida-core-1.2.1//aiida/orm/users.pyclass:User/normalize_email
@staticmethod def normalize_email(email): """Normalize the address by lowercasing the domain part of the email address (taken from Django).""" email = email or '' try: email_name, domain_part = email.strip().rsplit('@', 1) except ValueError: pass else: email = '@'.join([email_name, domain_part.lower()]) return email
owlrl
owlrl//DatatypeHandling.pyfile:/DatatypeHandling.py:function:_strToBase64Binary/_strToBase64Binary
def _strToBase64Binary(v):
    """Rudimentary test for the base64Binary value. The problem is that the
    built-in b64 module functions ignore the fact that only a certain family
    of characters are allowed to appear in the lexical value, so this is
    checked first.

    @param v: the literal string defined as a base64encoded string
    @return the decoded (binary) content
    @raise ValueError: invalid base 64 binary value
    """
    import base64
    import binascii
    # Only [A-Za-z0-9+/=] may appear in the lexical form; map the three
    # non-alphanumeric legal characters onto letters so isalnum() can act
    # as the whitelist check.  (This also rejects the empty string, which
    # matches the original behaviour.)
    if not v.replace('=', 'x').replace('+', 'y').replace('/', 'z').isalnum():
        raise ValueError('Invalid Base64Binary %s' % v)
    try:
        return base64.standard_b64decode(v)
    except binascii.Error:
        # Fix: the original used a bare "except:", which would also swallow
        # KeyboardInterrupt/SystemExit.  Only decode failures are mapped to
        # ValueError; anything else propagates.
        raise ValueError('Invalid Base64Binary %s' % v)
pynata-0.4.1
pynata-0.4.1//pynata/logger/common.pyclass:LoggerCommon/set_log_format
@classmethod
def set_log_format(cls, format_str: str) -> None:
    """Store *format_str* as the log record format used with
    logging.Formatter instances."""
    # Class-level state: affects every user of this class's format.
    cls.log_format = format_str
fake-blender-api-2.79-0.3.1
fake-blender-api-2.79-0.3.1//bpy/ops/outliner.pyfile:/bpy/ops/outliner.py:function:drivers_delete_selected/drivers_delete_selected
def drivers_delete_selected():
    """Delete drivers assigned to selected items.

    API stub: performs no action here (the real operator is implemented
    inside Blender itself).
    """
    pass
squint
squint//_vendor/predicate.pyfile:/_vendor/predicate.py:function:_check_truthy/_check_truthy
def _check_truthy(value):
    """Return True if *value* is truthy, else False."""
    return not not value
pyjoystick-1.1.2
pyjoystick-1.1.2//pyjoystick/interface.pyclass:Key/to_keyname
@classmethod
def to_keyname(cls, key):
    """Return this key as a string keyname.

    * Format is "{minus}{keytype} {number}".
    * Hat format is "{keytype} {number} [{hat_name}]".

    Examples
        * "Axis 0" - For Axis 0 with a positive or 0 value.
        * "-Axis 1" - For an Axis Key that has a negative value and needs
          to be inverted.
        * "Button 0" - Buttons wont have negative values.
        * "Hat 0 [Left Up]" - Hat values also give the key value as a
          hat name.
    """
    # A leading '-' marks keys with a (non-zero) negative value.
    sign = '-' if key.value and key.value < 0 else ''
    if key.keytype == cls.HAT:
        return '{}{} {} [{}]'.format(sign, key.keytype, key.number,
                                     key.get_hat_name())
    return '{}{} {}'.format(sign, key.keytype, key.number)
PICOS-2.0.8
PICOS-2.0.8//picos/solvers/solver.pyclass:Solver/penalty
@classmethod
def penalty(cls, options):
    """Report solver penalty given an :class:`~picos.Options` object."""
    # The option key is derived from the solver's primary name,
    # e.g. "penalty_cvxopt".
    option_key = 'penalty_{}'.format(cls.names()[0])
    return options[option_key]
pyNastran
pyNastran//bdf/cards/elements/axisymmetric_shells.pyclass:CTRAX6/export_to_hdf5
@classmethod
def export_to_hdf5(cls, h5_file, model, eids):
    """Export the elements to hdf5 in a vectorized way.

    Writes one dataset each for element ids, property ids, node lists
    and theta values, all ordered consistently with ``eids``.
    """
    elements = [model.elements[eid] for eid in eids]
    pids = [elem.pid for elem in elements]
    nodes = [elem.nodes for elem in elements]
    thetas = [elem.theta for elem in elements]
    h5_file.create_dataset('eid', data=eids)
    h5_file.create_dataset('pid', data=pids)
    h5_file.create_dataset('nodes', data=nodes)
    h5_file.create_dataset('theta', data=thetas)
alignak_module_backend-1.4.3
alignak_module_backend-1.4.3//alignak_module_backend/arbiter/module.pyclass:AlignakBackendArbiter/clean_unusable_keys
@classmethod
def clean_unusable_keys(cls, resource):
    """Delete keys of dictionary not used

    :param resource: dictionary got from alignak-backend
    :type resource: dict
    :return:
    """
    # Fixed set of backend/meta fields that must never reach Alignak.
    drop = {'_links', '_updated', '_created', '_etag', '_id', 'name',
        'ui', '_realm', '_sub_realm', '_users_read', '_users_update',
        '_users_delete', '_parent', '_tree_parents', '_all_children',
        '_level', 'customs', 'host', 'service', 'back_role_super_admin',
        'token', '_templates', '_template_fields', 'note', '_is_template',
        '_templates_with_services', '_templates_from_host_template',
        'merge_host_users', 'hosts_critical_threshold',
        'hosts_warning_threshold', 'services_critical_threshold',
        'services_warning_threshold', 'global_critical_threshold',
        'global_warning_threshold', '_children', 'hostgroups', 'hosts',
        'dependent_hostgroups', 'dependent_hosts', 'servicegroups',
        'services', 'dependent_servicegroups', 'dependent_services',
        'usergroups', 'users', 'location', 'duplicate_foreach', 'tags',
        '_overall_state_id', 'trigger', 'schema_version'}
    # Live-state fields are dynamic; any key prefixed "ls_" is dropped too.
    drop.update(field for field in resource if field.startswith('ls_'))
    for field in drop:
        resource.pop(field, None)
verify
verify//core/builder.pyclass:BuilderBase/fix
@staticmethod def fix(frame: 'Image.Image', char: 'Image.Image', position: 'tuple') ->None: """ Fix char to the frame according to position. """ x, y = position width, high = char.size box = x, y, x + width, y + high frame.paste(char, box=box, mask=char.split()[3])
mailman-3.3.1
mailman-3.3.1//src/mailman/interfaces/mailinglist.pyclass:IMailingList/send_one_last_digest_to
def send_one_last_digest_to(address, delivery_mode):
    """Make sure to send one last digest to an address.

    This is used when a person transitions from digest delivery to regular
    delivery and wants to make sure they don't miss anything.  By
    indicating that they'd like to receive one last digest, they will
    ensure continuity in receiving mailing lists posts.

    :param address: The address of the person receiving one last digest.
    :type address: `IAddress`
    :param delivery_mode: The type of digest to receive.
    :type delivery_mode: `DeliveryMode`
    """
    # Interface declaration only: no body here; implementations are
    # provided by concrete IMailingList objects.
breezy-3.0.2
breezy-3.0.2//breezy/merge3.pyfile:/breezy/merge3.py:function:intersect/intersect
def intersect(ra, rb):
    """Given two ranges return the range where they intersect or None.

    >>> intersect((0, 10), (0, 6))
    (0, 6)
    >>> intersect((0, 10), (5, 15))
    (5, 10)
    >>> intersect((0, 10), (10, 15))
    >>> intersect((0, 9), (10, 15))
    >>> intersect((0, 9), (7, 15))
    (7, 9)
    """
    lo = max(ra[0], rb[0])
    hi = min(ra[1], rb[1])
    # Empty (or merely touching) overlap yields None.
    return (lo, hi) if lo < hi else None
seamm
seamm//parameters.pyfile:/parameters.py:function:set_context/set_context
def set_context(context):
    """Set the module-wide default root context used when evaluating
    variables and expressions in parameters.
    """
    global root_context
    root_context = context
dyndnsc
dyndnsc//plugins/manager.pyclass:PluginManager/plugin_name
@staticmethod def plugin_name(plugin): """Discover the plugin name and return it.""" return plugin.__class__.__name__.lower()
exporters-0.7.0
exporters-0.7.0//exporters/iterio.pyfile:/exporters/iterio.py:function:iterate_chunks/iterate_chunks
def iterate_chunks(file, chunk_size):
    """
    Yield successive chunks of at most *chunk_size* read from a
    file-like object, stopping at the first empty read (EOF).
    """
    while True:
        piece = file.read(chunk_size)
        if not piece:
            break
        yield piece
numba-0.49.0
numba-0.49.0//numba/core/types/containers.pyclass:ListType/refine
@classmethod
def refine(cls, itemty):
    """Refine to a precise list type"""
    refined = cls(itemty)
    # Refinement with a concrete item type must always yield a precise type.
    assert refined.is_precise()
    return refined
dropbox
dropbox//team_log.pyclass:EventType/team_merge_request_canceled_shown_to_primary_team
@classmethod
def team_merge_request_canceled_shown_to_primary_team(cls, val):
    """
    Create an instance of this class set to the
    ``team_merge_request_canceled_shown_to_primary_team`` tag with value
    ``val``.

    :param TeamMergeRequestCanceledShownToPrimaryTeamType val:
    :rtype: EventType
    """
    tag = 'team_merge_request_canceled_shown_to_primary_team'
    return cls(tag, val)
colony
colony//base/util.pyfile:/base/util.py:function:module_import/module_import
def module_import(module_name):
    """
    Imports the module with the given name, this import operation is
    recursive meaning that inner packages are also going to be imported.

    :type module_name: String
    :param module_name: The name of the module to be imported, this value
    may contain multiple "sub" packages.
    :rtype: module
    :return: The imported module as a variable reference.
    """
    # importlib.import_module already resolves dotted names and returns the
    # leaf module, replacing the original __import__ + getattr chain.
    import importlib
    return importlib.import_module(module_name)