repo
stringlengths
1
29
path
stringlengths
24
332
code
stringlengths
39
579k
duckling
duckling//language.pyclass:Language/is_supported
@classmethod
def is_supported(cls, lang):
    """Return True when *lang* is one of this duckling version's
    supported languages (``cls.SUPPORTED_LANGUAGES``)."""
    supported = cls.SUPPORTED_LANGUAGES
    return lang in supported
watchm8
watchm8//lib.pyfile:/lib.py:function:class_loader/class_loader
def class_loader(name):
    """Import and return the object named by a dotted path.

    Args:
        name: dotted path to import, e.g. ``foo.bar.MyClass``.

    Returns:
        The class/function/variable/module the path resolves to.
    """
    parts = name.split('.')
    obj = __import__(parts[0])
    for attribute in parts[1:]:
        obj = getattr(obj, attribute)
    return obj
inplace-abn-1.0.12
inplace-abn-1.0.12//scripts/imagenet/utils.pyfile:/scripts/imagenet/utils.py:function:accuracy_sum/accuracy_sum
def accuracy_sum(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" maxk = max(topk) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0)) return res
mgsub
mgsub//mgsub.pyfile:/mgsub.py:function:filter_matches/filter_matches
def filter_matches(this, prev):
    """Decide whether *this* match overlaps an earlier (longer) match.

    :param this: the match currently being checked; start at index 1,
        end at index 3.
    :param prev: an earlier match being compared against, same layout.
    :return: truthy when the two spans overlap.
    """
    start, end = this[1], this[3]
    prev_start, prev_end = prev[1], prev[3]
    covers_start = (prev_start <= start) & (prev_end > start)
    covers_end = (prev_start < end) & (prev_end >= end)
    return covers_start | covers_end
distl-0.1.0.dev1
distl-0.1.0.dev1//distl/distl.pyfile:/distl/distl.py:function:is_int_positive/is_int_positive
def is_int_positive(value):
    """Validate that *value* is a positive integer.

    Returns a ``(passes, value)`` pair, matching the distl validator
    convention: the boolean verdict plus the (unmodified) value.
    """
    passes = isinstance(value, int) and value > 0
    return passes, value
poco-0.96.9
poco-0.96.9//poco/services/catalog_handler.pyclass:CatalogHandler/get_branch
@staticmethod
def get_branch(config):
    """Return the catalog branch of a remote repository config.

    Falls back to ``'master'`` when *config* is None or lacks a
    ``'branch'`` entry.
    """
    if config is None:
        return 'master'
    return config.get('branch', 'master')
maputil-0.1.0
maputil-0.1.0//src/maputil/types_multimap.pyfile:/src/maputil/types_multimap.py:function:is_independent/is_independent
def is_independent(L):
    """Return True if the mappings in *L* form independent subgroups.

    Independence means no key and no value is shared between two
    different groups (each group reports its own unique keys/values).
    """
    all_keys = []
    all_values = []
    for group in L:
        all_keys.extend(group.unique_keys())
        all_values.extend(group.unique_values())
    values_disjoint = len(all_values) == len(set(all_values))
    keys_disjoint = len(all_keys) == len(set(all_keys))
    return values_disjoint and keys_disjoint
featuretoolsOnSpark
featuretoolsOnSpark//column_types.pyclass:Column/create_from
@classmethod
def create_from(cls, column):
    """Create a new column of this type from an existing column.

    Args:
        column (Column): existing column whose id and table are reused.

    Returns:
        :class:`.Column`: the newly created column.
    """
    return cls(id=column.id, table=column.table)
stellar-base-1.1.2.0
stellar-base-1.1.2.0//stellar_base/stellarxdr/xdrgen.pyfile:/stellar_base/stellarxdr/xdrgen.py:function:p_specification/p_specification
def p_specification(t):
    """specification : definition_list"""
    # PLY (yacc) rule: the docstring above IS the grammar production and
    # must not be edited.  No semantic action is taken (t[0] is left
    # unset), so the start symbol's parse value is None — presumably the
    # generator works via side effects in other rules' actions; confirm
    # against the rest of xdrgen.py.
sqt-0.8.0
sqt-0.8.0//sqt/commands/fastagrep.pyfile:/sqt/commands/fastagrep.py:function:iupac_to_regex/iupac_to_regex
def iupac_to_regex(iupac):
    """Translate an IUPAC nucleotide string into a regular expression.

    Plain bases (ACGT) pass through; ambiguity codes become character
    classes.  Raises ValueError for any other character.
    """
    wildcards = {'R': 'AG', 'Y': 'CT', 'S': 'CG', 'W': 'AT', 'K': 'GT',
        'M': 'AC', 'B': 'CGT', 'D': 'AGT', 'H': 'ACT', 'V': 'ACG',
        'N': 'ACGT', 'X': 'ACGT'}
    pieces = []
    for ch in iupac.upper():
        if ch in 'ACGT':
            pieces.append(ch)
        elif ch in wildcards:
            pieces.append('[' + wildcards[ch] + ']')
        else:
            raise ValueError("don't know how to handle character %s" % ch)
    return ''.join(pieces)
clld-7.1.0
clld-7.1.0//src/clld/web/app.pyfile:/src/clld/web/app.py:function:menu_item/menu_item
def menu_item(route_name, ctx, req, label=None):
    """Factory for a menu item identified by route name.

    :param route_name: name of the route the item links to.
    :param ctx: resource context (unused here; kept for the shared
        menu-callable signature).
    :param req: current request, used for URL generation and translation.
    :param label: optional explicit label; defaults to the translated,
        capitalized route name.
    :return: a ``(url, label)`` pair.
    """
    url = req.route_url(route_name)
    text = label or req.translate(route_name.capitalize())
    return url, text
phply
phply//phpparse.pyfile:/phpparse.py:function:p_non_empty_for_expr/p_non_empty_for_expr
def p_non_empty_for_expr(p):
    """non_empty_for_expr : non_empty_for_expr COMMA expr | expr"""
    # PLY rule: the docstring is the grammar production; do not edit it.
    # Builds a Python list of the comma-separated expressions.
    if len(p) == 4:
        # recursive alternative: extend the existing list with the new expr
        p[0] = p[1] + [p[3]]
    else:
        # single-expr alternative: start a fresh one-element list
        p[0] = [p[1]]
pyign-1.0.4
pyign-1.0.4//pyign/functions/core.pyfile:/pyign/functions/core.py:function:check_tc_data/check_tc_data
def check_tc_data(*args):
    """Flag thermocouple readings that meet or exceed their limits.

    ``args[0]`` must expose ``tc_limits`` (sequence of limit values);
    ``args[1]`` is the sequence of measured values, indexed in parallel.

    Returns a list the same length as the limits: the measured value
    where it reaches its limit, 0 otherwise.  (NOTE(review): despite the
    original docstring, a plain list — not a numpy array — is returned.)
    """
    limits = args[0].tc_limits
    readings = args[1]
    flagged = []
    for i, limit in enumerate(limits):
        reading = readings[i]
        flagged.append(reading if reading - limit >= 0 else 0)
    return flagged
mxnet-1.6.0.data
mxnet-1.6.0.data//purelib/mxnet/contrib/onnx/onnx2mx/_op_translations.pyfile:/purelib/mxnet/contrib/onnx/onnx2mx/_op_translations.py:function:relu/relu
def relu(attrs, inputs, proto_obj):
    """Map an ONNX Relu node onto the MXNet ``'relu'`` operator,
    passing attributes and inputs through unchanged."""
    op_name = 'relu'
    return op_name, attrs, inputs
dust_extinction-0.8
dust_extinction-0.8//dust_extinction/conversions.pyclass:AxAvToExv/evaluate
@staticmethod
def evaluate(axav, Av):
    """Convert A(x)/A(V) extinction values to E(x-V).

    Parameters
    ----------
    axav : np array (float)
        A(x)/A(V) values.
    Av : float
        V-band extinction.

    Returns
    -------
    exv : np array (float)
        E(x-V) values, computed as (axav - 1) * Av.
    """
    shifted = axav - 1.0
    return shifted * Av
kolibri-0.13.2
kolibri-0.13.2//kolibri/dist/urllib3/util/response.pyfile:/kolibri/dist/urllib3/util/response.py:function:is_response_to_head/is_response_to_head
def is_response_to_head(response):
    """Return True when *response* answers a HEAD request.

    Handles the AppEngine quirk where ``_method`` is stored as an int
    method code (3 — presumably HEAD; see original docstring) rather
    than the method name string.

    :type response: :class:`httplib.HTTPResponse`
    """
    method = response._method
    if isinstance(method, int):
        return method == 3
    return method.upper() == 'HEAD'
ludology
ludology//game.pyclass:Game/__hash__
def __hash__(G):
    """Hash a game by its (unordered) left and right option sets.

    Returns
    -------
    hash : int
        Hash of the pair of frozensets of G's options.
    """
    left_options = frozenset(G._left)
    right_options = frozenset(G._right)
    return hash((left_options, right_options))
mercurial-5.4
mercurial-5.4//mercurial/thirdparty/zope/interface/interfaces.pyclass:IComponentRegistry/registeredHandlers
def registeredHandlers():
    """Return an iterable of IHandlerRegistration instances.

    These registrations describe the current handler registrations in
    the object.  (zope.interface method declaration: no ``self``
    parameter and no body — implementations supply the behavior.)
    """
citrine-0.20.0
citrine-0.20.0//src/citrine/_utils/functions.pyfile:/src/citrine/_utils/functions.py:function:validate_type/validate_type
def validate_type(data_dict: dict, type_name: str) -> dict:
    """Return a copy of *data_dict* whose 'type' field equals *type_name*.

    The input dict is never mutated.  If it already carries a 'type'
    field with a different value, an Exception is raised.
    """
    result = dict(data_dict)
    if 'type' not in result:
        result['type'] = type_name
    elif result['type'] != type_name:
        raise Exception('Object type must be {}, but was instead {}.'.
            format(type_name, data_dict['type']))
    return result
oci
oci//application_migration/models/discovery_details.pyclass:DiscoveryDetails/get_subtype
@staticmethod
def get_subtype(object_dictionary):
    """Resolve the concrete subclass name from the 'type' field of the
    given hash; unknown types fall back to 'DiscoveryDetails'."""
    subtype_for = {
        'OIC': 'OicDiscoveryDetails',
        'PCS': 'PcsDiscoveryDetails',
        'ICS': 'IcsDiscoveryDetails',
        'OAC': 'OacDiscoveryDetails',
        'JCS': 'JcsDiscoveryDetails',
        'SOACS': 'SoacsDiscoveryDetails',
    }
    return subtype_for.get(object_dictionary['type'], 'DiscoveryDetails')
logreader
logreader//console.pyfile:/console.py:function:set_cursor/set_cursor
def set_cursor(line, col):
    """Move the terminal cursor to (line, col) using the ANSI CUP
    escape sequence, written to stdout."""
    sequence = '\x1b[%i;%iH' % (line, col)
    print(sequence)
Diofant-0.11.0
Diofant-0.11.0//diofant/printing/printer.pyclass:Printer/set_global_settings
@classmethod
def set_global_settings(cls, **settings):
    """Store system-wide printing settings; None values are ignored."""
    for name, value in settings.items():
        if value is not None:
            cls._global_settings[name] = value
coremltools
coremltools//converters/keras/_layers2.pyfile:/converters/keras/_layers2.py:function:convert_advanced_relu/convert_advanced_relu
def convert_advanced_relu(builder, layer, input_names, output_names,
        keras_layer):
    """Convert a Keras advanced ReLU layer (ReLU with max value) to CoreML.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    input_name, output_name = input_names[0], output_names[0]
    if keras_layer.max_value is None:
        # no clipping requested: a single plain ReLU layer suffices
        builder.add_activation(layer, 'RELU', input_name, output_name)
        return
    # Clipped ReLU emitted as: relu -> negate -> threshold -> negate.
    # NOTE(review): the [-1.0, 0] argument to the LINEAR activations is
    # presumably (scale, bias) so each negates its input, and the
    # threshold at -max_value then realizes min(relu(x), max_value) —
    # confirm against NeuralNetworkBuilder's API.
    relu_output_name = output_name + '_relu'
    builder.add_activation(layer, 'RELU', input_name, relu_output_name)
    neg_output_name = relu_output_name + '_neg'
    builder.add_activation(layer + '__neg__', 'LINEAR', relu_output_name,
        neg_output_name, [-1.0, 0])
    clip_output_name = relu_output_name + '_clip'
    builder.add_unary(layer + '__clip__', neg_output_name,
        clip_output_name, 'threshold', alpha=-keras_layer.max_value)
    builder.add_activation(layer + '_neg2', 'LINEAR', clip_output_name,
        output_name, [-1.0, 0])
kartothek
kartothek//serialization/_generic.pyfile:/serialization/_generic.py:function:filter_df/filter_df
def filter_df(df, filter_query=None):
    """Filter *df* with a pandas query string.

    General implementation of query filtering; serialization formats
    with predicate push-down (e.g. Parquet) may pre-filter themselves.
    Empty frames and a missing query are returned unchanged.
    """
    if filter_query is None or df.shape[0] == 0:
        return df
    return df.query(filter_query)
sqlalchemy_mate
sqlalchemy_mate//credential.pyclass:Credential/from_home_db_json
@classmethod
def from_home_db_json(cls, identifier, key_mapping=None):
    """Load a credential from the ``$HOME/.db.json`` file.

    :type identifier: str
    :param identifier: database identifier — the top-level key to read
        from ``.db.json``.
    :type key_mapping: Dict[str, str]
    :param key_mapping: optional mapping renaming json keys to
        credential field names.

    Expected ``.db.json`` layout::

        {
            "identifier1": {
                "host": "example.com",
                "port": 1234,
                "database": "test",
                "username": "admin",
                "password": "admin"
            },
            "identifier2": {...}
        }
    """
    return cls.from_json(
        json_file=cls.path_db_json,
        json_path=identifier,
        key_mapping=key_mapping,
    )
xosgenx-4.0.1
xosgenx-4.0.1//xosgenx/jinja2_extensions/tosca.pyfile:/xosgenx/jinja2_extensions/tosca.py:function:xproto_tosca_field_type/xproto_tosca_field_type
def xproto_tosca_field_type(type):
    """Map xproto scalar type names to their TOSCA spellings.

    TOSCA calls 'bool' 'boolean' and 'int32' 'integer'; every other
    type name passes through unchanged.
    """
    remap = {'bool': 'boolean', 'int32': 'integer'}
    return remap.get(type, type)
ddrage-1.7.1
ddrage-1.7.1//ddrage/rad_reads.pyclass:ProtoReadp5/p5_prefix_length
@staticmethod
def p5_prefix_length(barcode, spacer, overhang):
    """Compute the total p5 prefix length from its fragments.

    Arguments:
        barcode (str or bytes): barcode sequence
        spacer (str or bytes): spacer sequence
        overhang (str or bytes): overhang sequence

    Returns:
        int: combined length of all three fragments.
    """
    fragments = (barcode, spacer, overhang)
    return sum(len(fragment) for fragment in fragments)
tensorflow-datasets-3.1.0
tensorflow-datasets-3.1.0//tensorflow_datasets/text/super_glue.pyfile:/tensorflow_datasets/text/super_glue.py:function:_get_record_entities/_get_record_entities
def _get_record_entities(passage):
    """Return the sorted unique entity strings found in *passage*.

    Entity spans use inclusive end indices into ``passage['text']``.
    """
    text = passage['text']
    spans = {text[e['start']:e['end'] + 1] for e in passage['entities']}
    return sorted(spans)
cityenergyanalyst-3.2.0
cityenergyanalyst-3.2.0//cea/config.pyfile:/cea/config.py:function:parse_command_line_args/parse_command_line_args
def parse_command_line_args(args):
    """Group arguments into a dict mapping parameter name -> value.

    Tokens are scanned right-to-left: each ``--name`` flag collects the
    (space-joined) value tokens that followed it.  Raises AssertionError
    when leading tokens have no flag to attach to.
    """
    parameters = {}
    pending = []
    for token in list(args)[::-1]:
        if token.startswith('--'):
            parameters[token[2:]] = ' '.join(reversed(pending))
            pending = []
        else:
            pending.append(token)
    assert not pending, 'Bad arguments: %s' % args
    return parameters
ecco_v4_py-1.1.6
ecco_v4_py-1.1.6//ecco_v4_py/get_section_masks.pyfile:/ecco_v4_py/get_section_masks.py:function:get_available_sections/get_available_sections
def get_available_sections():
    """Return pre-defined section names for computing transports.

    Returns
    -------
    section_list : list of str
        Names of the available pre-defined sections.
    """
    names = (
        'Bering Strait', 'Gibraltar',
        'Florida Strait', 'Florida Strait W1', 'Florida Strait S1',
        'Florida Strait E1', 'Florida Strait E2', 'Florida Strait E3',
        'Florida Strait E4',
        'Davis Strait', 'Denmark Strait',
        'Iceland Faroe', 'Faroe Scotland', 'Scotland Norway',
        'Drake Passage',
        'Indonesia W1', 'Indonesia W2', 'Indonesia W3', 'Indonesia W4',
        'Australia Antarctica',
        'Madagascar Channel', 'Madagascar Antarctica',
        'South Africa Antarctica',
    )
    return list(names)
elastic_lib-19.4.23
elastic_lib-19.4.23//elastic_lib/elastic.pyclass:ElasticModel/remote_reindex
@classmethod
def remote_reindex(cls, script=None, source_query=None, params=None,
        target_index=None, pipeline=None):
    """Reindex data from a remote Elasticsearch source into this index.

    The requesting server's IP must be whitelisted on the remote:
    https://www.elastic.co/guide/en/elasticsearch/reference/current/reindex-upgrade-remote.html
    When changing something here also update the elastic py in mail
    magazine (per original note).

    :param script: optional dict merged into the request body —
        presumably ``{'script': {...}}`` to override the default type
        rewrite; confirm with callers.
    :param source_query: optional query dict; NOTE(review): mutated in
        place when cls.REMOTE_TYPE exists (a type filter is appended to
        ``source_query['bool']['must']``).
    :param params: request params; defaults to async execution
        (``wait_for_completion: 'false'``).
    :param target_index: destination index; defaults to
        ``cls.get_index_name()``.
    :param pipeline: optional ingest pipeline for the destination.
    :return: the reindex API response.
    """
    booking_es = cls.get_connection()
    if not target_index:
        target_index = cls.get_index_name()
    if not params:
        params = {'wait_for_completion': 'false'}
    # default body: pull everything from the remote index and force the
    # document type to 'doc' via a painless script
    body = {'source': {'remote': {'host': 'http://%s' % cls.REMOTE_HOST},
        'index': cls.REMOTE_INDEX}, 'dest': {'index': target_index},
        'script': {'source': """ ctx._type = 'doc'; """, 'lang':
        'painless'}}
    if script:
        body.update(script)
    if source_query:
        if hasattr(cls, 'REMOTE_TYPE'):
            source_query['bool']['must'].append({'type': {'value': cls.
                REMOTE_TYPE}})
        body['source']['query'] = source_query
    if pipeline:
        body['dest']['pipeline'] = pipeline
    return booking_es.reindex(body, params=params)
cdptools
cdptools//event_scrapers/seattle_event_scraper.pyclass:SeattleEventScraper/_clean_string
@staticmethod
def _clean_string(s: str) -> str:
    """Remove newlines/tabs/nbsp and trim simple leading/trailing junk.

    Strips embedded '\n', '\t' and '\xa0' characters, then removes at
    most ONE leading space, ONE trailing space, and ONE trailing period
    (in that order) — intentionally not a full strip().

    Parameters
    ----------
    s: str
        The string to be cleaned.

    Returns
    -------
    cleaned: str
        The cleaned string; '' when the input reduces to nothing.
    """
    s = s.replace('\n', '')
    s = s.replace('\t', '')
    s = s.replace('\xa0', '')
    # guard each indexing with a non-empty check: the original crashed
    # with IndexError when the string became empty after the replacements
    if s and s[0] == ' ':
        s = s[1:]
    if s and s[-1] == ' ':
        s = s[:-1]
    if s and s[-1] == '.':
        s = s[:-1]
    return s
fake-bpy-module-2.79-20200428
fake-bpy-module-2.79-20200428//bpy/ops/mesh.pyfile:/bpy/ops/mesh.py:function:colors_rotate/colors_rotate
def colors_rotate(use_ccw: bool=False):
    """Rotate vertex colors inside faces (stub for the Blender operator).

    :param use_ccw: rotate counter-clockwise when True
    :type use_ccw: bool
    """
    pass
pybktree-1.1
pybktree-1.1//pybktree.pyfile:/pybktree.py:function:hamming_distance/hamming_distance
def hamming_distance(x, y):
    """Return the number of bit positions at which two integers differ.

    >>> [hamming_distance(x, 15) for x in [0, 8, 10, 12, 14, 15]]
    [4, 3, 2, 2, 1, 0]
    """
    differing_bits = x ^ y
    return bin(differing_bits).count('1')
discord
discord//colour.pyclass:Colour/orange
@classmethod
def orange(cls):
    """A factory method that returns a :class:`Colour` with a value of
    ``0xe67e22``."""
    return cls(0xE67E22)
typedargs
typedargs//doc_parser.pyclass:ParsedDocstring/_join_paragraphs
@classmethod
def _join_paragraphs(cls, lines, use_indent=False, leading_blanks=False,
        trailing_blanks=False):
    """Join adjacent lines into paragraphs separated by blank line or indent.

    :param lines: iterable of raw text lines.
    :param use_indent: when True, indented lines continue the current
        paragraph and any non-indented line starts a new one (blank lines
        are ignored); when False, blank lines end paragraphs.
    :param leading_blanks: forwarded to ``cls._join_paragraph``.
    :param trailing_blanks: forwarded to ``cls._join_paragraph``.
    :return: list of joined paragraphs.
    """
    curr_para = []
    paragraphs = []
    for line in lines:
        if use_indent:
            if line.startswith(' '):
                # indented: continuation of the current paragraph
                curr_para.append(line.lstrip())
                continue
            elif line == '':
                # blank lines carry no meaning in indent mode
                continue
            else:
                # non-indented line: flush the finished paragraph (if any)
                # and start a new one with this line
                if len(curr_para) > 0:
                    paragraphs.append(cls._join_paragraph(curr_para,
                        leading_blanks, trailing_blanks))
                curr_para = [line.lstrip()]
        elif len(line) != 0:
            curr_para.append(line)
        else:
            # blank-line mode: NOTE(review) consecutive blank lines pass
            # an empty curr_para to _join_paragraph — confirm it
            # tolerates [].
            paragraphs.append(cls._join_paragraph(curr_para,
                leading_blanks, trailing_blanks))
            curr_para = []
    if len(curr_para) > 0:
        # flush the trailing paragraph
        paragraphs.append(cls._join_paragraph(curr_para, leading_blanks,
            trailing_blanks))
    return paragraphs
pyqtgraph
pyqtgraph//exceptionHandling.pyfile:/exceptionHandling.py:function:setTracebackClearing/setTracebackClearing
def setTracebackClearing(clear=True):
    """Enable or disable traceback clearing.

    By default clearing is disabled and Python keeps unhandled exception
    stack traces indefinitely; enabling avoids unexpected retention of
    large memory-consuming objects referenced by those traces.
    """
    global clear_tracebacks
    clear_tracebacks = clear
toil
toil//job.pyclass:JobNode/fromJobGraph
@classmethod
def fromJobGraph(cls, jobGraph):
    """Build a job node from a job graph object.

    :param toil.jobGraph.JobGraph jobGraph: job graph to transform
    :return: a job node carrying the graph's identity and requirements
    :rtype: toil.job.JobNode
    """
    return cls(
        jobStoreID=jobGraph.jobStoreID,
        requirements=jobGraph._requirements,
        command=jobGraph.command,
        jobName=jobGraph.jobName,
        unitName=jobGraph.unitName,
        displayName=jobGraph.displayName,
        predecessorNumber=jobGraph.predecessorNumber,
    )
sourmash-3.3.0
sourmash-3.3.0//utils/compute-dna-mh-another-way.pyfile:/utils/compute-dna-mh-another-way.py:function:reverse/reverse
def reverse(s):
    """Return the characters of *s* in reverse order as a new string."""
    chars = list(s)
    chars.reverse()
    return ''.join(chars)
vk_dev
vk_dev//vk.pyclass:Button/line
@classmethod
def line(cls) -> 'Button':
    """Add a buttons line break: returns a fresh button whose ``info``
    is None, marking the start of a new keyboard row."""
    button = cls._button_init()
    button.info = None
    return button
eDisGo-0.0.10
eDisGo-0.0.10//edisgo/data/import_data.pyfile:/edisgo/data/import_data.py:function:_import_genos_from_pypsa/_import_genos_from_pypsa
def _import_genos_from_pypsa(network, file):
    """Import generator data from a pyPSA file (not yet implemented).

    Parameters
    ----------
    network : :class:`~.grid.network.Network`
        The eDisGo container object.
    file : :obj:`str`
        File including path.

    Raises
    ------
    NotImplementedError
        Always — this importer is a placeholder.
    """
    raise NotImplementedError
malaya-gpu-3.4.2
malaya-gpu-3.4.2//malaya/entity.pyfile:/malaya/entity.py:function:describe/describe
def describe():
    """Print one description line for every supported entity class."""
    entity_lines = (
        'OTHER - Other',
        'law - law, regulation, related law documents, documents, etc',
        'location - location, place',
        'organization - organization, company, government, facilities, etc',
        'person - person, group of people, believes, unique arts (eg; food, drink), etc',
        'quantity - numbers, quantity',
        'time - date, day, time, etc',
        'event - unique event happened, etc',
    )
    for entity_line in entity_lines:
        print(entity_line)
datalab-client-1.0.18
datalab-client-1.0.18//vos/html2text.pyfile:/vos/html2text.py:function:list_numbering_start/list_numbering_start
def list_numbering_start(attrs):
    """Return the zero-based start index from a list element's 'start'
    attribute, or 0 when the attribute is absent."""
    if 'start' not in attrs:
        return 0
    return int(attrs['start']) - 1
bbarchivist
bbarchivist//scriptutilstcl.pyfile:/scriptutilstcl.py:function:tcl_mainscan_printer/tcl_mainscan_printer
def tcl_mainscan_printer(curef, tvver, ota=None):
    """Print one line of TCL scan output.

    :param curef: PRD of the phone variant to check.
    :type curef: str
    :param tvver: Target software version.
    :type tvver: str
    :param ota: Starting version if OTA, None if not. Default is None.
    :type ota: str
    """
    if ota is None:
        print(f'{curef}: {tvver}')
    else:
        print(f'{curef}: {ota.upper()} to {tvver}')
remisc-0.0.3
remisc-0.0.3//remisc/service.pyfile:/remisc/service.py:function:isop/isop
def isop(func):
    """The ``@isop`` decorator: mark a Service-derived method as a
    REST-ful operation by tagging it with ``isop = True``.

    Such methods take the urlparse object for the request and a dict of
    query-string arguments.  The function is returned unchanged.
    """
    setattr(func, 'isop', True)
    return func
lpschedule-generator-0.10.0
lpschedule-generator-0.10.0//lps_gen.pyfile:/lps_gen.py:function:read_file/read_file
def read_file(filename):
    """Read a file and return its contents as one string.

    :param str filename: Absolute pathname of the file.
    :returns: The file contents; '' (after printing an error) when the
        file cannot be opened.
    """
    try:
        with open(filename, 'r') as fp:
            return ''.join(fp)
    except IOError:
        print('Error: unable to open {}'.format(filename))
        return ''
nextstrain
nextstrain//cli/runner/native.pyfile:/cli/runner/native.py:function:update/update
def update() ->bool:
    """No-op: the native environment cannot reasonably be updated, so
    report success unconditionally."""
    return True
reynir-2.2.0
reynir-2.2.0//src/reynir/binparser.pyclass:WordMatchers/matcher_töl
@staticmethod
def matcher_töl(token, terminal, m):
    """Match an undeclinable number word ('fimm', 'sex', 'tuttugu'...)."""
    category = m.ordfl
    lemma = m.stofn
    word = token.t1_lower
    return terminal.matches_first(category, lemma, word)
dx_utilities
dx_utilities//geometry/planar.pyclass:PlanarShape/create_generic
@classmethod
def create_generic(cls, vertices=None, holes=None, *args, **kwargs):
    """Factory building a planar shape from explicit boundary vertices.

    Thin wrapper around the superclass constructor: *vertices* becomes
    the ``shell`` and *holes* is forwarded as-is.

    :param list(tuple) vertices: ``(x, y)`` pairs for the boundary.
    :param list holes: lists of ``(x, y)`` pairs for internal holes.
    :param \\*args: extension positional arguments for subclasses.
    :param \\*\\*kwargs: extension keyword arguments for subclasses.
    :rtype: PlanarShape
    """
    return cls(*args, shell=vertices, holes=holes, **kwargs)
kubeflow-fairing-0.7.2
kubeflow-fairing-0.7.2//kubeflow/fairing/utils.pyfile:/kubeflow/fairing/utils.py:function:get_current_k8s_namespace/get_current_k8s_namespace
def get_current_k8s_namespace():
    """Return the Kubernetes namespace of the current pod, read from the
    mounted service-account file (first line, trailing newline kept)."""
    namespace_file = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
    with open(namespace_file, 'r') as handle:
        return handle.readline()
papermill
papermill//translators.pyclass:Translator/translate_none
@classmethod
def translate_none(cls, val):
    """Default translation for None values: delegate to the raw-string
    translator."""
    return cls.translate_raw_str(val)
colony
colony//libs/map_util.pyfile:/libs/map_util.py:function:map_copy/map_copy
def map_copy(source_map, destiny_map):
    """Copy entries from *source_map* into *destiny_map* in place.

    An existing destiny entry is kept unless its value compares equal to
    None or to the string 'none', in which case it is overwritten.

    :type source_map: Dictionary
    :param source_map: The source map of the copy.
    :type destiny_map: Dictionary
    :param destiny_map: The destiny map of the copy.
    """
    for key in source_map:
        # `== None` (not `is None`) kept deliberately to preserve the
        # original behavior for values with a custom __eq__
        replaceable = (key not in destiny_map or destiny_map[key] == None
            or destiny_map[key] == 'none')
        if replaceable:
            destiny_map[key] = source_map[key]
pyboto3-1.4.4
pyboto3-1.4.4//pyboto3/cognitosync.pyfile:/pyboto3/cognitosync.py:function:list_records/list_records
def list_records(IdentityPoolId=None, IdentityId=None, DatasetName=None,
        LastSyncCount=None, NextToken=None, MaxResults=None,
        SyncSessionToken=None):
    """Stub documenting Amazon Cognito Sync ``ListRecords`` (pyboto3).

    Gets paginated records, optionally changed after a particular sync
    count, for a dataset and identity.  Each identity has access only to
    its own data; use Cognito Identity credentials for this call.

    :type IdentityPoolId: string
    :param IdentityPoolId: [REQUIRED] name-spaced GUID created by Cognito.
    :type IdentityId: string
    :param IdentityId: [REQUIRED] name-spaced GUID created by Cognito.
    :type DatasetName: string
    :param DatasetName: [REQUIRED] up to 128 chars of a-z A-Z 0-9 '_' '-' '.'.
    :type LastSyncCount: integer
    :param LastSyncCount: last server sync count for this record.
    :type NextToken: string
    :param NextToken: pagination token for the next page of results.
    :type MaxResults: integer
    :param MaxResults: maximum number of results to return.
    :type SyncSessionToken: string
    :param SyncSessionToken: token with session ID, identity ID, expiration.
    :rtype: dict
    :return: dict with 'Records' (Key/Value/SyncCount/LastModifiedDate/
        LastModifiedBy/DeviceLastModifiedDate), 'NextToken', 'Count',
        'DatasetSyncCount', 'LastModifiedBy', 'MergedDatasetNames',
        'DatasetExists', 'DatasetDeletedAfterRequestedSyncCount',
        'SyncSessionToken'.  (Documentation stub: the function body does
        nothing.)
    """
    pass
ffdb-2.3.2
ffdb-2.3.2//ffdb.pyfile:/ffdb.py:function:read_from_size/read_from_size
def read_from_size(fh, begin, size):
    """Seek *fh* to offset *begin* and read *size* bytes from there."""
    fh.seek(begin)
    chunk = fh.read(size)
    return chunk
dolmen.collection-0.3
dolmen.collection-0.3//src/dolmen/collection/interfaces.pyclass:ICollection/get
def get(id, default=None):
    """Return the component with the given ID.

    Interface declaration (zope-style: no ``self``, no body).
    Implementations presumably return *default* when no component with
    *id* exists — confirm against concrete collections.
    """
spectrochempy
spectrochempy//extern/nmrglue/analysis/analysisbase.pyfile:/extern/nmrglue/analysis/analysisbase.py:function:valid_pt/valid_pt
def valid_pt(pt, shape):
    """Return True when the point (tuple of indices) lies inside the
    given shape, i.e. 0 <= index < extent along every dimension."""
    return all(0 <= index < extent for index, extent in zip(pt, shape))
spectra_lexer-11.1.0
spectra_lexer-11.1.0//spectra_lexer/resource/rules.pyclass:StenoRule/unmatched
@classmethod
def unmatched(cls, keys: str) ->'StenoRule':
    """Build a placeholder rule mapping leftover lexer keys to an empty
    letter string, flagged with ``cls.is_unmatched``."""
    flags = {cls.is_unmatched}
    return cls('', keys, '', 'unmatched keys', flags)
spiflash
spiflash//serialflash.pyclass:At45FlashDevice/has_feature
@classmethod
def has_feature(cls, feature):
    """Return True when this flash device's FEATURES bitmask includes
    any bit of *feature*."""
    return (cls.FEATURES & feature) != 0
ez
ez//ezs.pyfile:/ezs.py:function:integer/integer
def integer(number: float):
    """Convert a float to an int when the conversion is lossless.

    For example 1.0 becomes 1, but 1.5 stays 1.5.  Non-numeric input
    (or inf/nan) is returned unchanged, preserving the original
    best-effort contract.
    """
    try:
        int_n = int(number)
    except (TypeError, ValueError, OverflowError):
        # narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; these three cover int() failures
        # for non-numeric strings, None/objects, and inf
        return number
    return int_n if number == int_n else number
UFL-2017.1.0
UFL-2017.1.0//ufl/utils/sequences.pyfile:/ufl/utils/sequences.py:function:product/product
def product(sequence):
    """Return the product of all elements in *sequence* (1 when empty)."""
    result = 1
    for factor in sequence:
        result = result * factor
    return result
modelx-0.6.1
modelx-0.6.1//modelx/core/util.pyfile:/modelx/core/util.py:function:rel_to_abs/rel_to_abs
def rel_to_abs(target: str, namespace: str):
    """Convert a name relative to *namespace* into an absolute name.

    Leading dots on *target* climb the namespace: one dot keeps every
    namespace component, each additional dot drops one trailing
    component; the remainder of *target* is appended.
    """
    ns_parts = namespace.split('.')
    n_dots = 0
    while n_dots < len(target) and target[n_dots] == '.':
        n_dots += 1
    keep = len(ns_parts) - n_dots + 1
    if n_dots < len(target):
        rel_parts = target[n_dots:].split('.')
    else:
        rel_parts = []
    return '.'.join(ns_parts[:keep] + rel_parts)
pygeoid
pygeoid//reduction/atmosphere.pyfile:/reduction/atmosphere.py:function:wenzel_atm_corr/wenzel_atm_corr
def wenzel_atm_corr(height):
    """Return the atmospheric gravity correction by Wenzel, in mGal.

    Quadratic in height: 0.874 - 9.9e-05*h + 3.56e-09*h^2.

    Parameters
    ----------
    height : float or array_like of floats
        Height above sea level, in metres.

    References
    ----------
    .. [1] Wenzel, H., 1985, Hochauflosende Kugelfunktionsmodelle fur des
        Gravitationspotential der Erde [1]: Wissenschaftliche arbeiten der
        Fachrichtung Vermessungswesen der Universitat Hannover, 137
    """
    # coefficient names only; the arithmetic order matches the original
    # expression exactly, so floating-point results are bit-identical
    a0, a1, a2 = 0.874, -9.9e-05, 3.56e-09
    return a0 + a1 * height + a2 * height ** 2
pylastic-0.1.0
pylastic-0.1.0//pylastic/helpers.pyfile:/pylastic/helpers.py:function:create_index/create_index
def create_index(elastic_client, index, body):
    """Create a new Elastic index with the provided settings and mapping.

    :param elastic_client: Elastic client
    :param index: name of the index to create
    :param body: mapping and settings for the new index
    :return: response from Elastic
    :raises Exception: when the client returns None
    """
    result = elastic_client.indices.create(index=index, body=body)
    if result is None:
        raise Exception('Problem occurred creating index. Index: {}'.
            format(index))
    return result
keycloak-config-tool-0.0.6
keycloak-config-tool-0.0.6//keycloak_config/actions/custom_action.pyclass:CustomActionWrapper/valid_deploy_env
@staticmethod
def valid_deploy_env(deploy_env):
    """Return True when *deploy_env* is valid for this action.

    Custom actions only run against the 'local' deployment environment.
    """
    return 'local' == deploy_env
fake-blender-api-2.79-0.3.1
fake-blender-api-2.79-0.3.1//bpy/ops/transform.pyfile:/bpy/ops/transform.py:function:delete_orientation/delete_orientation
def delete_orientation():
    """Delete the active transformation orientation (stub for the
    Blender operator)."""
    pass
gentleman-15.5
gentleman-15.5//gentleman/base.pyfile:/gentleman/base.py:function:ShutdownInstance/ShutdownInstance
def ShutdownInstance(r, instance, dry_run=False, no_remember=False, timeout=120
    ):
    """Shut down an instance via a PUT to the instances endpoint.

    @type instance: str
    @param instance: the instance to shut down
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    @type no_remember: bool
    @param no_remember: if true, will not record the state change
    @rtype: string
    @return: job id
    """
    path = '/2/instances/%s/shutdown' % instance
    query = {'dry-run': dry_run, 'no-remember': no_remember}
    content = {'timeout': timeout}
    return r.request('put', path, query=query, content=content)
nti.schema-1.15.0
nti.schema-1.15.0//src/nti/schema/schema.pyclass:SchemaConfigured/sc_changed
@classmethod
def sc_changed(cls, orig_changed=None):
    """Call this method if you assign a fieldproperty to this class
    after creation.

    :param orig_changed: unused here; presumably kept for a caller's
        callback signature — confirm at call sites.
    """
    # NOTE(review): `__FP_KEY` is name-mangled to
    # `_SchemaConfigured__FP_KEY` inside the SchemaConfigured class
    # body; it names a cached attribute that is deleted here so it gets
    # recomputed — confirm against the rest of schema.py.
    if cls.__FP_KEY in cls.__dict__:
        try:
            delattr(cls, cls.__FP_KEY)
        except AttributeError:
            # best-effort removal: the attribute may resist deletion
            pass
pydruid-0.5.9
pydruid-0.5.9//env/lib/python3.7/stat.pyfile:/env/lib/python3.7/stat.py:function:S_ISPORT/S_ISPORT
def S_ISPORT(mode):
    """Return True if mode is from an event port.

    Event ports are a Solaris-only concept, so this always reports
    False on this platform.
    """
    return False
synonymscrawler-1.0.0
synonymscrawler-1.0.0//synonymscrawler/blockspring_synonyms_crawler.pyfile:/synonymscrawler/blockspring_synonyms_crawler.py:function:_invert_index/_invert_index
def _invert_index(orig_map): """ Essentially performs an inverted index operation on orig_map and returns the results. In other words, the resulting dict will use orig_map's values as the keys; result's values will be the list of keys in orig_map that correspond to each value. """ inverted_map = {} for key in orig_map: orig_value = orig_map[key] inverted_values = inverted_map.get(orig_value, []) inverted_values.append(key) inverted_map[orig_value] = inverted_values return inverted_map
dropbox-10.1.2
dropbox-10.1.2//dropbox/team_log.pyclass:EventType/sso_add_logout_url
@classmethod
def sso_add_logout_url(cls, val):
    """
    Build an instance of this class set to the ``sso_add_logout_url`` tag
    carrying ``val``.

    :param SsoAddLogoutUrlType val:
    :rtype: EventType
    """
    tag = 'sso_add_logout_url'
    return cls(tag, val)
hazelcast
hazelcast//protocol/codec/ringbuffer_tail_sequence_codec.pyfile:/protocol/codec/ringbuffer_tail_sequence_codec.py:function:decode_response/decode_response
def decode_response(client_message, to_object=None):
    """Decode a ringbuffer tail-sequence response from the client message.

    :param client_message: protocol message positioned at the response frame
    :param to_object: unused; kept for codec signature compatibility
    :return: dict with the decoded long under the ``response`` key
    """
    return {'response': client_message.read_long()}
pyxley-0.1.0
pyxley-0.1.0//pyxley/charts/datatables/datatable.pyclass:DataTable/format_row
@staticmethod
def format_row(row, bounds, columns):
    """Format one dataframe row in place and return it.

    For each configured column present in *row*, apply its printf-style
    ``format`` (if any); columns listed in *bounds* are then replaced by a
    ``[min, lower, upper, max]`` list, where lower/upper are read from the
    sibling columns named by the bounds spec.
    """
    for name, spec in columns.items():
        if name not in row:
            continue
        if 'format' in spec:
            row[name] = spec['format'] % row[name]
        if name in bounds:
            limits = bounds[name]
            row[name] = [limits['min'], row[limits['lower']],
                         row[limits['upper']], limits['max']]
    return row
django-watermark-0.1.8
django-watermark-0.1.8//watermarker/utils.pyfile:/watermarker/utils.py:function:_val/_val
def _val(var, is_percent=False): """ Tries to determine the appropriate value of a particular variable that is passed in. If the value is supposed to be a percentage, a whole integer will be sought after and then turned into a floating point number between 0 and 1. If the value is supposed to be an integer, the variable is cast into an integer. """ try: if is_percent: var = float(int(var.strip('%')) / 100.0) else: var = int(var) except ValueError: raise ValueError('invalid watermark parameter: ' + var) return var
pip_utils
pip_utils//dependants.pyfile:/dependants.py:function:is_dependant/is_dependant
def is_dependant(package, project_name):
    """Determine whether `package` directly requires `project_name`.

    The comparison is case-insensitive, matching pip's case-insensitive
    treatment of distribution names.

    :param package: distribution object exposing ``requires()``
    :param project_name: name of the candidate dependency
    :return: True if `package` depends on `project_name`, else False
    """
    # Hoist the loop-invariant normalization; also return an explicit False
    # instead of falling off the end (which yielded an implicit None).
    target = project_name.lower()
    for requirement in package.requires():
        if requirement.project_name.lower() == target:
            return True
    return False
pylinkirc-3.0.0
pylinkirc-3.0.0//utils.pyfile:/utils.py:function:wrap_arguments/wrap_arguments
def wrap_arguments(prefix, args, length, separator=' ', max_args_per_line=0):
    """
    Takes a static prefix and a list of arguments, and returns a list of
    strings with the arguments wrapped across multiple lines. This is useful
    for breaking up long SJOIN or MODE strings so they aren't cut off by
    message length limits.
    """
    strings = []
    assert args, 'wrap_arguments: no arguments given'
    buf = prefix
    # Copy so pop(0) below does not mutate the caller's list.
    args = list(args)
    while args:
        # Every argument must at least fit on a line of its own.
        assert len(prefix + args[0]
            ) <= length, 'wrap_arguments: Argument %r is too long for the given length %s' % (
            args[0], length)
        # Append the next argument when it fits within the length budget and
        # the optional per-line argument-count cap (0 disables the cap).
        if len(buf + args[0]) + 1 <= length and (not max_args_per_line or
                len(buf.split(' ')) < max_args_per_line):
            if buf != prefix:
                buf += separator
            buf += args.pop(0)
        else:
            # Line full: flush it and retry the same argument on a new line.
            strings.append(buf)
            buf = prefix
    else:
        # while/else: runs on normal loop exit, flushing the final line.
        strings.append(buf)
    return strings
splash-3.4.1
splash-3.4.1//splash/utils.pyfile:/splash/utils.py:function:dedupe/dedupe
def dedupe(it):
    """Yield the elements of *it* once each, keeping first-seen order.

    >>> list(dedupe([3,1,3,1,2]))
    [3, 1, 2]
    """
    seen = set()
    for item in it:
        if item not in seen:
            seen.add(item)
            yield item
neutron-15.0.2
neutron-15.0.2//neutron/agent/linux/utils.pyclass:RootwrapDaemonHelper/__new__
def __new__(cls):
    """There is no reason to instantiate this class"""
    # Pure-utility namespace: constructing an instance is always an error.
    raise NotImplementedError()
core_parser_app-1.8.0
core_parser_app-1.8.0//core_parser_app/tools/parser/utils/xml.pyfile:/core_parser_app/tools/parser/utils/xml.py:function:get_element_occurrences/get_element_occurrences
def get_element_occurrences(element):
    """Read the XSD min/max occurrence attributes of *element*.

    Args:
        element: XML element exposing an ``attrib`` mapping.

    Returns:
        Tuple ``(min_occurs, max_occurs)``; both default to 1, and an
        ``unbounded`` maxOccurs is encoded as -1.
    """
    attrib = element.attrib
    min_occurs = int(attrib.get('minOccurs', 1))
    raw_max = attrib.get('maxOccurs')
    if raw_max is None:
        max_occurs = 1
    elif raw_max == 'unbounded':
        max_occurs = -1
    else:
        max_occurs = int(raw_max)
    return min_occurs, max_occurs
flask_restful_swagger_2
flask_restful_swagger_2//swagger.pyfile:/swagger.py:function:set_nested/set_nested
def set_nested(d, key_spec, value):
    """Set *value* inside nested dict *d* at the dotted path *key_spec*.

    Intermediate dictionaries are created as needed.

    :param d: The dictionary to modify
    :param key_spec: The key specifier in dotted notation, e.g. ``'a.b.c'``
    :param value: The value to store at the final key
    """
    *parents, last = key_spec.split('.')
    node = d
    for part in parents:
        node = node.setdefault(part, {})
    node[last] = value
coalib
coalib//bearlib/languages/documentation/DocumentationExtraction.pyfile:/bearlib/languages/documentation/DocumentationExtraction.py:function:_extract_doc_comment_standard/_extract_doc_comment_standard
def _extract_doc_comment_standard(content, line, column, markers):
    """
    Extract a documentation that starts at given beginning with standard
    layout.

    The standard layout applies e.g. for C doxygen-style documentation::

        /**
         * documentation
         */

    :param content: Presplitted lines of the source-code-string.
    :param line:    Line where the documentation comment starts (behind the
                    start marker). Zero-based.
    :param column:  Column where the documentation comment starts (behind the
                    start marker). Zero-based.
    :param markers: The documentation identifying markers
                    (start, each-line, end).
    :return:        If the comment matched layout a triple with end-of-comment
                    line, column and the extracted documentation. If not
                    matched, returns None.
    """
    # Fast path: the end marker sits on the same line as the start marker.
    pos = content[line].find(markers[2], column)
    if pos != -1:
        return line, pos + len(markers[2]), content[line][column:pos]
    doc_comment = content[line][column:]
    line += 1
    while line < len(content):
        pos = content[line].find(markers[2])
        # markers[1] is the per-line marker (e.g. ``*``) that gets stripped.
        each_line_pos = content[line].find(markers[1])
        if pos == -1:
            # No end marker yet: a line without the each-line marker breaks
            # the expected layout, so the whole extraction fails.
            if each_line_pos == -1:
                return None
            doc_comment += content[line][each_line_pos + len(markers[1]):]
        else:
            # End marker found; keep any text between the each-line marker
            # and the end marker on this final line.
            if each_line_pos != -1 and each_line_pos + 1 < pos:
                doc_comment += content[line][each_line_pos + len(markers[1]):pos]
            return line, pos + len(markers[2]), doc_comment
        line += 1
    # Ran off the end of the file without seeing the end marker.
    return None
fake-blender-api-2.79-0.3.1
fake-blender-api-2.79-0.3.1//bpy/ops/curve.pyfile:/bpy/ops/curve.py:function:make_segment/make_segment
def make_segment():
    """Join two curves by their selected ends

    Stub from a fake Blender API package: only the signature is provided
    for IDE/autocomplete purposes, so the body is intentionally empty.
    """
    pass
ipydataclean-0.2.2
ipydataclean-0.2.2//dataclean/cleaning.pyfile:/dataclean/cleaning.py:function:type_convert_mean/type_convert_mean
def type_convert_mean(dataframe, colname, data_type):
    """Replace mistyped values in ``dataframe[colname]`` with the column mean.

    The mean is computed over the int/float entries only; any non-null value
    that is not an instance of *data_type* is overwritten with that mean.
    The dataframe is modified in place and also returned.
    """
    series = dataframe[colname]
    numeric_mask = series.apply(lambda v: isinstance(v, (int, float)))
    mean_value = series.loc[numeric_mask].mean()
    mistyped = series.notnull() & series.apply(lambda v: not isinstance(v, data_type))
    dataframe.loc[mistyped, colname] = mean_value
    return dataframe
ftw.bridge.proxy-1.1.1
ftw.bridge.proxy-1.1.1//ftw/bridge/proxy/interfaces.pyclass:IProxy/__call__
def __call__():
    """Executes the request to the target client and returns its response.
    """
    # zope.interface method specification: deliberately has no body and no
    # ``self`` parameter; concrete proxies implement the actual request.
bs_ds-0.11.1
bs_ds-0.11.1//bs_ds/capstone.pyfile:/bs_ds/capstone.py:function:compare_word_cloud/compare_word_cloud
def compare_word_cloud(text1, label1, text2, label2):
    """Render two word clouds side by side for visual comparison.

    Each text is a collection of tokens that gets space-joined before being
    fed to ``WordCloud``. Returns the matplotlib ``(fig, axes)`` pair.
    """
    from wordcloud import WordCloud
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 15))
    panels = (axes[0], text1, label1), (axes[1], text2, label2)
    for axis, tokens, title in panels:
        cloud = WordCloud(max_font_size=80, max_words=200,
                          background_color='white').generate(' '.join(tokens))
        axis.imshow(cloud, interpolation='bilinear')
        axis.set_aspect(1.5)
        axis.axis('off')
        axis.set_title(title, fontsize=20)
    fig.tight_layout()
    return fig, axes
cdt
cdt//causality/graph/PC.pyfile:/causality/graph/PC.py:function:message_warning/message_warning
def message_warning(msg, *a, **kwargs):
    """Ignore everything except the message.

    Drop-in replacement for a warnings formatter: every positional/keyword
    argument besides *msg* is discarded.
    """
    return '%s\n' % (msg,)
python-bps-5
python-bps-5//bps/util.pyfile:/bps/util.py:function:read_var_int/read_var_int
def read_var_int(handle):
    """Read a BPS-style variable-length integer from the given file handle.

    Bytes are little-endian 7-bit groups; the high bit marks the final byte,
    and each continuation implicitly adds the next shift step so every
    integer has exactly one encoding.
    """
    value = 0
    shift = 1
    while True:
        byte = handle.read(1)[0]
        value += (byte & 0x7F) * shift
        if byte & 0x80:
            return value
        shift <<= 7
        value += shift
easyshop.order-0.1a1
easyshop.order-0.1a1//easyshop/order/browser/order_view.pyclass:IOrderView/isPaymentAllowed
def isPaymentAllowed():
    """Returns True if the redo of a payment is allowed.
    """
    # zope.interface method specification: intentionally has no body and no
    # ``self`` parameter; implemented by the order view.
robotpy-hal-base-2019.2.3
robotpy-hal-base-2019.2.3//hal/functions.pyfile:/hal/functions.py:function:hal_wrapper/hal_wrapper
def hal_wrapper(f):
    """Decorator to support introspection.

    The wrapped function must be the same name as the wrapper function, but
    start with an underscore.
    """
    inner = globals()['_' + f.__name__]
    # Propagate the HAL metadata when the underscored implementation has it.
    if hasattr(inner, 'fndata'):
        f.fndata = inner.fndata
    return f
photovoltaic
photovoltaic//semi.pyfile:/semi.py:function:bulkfeffective/bulkfeffective
def bulkfeffective(tau_eff, S, thickness):
    """Return the bulk lifetime (s).

    Given:
        tau_eff   effective lifetime (s)
        S         surface recombination velocity (cm/s)
        thickness wafer thickness (cm)

    NOTE(review): this subtracts the surface term ``W / (2*S)`` directly from
    tau_eff rather than combining reciprocals — confirm against the intended
    recombination model.
    """
    surface_term = thickness / (2 * S)
    return tau_eff - surface_term
ArangoPy-0.5.7
ArangoPy-0.5.7//arangodb/query/simple.pyclass:SimpleIndexQuery/get_by_example_hash
@classmethod
def get_by_example_hash(cls, collection, index_id, example_data,
                        allow_multiple=False, skip=None, limit=None):
    """
    Find all documents matching the example document, using the given
    hash index.

    :param collection Collection instance
    :param index_id ID of the hash index used for the lookup
    :param example_data The example document to match
    :param allow_multiple If the query may return multiple documents
    :param skip Number of documents to skip before returning results
    :param limit Maximal amount of documents returned (applied after skip)
    :returns Single document / Document list
    """
    extra = {'index': index_id, 'skip': skip, 'limit': limit}
    return cls._construct_query(name='by-example-hash',
                                collection=collection,
                                example=example_data,
                                multiple=allow_multiple,
                                **extra)
aim
aim//profiler/stat.pyclass:Stat/reset_proc_interval
@classmethod
def reset_proc_interval(cls):
    """
    Calls process `cpu_percent` which resets cpu utilization tracking cycle
    Read more: https://psutil.readthedocs.io/en/latest/#psutil.cpu_percent
    """
    # interval=0.0 returns immediately and starts a fresh measurement window;
    # the next cpu_percent() call reports usage accumulated since this reset.
    cls._process.cpu_percent(0.0)
cli_code
cli_code//util.pyfile:/util.py:function:findlower/findlower
def findlower(x, vec):
    """Return the index of the first element of *vec* strictly less than *x*.

    :param x: threshold value
    :param vec: sequence to scan
    :return: zero-based index of the first element ``< x``, or -1 if none
    """
    # The docstring previously claimed an equality search; the code has
    # always looked for the first strictly-smaller element.
    for index, value in enumerate(vec):
        if value < x:
            return index
    return -1
audio.quantizers-2.2
audio.quantizers-2.2//.lib/pkg_resources.pyclass:IResourceProvider/get_resource_string
def get_resource_string(manager, resource_name):
    """Return a string containing the contents of `resource_name`

    `manager` must be an ``IResourceManager``"""
    # Interface specification (pkg_resources IResourceProvider): no body;
    # concrete providers implement the actual resource lookup.
detection-utils-1.0.0
detection-utils-1.0.0//versioneer.pyfile:/versioneer.py:function:scan_setup_py/scan_setup_py
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations."""
    found = set()
    setters = False
    errors = 0
    # Substrings that prove setup.py wires versioneer in, mapped to a tag.
    needles = {'import versioneer': 'import',
               'versioneer.get_cmdclass()': 'cmdclass',
               'versioneer.get_version()': 'get_version'}
    with open('setup.py', 'r') as handle:
        for text in handle.readlines():
            for needle, tag in needles.items():
                if needle in text:
                    found.add(tag)
            # Old-style in-file configuration that now belongs in setup.cfg.
            if 'versioneer.VCS' in text or 'versioneer.versionfile_source' in text:
                setters = True
    if len(found) != 3:
        print('')
        print('Your setup.py appears to be missing some important items')
        print('(but I might be wrong). Please make sure it has something')
        print('roughly like the following:')
        print('')
        print(' import versioneer')
        print(' setup( version=versioneer.get_version(),')
        print(' cmdclass=versioneer.get_cmdclass(), ...)')
        print('')
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print('now lives in setup.cfg, and should be removed from setup.py')
        print('')
        errors += 1
    return errors
wbml-0.1.3
wbml-0.1.3//wbml/util.pyfile:/wbml/util.py:function:inv_perm/inv_perm
def inv_perm(perm):
    """Invert a permutation.

    Args:
        perm (list): Permutation to invert.

    Returns:
        list: Inverse permutation, i.e. ``inv[perm[i]] == i``.
    """
    positions = {value: index for index, value in enumerate(perm)}
    return [positions[slot] for slot in range(len(perm))]
oci-2.14.1
oci-2.14.1//src/oci/secrets/models/secret_bundle_content_details.pyclass:SecretBundleContentDetails/get_subtype
@staticmethod
def get_subtype(object_dictionary):
    """
    Given the hash representation of a subtype of this class, use the info in
    the hash to return the class name of the subtype.

    :param object_dictionary: dict containing a ``contentType`` key
    :return: subtype class name as a string
    """
    # Use a descriptive local instead of shadowing the builtin ``type``.
    content_type = object_dictionary['contentType']
    if content_type == 'BASE64':
        return 'Base64SecretBundleContentDetails'
    return 'SecretBundleContentDetails'
instapyshoms
instapyshoms//file_manager.pyfile:/file_manager.py:function:remove_last_slash/remove_last_slash
def remove_last_slash(path):
    """Strip a single trailing ``'/'`` from *path*, if present.

    Only one slash is removed; ``'a//'`` becomes ``'a/'``.
    """
    return path[:-1] if path.endswith('/') else path
skhubness
skhubness//neighbors/graph.pyfile:/neighbors/graph.py:function:_query_include_self/_query_include_self
def _query_include_self(X, include_self): """Return the query based on include_self param""" if include_self: query = X._fit_X else: query = None return query
psj.content-0.2
psj.content-0.2//src/psj/content/interfaces.pyclass:IMagazine/rebuild
def rebuild():
    """This method is called, everytime an issue is updated.
    """
    # zope.interface method specification: intentionally has no body and no
    # ``self`` parameter; magazine implementations perform the rebuild.
kick
kick//device2/general/actions/factory.pyclass:Factory/factory_by_name
@staticmethod
def factory_by_name(class_name, args=(), kwargs=None):
    """Instantiate a device class by its global name.

    :param class_name: string (has to match the name of a device class
        visible in this module's globals)
    :param args: positional arguments forwarded to the constructor
    :param kwargs: keyword arguments forwarded to the constructor
    :return: the constructed device instance
    """
    # A ``{}`` default would be shared across calls (mutable default
    # argument); use the None sentinel instead.
    if kwargs is None:
        kwargs = {}
    return globals()[class_name](*args, **kwargs)